feat(face-landmarks) merge face expressions and face centering (#11283)

* feat(face-landmarks) merge face expressions and face centering

* code review

* code review

* code review
Avram Tudor 2022-04-04 16:09:14 +03:00 committed by GitHub
parent 4b84f71021
commit 2c165d4313
23 changed files with 410 additions and 923 deletions
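For orientation, the merged detection flow introduced here is driven by two config flags that the changed code reads from features/base/config. A minimal, hedged sketch of the relevant config.js keys (the key names are taken from this diff; the values shown are only illustrative):

    // Sketch only: key names appear in the diff below, values are examples.
    enableFacialRecognition: true,     // enables the face-expression detection path
    faceCoordinatesSharing: {
        enabled: true,                 // enables the face-box (centering) detection path
        threshold: 10,                 // minimum % movement of the box's left edge before re-sending it
        captureInterval: 1000          // ms between frames posted to the worker (SEND_IMAGE_INTERVAL_MS default)
    },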

View File

@@ -53,8 +53,6 @@ deploy-appbundle:
$(OUTPUT_DIR)/analytics-ga.js \
$(BUILD_DIR)/analytics-ga.min.js \
$(BUILD_DIR)/analytics-ga.min.js.map \
$(BUILD_DIR)/face-centering-worker.min.js \
$(BUILD_DIR)/face-centering-worker.min.js.map \
$(BUILD_DIR)/facial-expressions-worker.min.js \
$(BUILD_DIR)/facial-expressions-worker.min.js.map \
$(DEPLOY_DIR)

package-lock.json (generated, 29 changed lines)
View File

@@ -51,9 +51,7 @@
"@react-navigation/native": "6.0.6",
"@react-navigation/stack": "6.0.11",
"@svgr/webpack": "4.3.2",
"@tensorflow-models/blazeface": "0.0.7",
"@tensorflow/tfjs-backend-wasm": "3.13.0",
"@tensorflow/tfjs-converter": "3.13.0",
"@tensorflow/tfjs-core": "3.13.0",
"@vladmandic/face-api": "1.6.4",
"@xmldom/xmldom": "0.7.5",
@@ -4959,15 +4957,6 @@
"node": ">=8"
}
},
"node_modules/@tensorflow-models/blazeface": {
"version": "0.0.7",
"resolved": "https://registry.npmjs.org/@tensorflow-models/blazeface/-/blazeface-0.0.7.tgz",
"integrity": "sha512-+hInPkvHJoubfiXlmNuF3SCucZvU6W1PMC25IV99NSAftJUpKvLokfF93iX8UkOFQCXkPFbnLKacGfGlbjgvMw==",
"peerDependencies": {
"@tensorflow/tfjs-converter": "^3.1.0",
"@tensorflow/tfjs-core": "^3.1.0"
}
},
"node_modules/@tensorflow/tfjs-backend-cpu": {
"version": "3.13.0",
"resolved": "https://registry.npmjs.org/@tensorflow/tfjs-backend-cpu/-/tfjs-backend-cpu-3.13.0.tgz",
@@ -4995,14 +4984,6 @@
"@tensorflow/tfjs-core": "3.13.0"
}
},
"node_modules/@tensorflow/tfjs-converter": {
"version": "3.13.0",
"resolved": "https://registry.npmjs.org/@tensorflow/tfjs-converter/-/tfjs-converter-3.13.0.tgz",
"integrity": "sha512-H2VpDTv9Ve0HBt7ttzz46DmnsPaiT0B+yJjVH3NebGZbgY9C8boBgJIsdyqfiqEWBS3WxF8h4rh58Hv5XXMgaQ==",
"peerDependencies": {
"@tensorflow/tfjs-core": "3.13.0"
}
},
"node_modules/@tensorflow/tfjs-core": {
"version": "3.13.0",
"resolved": "https://registry.npmjs.org/@tensorflow/tfjs-core/-/tfjs-core-3.13.0.tgz",
@@ -23434,11 +23415,6 @@
"loader-utils": "^1.2.3"
}
},
"@tensorflow-models/blazeface": {
"version": "0.0.7",
"resolved": "https://registry.npmjs.org/@tensorflow-models/blazeface/-/blazeface-0.0.7.tgz",
"integrity": "sha512-+hInPkvHJoubfiXlmNuF3SCucZvU6W1PMC25IV99NSAftJUpKvLokfF93iX8UkOFQCXkPFbnLKacGfGlbjgvMw=="
},
"@tensorflow/tfjs-backend-cpu": {
"version": "3.13.0",
"resolved": "https://registry.npmjs.org/@tensorflow/tfjs-backend-cpu/-/tfjs-backend-cpu-3.13.0.tgz",
@@ -23457,11 +23433,6 @@
"@types/emscripten": "~0.0.34"
}
},
"@tensorflow/tfjs-converter": {
"version": "3.13.0",
"resolved": "https://registry.npmjs.org/@tensorflow/tfjs-converter/-/tfjs-converter-3.13.0.tgz",
"integrity": "sha512-H2VpDTv9Ve0HBt7ttzz46DmnsPaiT0B+yJjVH3NebGZbgY9C8boBgJIsdyqfiqEWBS3WxF8h4rh58Hv5XXMgaQ=="
},
"@tensorflow/tfjs-core": {
"version": "3.13.0",
"resolved": "https://registry.npmjs.org/@tensorflow/tfjs-core/-/tfjs-core-3.13.0.tgz",

View File

@@ -56,9 +56,7 @@
"@react-navigation/native": "6.0.6",
"@react-navigation/stack": "6.0.11",
"@svgr/webpack": "4.3.2",
"@tensorflow-models/blazeface": "0.0.7",
"@tensorflow/tfjs-backend-wasm": "3.13.0",
"@tensorflow/tfjs-converter": "3.13.0",
"@tensorflow/tfjs-core": "3.13.0",
"@vladmandic/face-api": "1.6.4",
"@xmldom/xmldom": "0.7.5",

View File

@@ -20,7 +20,6 @@ import '../shared-video/middleware';
import '../settings/middleware';
import '../talk-while-muted/middleware';
import '../virtual-background/middleware';
import '../face-centering/middleware';
import '../facial-recognition/middleware';
import '../gifs/middleware';

View File

@@ -2,7 +2,6 @@
import '../base/devices/reducer';
import '../e2ee/reducer';
import '../face-centering/reducer';
import '../facial-recognition/reducer';
import '../feedback/reducer';
import '../local-recording/reducer';

View File

@@ -1,39 +0,0 @@
/**
* Redux action type dispatched in order to set the time interval in which
* the message to the face centering worker will be sent.
*
* {
* type: SET_DETECTION_TIME_INTERVAL,
* time: number
* }
*/
export const SET_DETECTION_TIME_INTERVAL = 'SET_DETECTION_TIME_INTERVAL';
/**
* Redux action type dispatched in order to set recognition active in the state.
*
* {
* type: START_FACE_RECOGNITION
* }
*/
export const START_FACE_RECOGNITION = 'START_FACE_RECOGNITION';
/**
* Redux action type dispatched in order to set recognition inactive in the state.
*
* {
* type: STOP_FACE_RECOGNITION
* }
*/
export const STOP_FACE_RECOGNITION = 'STOP_FACE_RECOGNITION';
/**
* Redux action type dispatched in order to update coordinates of a detected face.
*
* {
* type: UPDATE_FACE_COORDINATES,
* faceBox: Object({ left, bottom, right, top }),
* participantId: string
* }
*/
export const UPDATE_FACE_COORDINATES = 'UPDATE_FACE_COORDINATES';

View File

@@ -1,139 +0,0 @@
import 'image-capture';
import { getCurrentConference } from '../base/conference';
import { getLocalParticipant, getParticipantCount } from '../base/participants';
import { getLocalVideoTrack } from '../base/tracks';
import { getBaseUrl } from '../base/util';
import '../facial-recognition/createImageBitmap';
import {
START_FACE_RECOGNITION,
STOP_FACE_RECOGNITION,
UPDATE_FACE_COORDINATES
} from './actionTypes';
import {
FACE_BOX_MESSAGE,
SEND_IMAGE_INTERVAL_MS
} from './constants';
import { sendDataToWorker, sendFaceBoxToParticipants } from './functions';
import logger from './logger';
/**
* Interval object for sending new image data to worker.
*/
let interval;
/**
* Object containing a image capture of the local track.
*/
let imageCapture;
/**
* Object where the face centering worker is stored.
*/
let worker;
/**
* Loads the worker.
*
* @returns {Function}
*/
export function loadWorker() {
return async function(dispatch: Function, getState: Function) {
if (navigator.product === 'ReactNative') {
logger.warn('Unsupported environment for face centering');
return;
}
const baseUrl = getBaseUrl();
let workerUrl = `${baseUrl}libs/face-centering-worker.min.js`;
const workerBlob = new Blob([ `importScripts("${workerUrl}");` ], { type: 'application/javascript' });
workerUrl = window.URL.createObjectURL(workerBlob);
worker = new Worker(workerUrl, { name: 'Face Centering Worker' });
worker.onmessage = function(e: Object) {
const { type, value } = e.data;
// receives a message with the face(s) bounding box.
if (type === FACE_BOX_MESSAGE) {
const state = getState();
const conference = getCurrentConference(state);
const localParticipant = getLocalParticipant(state);
if (getParticipantCount(state) > 1) {
sendFaceBoxToParticipants(conference, value);
}
dispatch({
type: UPDATE_FACE_COORDINATES,
faceBox: value,
id: localParticipant.id
});
}
};
dispatch(startFaceRecognition());
};
}
/**
* Starts the recognition and detection of face position.
*
* @param {Track | undefined} track - Track for which to start detecting faces.
*
* @returns {Function}
*/
export function startFaceRecognition(track) {
return async function(dispatch: Function, getState: Function) {
if (!worker) {
return;
}
const state = getState();
const { recognitionActive } = state['features/face-centering'];
if (recognitionActive) {
logger.log('Face centering already active.');
return;
}
const localVideoTrack = track || getLocalVideoTrack(state['features/base/tracks']);
if (!localVideoTrack) {
logger.warn('Face centering is disabled due to missing local track.');
return;
}
dispatch({ type: START_FACE_RECOGNITION });
logger.log('Start face recognition');
const stream = localVideoTrack.jitsiTrack.getOriginalStream();
const firstVideoTrack = stream.getVideoTracks()[0];
imageCapture = new ImageCapture(firstVideoTrack);
const { disableLocalVideoFlip, faceCoordinatesSharing } = state['features/base/config'];
interval = setInterval(() => {
sendDataToWorker(worker, imageCapture, faceCoordinatesSharing?.threshold, !disableLocalVideoFlip);
}, faceCoordinatesSharing?.captureInterval || SEND_IMAGE_INTERVAL_MS);
};
}
/**
* Stops the recognition and detection of face position.
*
* @returns {Function}
*/
export function stopFaceRecognition() {
return function(dispatch: Function) {
clearInterval(interval);
interval = null;
imageCapture = null;
dispatch({ type: STOP_FACE_RECOGNITION });
logger.log('Stop face recognition');
};
}

View File

@@ -1,20 +0,0 @@
/**
* Type of message sent from main thread to worker that contain image data and
* will trigger a response message from the worker containing the detected face(s) bounding box if any.
*/
export const DETECT_FACE_BOX = 'DETECT_FACE_BOX';
/**
* Type of event sent on the data channel.
*/
export const FACE_BOX_EVENT_TYPE = 'face-box';
/**
* Type of message sent from the worker to main thread that contains a face box or undefined.
*/
export const FACE_BOX_MESSAGE = 'face-box';
/**
* Miliseconds interval value for sending new image data to the worker.
*/
export const SEND_IMAGE_INTERVAL_MS = 100;

View File

@@ -1,107 +0,0 @@
import * as blazeface from '@tensorflow-models/blazeface';
import { setWasmPaths } from '@tensorflow/tfjs-backend-wasm';
import * as tf from '@tensorflow/tfjs-core';
import { FACE_BOX_MESSAGE, DETECT_FACE_BOX } from './constants';
/**
* Indicates whether an init error occured.
*/
let initError = false;
/**
* The blazeface model.
*/
let model;
/**
* A flag that indicates whether the tensorflow backend is set or not.
*/
let backendSet = false;
/**
* Flag for indicating whether an init operation (e.g setting tf backend) is in progress.
*/
let initInProgress = false;
/**
* Callbacks queue for avoiding overlapping executions of face detection.
*/
const queue = [];
/**
* Contains the last valid face bounding box (passes threshold validation) which was sent to the main process.
*/
let lastValidFaceBox;
const detect = async message => {
const { baseUrl, image, isHorizontallyFlipped, threshold } = message.data;
if (initInProgress || initError) {
return;
}
if (!backendSet) {
initInProgress = true;
setWasmPaths(`${baseUrl}libs/`);
try {
await tf.setBackend('wasm');
} catch (err) {
initError = true;
return;
}
backendSet = true;
initInProgress = false;
}
// load face detection model
if (!model) {
try {
model = await blazeface.load();
} catch (err) {
initError = true;
return;
}
}
tf.engine().startScope();
const imageTensor = tf.browser.fromPixels(image);
const detections = await model.estimateFaces(imageTensor, false, isHorizontallyFlipped, false);
tf.engine().endScope();
let faceBox;
if (detections.length) {
faceBox = {
// normalize to percentage based
left: Math.round(Math.min(...detections.map(d => d.topLeft[0])) * 100 / image.width),
right: Math.round(Math.max(...detections.map(d => d.bottomRight[0])) * 100 / image.width),
top: Math.round(Math.min(...detections.map(d => d.topLeft[1])) * 100 / image.height),
bottom: Math.round(Math.max(...detections.map(d => d.bottomRight[1])) * 100 / image.height)
};
if (lastValidFaceBox && Math.abs(lastValidFaceBox.left - faceBox.left) < threshold) {
return;
}
lastValidFaceBox = faceBox;
self.postMessage({
type: FACE_BOX_MESSAGE,
value: faceBox
});
}
};
onmessage = function(message) {
if (message.data.id === DETECT_FACE_BOX) {
queue.push(() => detect(message));
queue.shift()();
}
};

View File

@@ -1,112 +0,0 @@
import { getBaseUrl } from '../base/util';
import { FACE_BOX_EVENT_TYPE, DETECT_FACE_BOX } from './constants';
import logger from './logger';
/**
* Sends the face box to all the other participants.
*
* @param {Object} conference - The current conference.
* @param {Object} faceBox - Face box to be sent.
* @returns {void}
*/
export function sendFaceBoxToParticipants(
conference: Object,
faceBox: Object
): void {
try {
conference.sendEndpointMessage('', {
type: FACE_BOX_EVENT_TYPE,
faceBox
});
} catch (err) {
logger.warn('Could not broadcast the face box to the other participants', err);
}
}
/**
* Sends the image data a canvas from the track in the image capture to the face centering worker.
*
* @param {Worker} worker - Face centering worker.
* @param {Object} imageCapture - Image capture that contains the current track.
* @param {number} threshold - Movement threshold as percentage for sharing face coordinates.
* @param {boolean} isHorizontallyFlipped - Indicates whether the image is horizontally flipped.
* @returns {Promise<void>}
*/
export async function sendDataToWorker(
worker: Worker,
imageCapture: Object,
threshold: number = 10,
isHorizontallyFlipped = true
): Promise<void> {
if (imageCapture === null || imageCapture === undefined) {
return;
}
let imageBitmap;
let image;
try {
imageBitmap = await imageCapture.grabFrame();
} catch (err) {
logger.warn(err);
return;
}
if (typeof OffscreenCanvas === 'undefined') {
const canvas = document.createElement('canvas');
const context = canvas.getContext('2d');
canvas.width = imageBitmap.width;
canvas.height = imageBitmap.height;
context.drawImage(imageBitmap, 0, 0);
image = context.getImageData(0, 0, imageBitmap.width, imageBitmap.height);
} else {
image = imageBitmap;
}
worker.postMessage({
id: DETECT_FACE_BOX,
baseUrl: getBaseUrl(),
image,
threshold,
isHorizontallyFlipped
});
imageBitmap.close();
}
/**
* Gets face box for a participant id.
*
* @param {string} id - The participant id.
* @param {Object} state - The redux state.
* @returns {Object}
*/
export function getFaceBoxForId(id: string, state: Object) {
return state['features/face-centering'].faceBoxes[id];
}
/**
* Gets the video object position for a participant id.
*
* @param {Object} state - The redux state.
* @param {string} id - The participant id.
* @returns {string} - CSS object-position in the shape of '{horizontalPercentage}% {verticalPercentage}%'.
*/
export function getVideoObjectPosition(state: Object, id: string) {
const faceBox = getFaceBoxForId(id, state);
if (faceBox) {
const { left, right, top, bottom } = faceBox;
const horizontalPos = 100 - Math.round((left + right) / 2, 100);
const verticalPos = 100 - Math.round((top + bottom) / 2, 100);
return `${horizontalPos}% ${verticalPos}%`;
}
return '50% 50%';
}

View File

@@ -1,3 +0,0 @@
import { getLogger } from '../base/logging/functions';
export default getLogger('features/face-centering');

View File

@@ -1,103 +0,0 @@
import {
CONFERENCE_JOINED,
CONFERENCE_WILL_LEAVE,
getCurrentConference
} from '../base/conference';
import { JitsiConferenceEvents } from '../base/lib-jitsi-meet';
import { MiddlewareRegistry } from '../base/redux';
import { TRACK_UPDATED, TRACK_REMOVED, TRACK_ADDED } from '../base/tracks';
import { UPDATE_FACE_COORDINATES } from './actionTypes';
import {
loadWorker,
stopFaceRecognition,
startFaceRecognition
} from './actions';
import { FACE_BOX_EVENT_TYPE } from './constants';
MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
const state = getState();
const { faceCoordinatesSharing } = state['features/base/config'];
if (!getCurrentConference(state)) {
return next(action);
}
if (action.type === CONFERENCE_JOINED) {
if (faceCoordinatesSharing?.enabled) {
dispatch(loadWorker());
}
// allow using remote face centering data when local face centering is not enabled
action.conference.on(
JitsiConferenceEvents.ENDPOINT_MESSAGE_RECEIVED,
(participant, eventData) => {
if (!participant || !eventData) {
return;
}
if (eventData.type === FACE_BOX_EVENT_TYPE) {
dispatch({
type: UPDATE_FACE_COORDINATES,
faceBox: eventData.faceBox,
id: participant.getId()
});
}
});
return next(action);
}
if (!faceCoordinatesSharing?.enabled) {
return next(action);
}
switch (action.type) {
case CONFERENCE_WILL_LEAVE : {
dispatch(stopFaceRecognition());
return next(action);
}
case TRACK_ADDED: {
const { jitsiTrack: { isLocal, videoType } } = action.track;
if (videoType === 'camera' && isLocal()) {
// need to pass this since the track is not yet added in the store
dispatch(startFaceRecognition(action.track));
}
return next(action);
}
case TRACK_UPDATED: {
const { jitsiTrack: { isLocal, videoType } } = action.track;
if (videoType !== 'camera' || !isLocal()) {
return next(action);
}
const { muted } = action.track;
if (muted !== undefined) {
// addresses video mute state changes
if (muted) {
dispatch(stopFaceRecognition());
} else {
dispatch(startFaceRecognition());
}
}
return next(action);
}
case TRACK_REMOVED: {
const { jitsiTrack: { isLocal, videoType } } = action.track;
if (videoType === 'camera' && isLocal()) {
dispatch(stopFaceRecognition());
}
return next(action);
}
}
return next(action);
});

View File

@@ -1,55 +0,0 @@
import { ReducerRegistry } from '../base/redux';
import {
START_FACE_RECOGNITION,
STOP_FACE_RECOGNITION,
UPDATE_FACE_COORDINATES
} from './actionTypes';
/**
* The default state object.
*/
const defaultState = {
/**
* Map of participant ids containing their respective facebox in the shape of a left, right, bottom, top percentages
* The percentages indicate the distance of the detected face starting edge (top or left) to the corresponding edge.
*
* Examples:
* 70% left indicates a 70% distance from the left edge of the video to the left edge of the detected face.
* 70% right indicates a 70% distance from the right edge of the video to the left edge of the detected face.
* 30% top indicates a 30% distance from the top edge of the video to the top edge of the detected face.
* 30% bottom indicates a 30% distance from the bottom edge of the video to the top edge of the detected face.
*/
faceBoxes: {},
/**
* Flag indicating whether face recognition is currently running.
*/
recognitionActive: false
};
ReducerRegistry.register('features/face-centering', (state = defaultState, action) => {
switch (action.type) {
case UPDATE_FACE_COORDINATES: {
return {
...state,
faceBoxes: {
...state.faceBoxes,
[action.id]: action.faceBox
}
};
}
case START_FACE_RECOGNITION: {
return {
...state,
recognitionActive: true
};
}
case STOP_FACE_RECOGNITION: {
return defaultState;
}
}
return state;
});

View File

@@ -11,17 +11,6 @@
*/
export const ADD_FACIAL_EXPRESSION = 'ADD_FACIAL_EXPRESSION';
/**
* Redux action type dispatched in order to set the time interval in which
* the message to the facial expression worker will be sent.
*
* {
* type: SET_DETECTION_TIME_INTERVAL,
* time: number
* }
*/
export const SET_DETECTION_TIME_INTERVAL = 'SET_DETECTION_TIME_INTERVAL';
/**
* Redux action type dispatched in order to set recognition active in the state.
*
@@ -57,3 +46,14 @@ export const CLEAR_FACIAL_EXPRESSIONS_BUFFER = 'CLEAR_FACIAL_EXPRESSIONS_BUFFER'
* }
*/
export const ADD_TO_FACIAL_EXPRESSIONS_BUFFER = 'ADD_TO_FACIAL_EXPRESSIONS_BUFFER ';
/**
* Redux action type dispatched in order to update coordinates of a detected face.
*
* {
* type: UPDATE_FACE_COORDINATES,
* faceBox: Object({ left, bottom, right, top }),
* participantId: string
* }
*/
export const UPDATE_FACE_COORDINATES = 'UPDATE_FACE_COORDINATES';

View File

@@ -2,6 +2,8 @@
import 'image-capture';
import './createImageBitmap';
import { getCurrentConference } from '../base/conference';
import { getLocalParticipant, getParticipantCount } from '../base/participants';
import { getLocalVideoTrack } from '../base/tracks';
import { getBaseUrl } from '../base/util';
@@ -9,18 +11,21 @@ import {
ADD_FACIAL_EXPRESSION,
ADD_TO_FACIAL_EXPRESSIONS_BUFFER,
CLEAR_FACIAL_EXPRESSIONS_BUFFER,
SET_DETECTION_TIME_INTERVAL,
START_FACIAL_RECOGNITION,
STOP_FACIAL_RECOGNITION
STOP_FACIAL_RECOGNITION,
UPDATE_FACE_COORDINATES
} from './actionTypes';
import {
CLEAR_TIMEOUT,
FACIAL_EXPRESSION_MESSAGE,
DETECTION_TYPES,
INIT_WORKER,
INTERVAL_MESSAGE,
WEBHOOK_SEND_TIME_INTERVAL
} from './constants';
import { sendDataToWorker, sendFacialExpressionsWebhook } from './functions';
import {
getDetectionInterval,
sendDataToWorker,
sendFaceBoxToParticipants,
sendFacialExpressionsWebhook
} from './functions';
import logger from './logger';
/**
@@ -52,7 +57,12 @@ let duplicateConsecutiveExpressions = 0;
/**
* Variable that keeps the interval for sending expressions to webhook.
*/
let sendInterval;
let webhookSendInterval;
/**
* Variable that keeps the interval for detecting faces in a frame.
*/
let detectionInterval;
/**
* Loads the worker that predicts the facial expression.
@@ -60,61 +70,75 @@ let sendInterval;
* @returns {void}
*/
export function loadWorker() {
return function(dispatch: Function) {
if (!window.Worker) {
logger.warn('Browser does not support web workers');
return function(dispatch: Function, getState: Function) {
if (worker) {
logger.info('Worker has already been initialized');
return;
}
const baseUrl = `${getBaseUrl()}/libs/`;
if (navigator.product === 'ReactNative') {
logger.warn('Unsupported environment for face recognition');
return;
}
const baseUrl = `${getBaseUrl()}libs/`;
let workerUrl = `${baseUrl}facial-expressions-worker.min.js`;
const workerBlob = new Blob([ `importScripts("${workerUrl}");` ], { type: 'application/javascript' });
workerUrl = window.URL.createObjectURL(workerBlob);
worker = new Worker(workerUrl, { name: 'Facial Expression Worker' });
worker = new Worker(workerUrl, { name: 'Face Recognition Worker' });
worker.onmessage = function(e: Object) {
const { type, value } = e.data;
const { faceExpression, faceBox } = e.data;
// receives a message indicating what type of backend tfjs decided to use.
// it is received after as a response to the first message sent to the worker.
if (type === INTERVAL_MESSAGE) {
value && dispatch(setDetectionTimeInterval(value));
}
// receives a message with the predicted facial expression.
if (type === FACIAL_EXPRESSION_MESSAGE) {
sendDataToWorker(worker, imageCapture);
if (!value) {
return;
}
if (value === lastFacialExpression) {
if (faceExpression) {
if (faceExpression === lastFacialExpression) {
duplicateConsecutiveExpressions++;
} else {
if (lastFacialExpression && lastFacialExpressionTimestamp) {
dispatch(
addFacialExpression(
dispatch(addFacialExpression(
lastFacialExpression,
duplicateConsecutiveExpressions + 1,
lastFacialExpressionTimestamp
)
);
));
}
lastFacialExpression = value;
lastFacialExpression = faceExpression;
lastFacialExpressionTimestamp = Date.now();
duplicateConsecutiveExpressions = 0;
}
}
if (faceBox) {
const state = getState();
const conference = getCurrentConference(state);
const localParticipant = getLocalParticipant(state);
if (getParticipantCount(state) > 1) {
sendFaceBoxToParticipants(conference, faceBox);
}
dispatch({
type: UPDATE_FACE_COORDINATES,
faceBox,
id: localParticipant.id
});
}
};
const { enableFacialRecognition, faceCoordinatesSharing } = getState()['features/base/config'];
const detectionTypes = [
faceCoordinatesSharing?.enabled && DETECTION_TYPES.FACE_BOX,
enableFacialRecognition && DETECTION_TYPES.FACE_EXPRESSIONS
].filter(Boolean);
worker.postMessage({
type: INIT_WORKER,
url: baseUrl,
windowScreenSize: window.screen ? {
width: window.screen.width,
height: window.screen.height
} : undefined
baseUrl,
detectionTypes
});
dispatch(startFacialRecognition());
};
}
@@ -122,10 +146,10 @@ export function loadWorker() {
/**
* Starts the recognition and detection of face expressions.
*
* @param {Object} stream - Video stream.
* @param {Track | undefined} track - Track for which to start detecting faces.
* @returns {Function}
*/
export function startFacialRecognition() {
export function startFacialRecognition(track) {
return async function(dispatch: Function, getState: Function) {
if (!worker) {
return;
@@ -135,33 +159,46 @@ export function startFacialRecognition() {
const { recognitionActive } = state['features/facial-recognition'];
if (recognitionActive) {
logger.log('Face recognition already active.');
return;
}
const localVideoTrack = getLocalVideoTrack(state['features/base/tracks']);
const localVideoTrack = track || getLocalVideoTrack(state['features/base/tracks']);
if (localVideoTrack === undefined) {
logger.warn('Facial recognition is disabled due to missing local track.');
return;
}
const stream = localVideoTrack.jitsiTrack.getOriginalStream();
if (stream === null) {
return;
}
dispatch({ type: START_FACIAL_RECOGNITION });
logger.log('Start face recognition');
const firstVideoTrack = stream.getVideoTracks()[0];
const { enableFacialRecognition, faceCoordinatesSharing } = state['features/base/config'];
// $FlowFixMe
imageCapture = new ImageCapture(firstVideoTrack);
sendDataToWorker(worker, imageCapture);
sendInterval = setInterval(async () => {
const result = await sendFacialExpressionsWebhook(getState());
if (result) {
dispatch(clearFacialExpressionBuffer());
}
detectionInterval = setInterval(() => {
sendDataToWorker(
worker,
imageCapture,
faceCoordinatesSharing?.threshold
);
}, getDetectionInterval(state));
if (enableFacialRecognition) {
webhookSendInterval = setInterval(async () => {
const result = await sendFacialExpressionsWebhook(getState());
if (result) {
dispatch(clearFacialExpressionBuffer());
}
}, WEBHOOK_SEND_TIME_INTERVAL);
}
, WEBHOOK_SEND_TIME_INTERVAL);
};
}
@@ -171,73 +208,30 @@ export function startFacialRecognition() {
* @returns {void}
*/
export function stopFacialRecognition() {
return function(dispatch: Function, getState: Function) {
const state = getState();
const { recognitionActive } = state['features/facial-recognition'];
if (!recognitionActive) {
imageCapture = null;
return;
}
imageCapture = null;
worker.postMessage({
type: CLEAR_TIMEOUT
});
return function(dispatch: Function) {
if (lastFacialExpression && lastFacialExpressionTimestamp) {
dispatch(
addFacialExpression(
lastFacialExpression,
duplicateConsecutiveExpressions + 1,
lastFacialExpressionTimestamp
)
addFacialExpression(
lastFacialExpression,
duplicateConsecutiveExpressions + 1,
lastFacialExpressionTimestamp
)
);
}
duplicateConsecutiveExpressions = 0;
if (sendInterval) {
clearInterval(sendInterval);
sendInterval = null;
}
clearInterval(webhookSendInterval);
clearInterval(detectionInterval);
duplicateConsecutiveExpressions = 0;
webhookSendInterval = null;
detectionInterval = null;
imageCapture = null;
dispatch({ type: STOP_FACIAL_RECOGNITION });
logger.log('Stop face recognition');
};
}
/**
* Resets the track in the image capture.
*
* @returns {void}
*/
export function resetTrack() {
return function(dispatch: Function, getState: Function) {
const state = getState();
const { jitsiTrack: localVideoTrack } = getLocalVideoTrack(state['features/base/tracks']);
const stream = localVideoTrack.getOriginalStream();
const firstVideoTrack = stream.getVideoTracks()[0];
// $FlowFixMe
imageCapture = new ImageCapture(firstVideoTrack);
};
}
/**
* Changes the track from the image capture with a given one.
*
* @param {Object} track - The track that will be in the new image capture.
* @returns {void}
*/
export function changeTrack(track: Object) {
const { jitsiTrack } = track;
const stream = jitsiTrack.getOriginalStream();
const firstVideoTrack = stream.getVideoTracks()[0];
// $FlowFixMe
imageCapture = new ImageCapture(firstVideoTrack);
}
/**
* Adds a new facial expression and its duration.
*
@@ -248,12 +242,8 @@ export function changeTrack(track: Object) {
*/
function addFacialExpression(facialExpression: string, duration: number, timestamp: number) {
return function(dispatch: Function, getState: Function) {
const { detectionTimeInterval } = getState()['features/facial-recognition'];
let finalDuration = duration;
const finalDuration = duration * getDetectionInterval(getState()) / 1000;
if (detectionTimeInterval !== -1) {
finalDuration *= detectionTimeInterval / 1000;
}
dispatch({
type: ADD_FACIAL_EXPRESSION,
facialExpression,
@@ -263,19 +253,6 @@ function addFacialExpression(facialExpression: string, duration: number, timesta
};
}
/**
* Sets the time interval for the detection worker post message.
*
* @param {number} time - The time interval.
* @returns {Object}
*/
function setDetectionTimeInterval(time: number) {
return {
type: SET_DETECTION_TIME_INTERVAL,
time
};
}
/**
* Adds a facial expression with its timestamp to the facial expression buffer.
*
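The expression-duration book-keeping above reduces to simple arithmetic: every identical consecutive prediction extends the running expression by one detection interval, and addFacialExpression converts the count into seconds. A hedged worked example (the interval value is assumed):

    // Sketch: four duplicate 'happy' predictions followed the first one,
    // with getDetectionInterval() returning 1000 ms (the SEND_IMAGE_INTERVAL_MS default).
    const duplicateConsecutiveExpressions = 4;
    const detectionIntervalMs = 1000;
    const finalDuration = (duplicateConsecutiveExpressions + 1) * detectionIntervalMs / 1000;
    // finalDuration === 5, so 'happy' is recorded as lasting 5 seconds.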

View File

@@ -13,16 +13,6 @@ export const FACIAL_EXPRESSION_EMOJIS = {
export const FACIAL_EXPRESSIONS = [ 'happy', 'neutral', 'sad', 'surprised', 'angry', 'fearful' ];
/**
* Time used for detection interval when facial expressions worker uses webgl backend.
*/
export const WEBGL_TIME_INTERVAL = 1000;
/**
* Time used for detection interval when facial expression worker uses cpu backend.
*/
export const CPU_TIME_INTERVAL = 6000;
/**
* Time is ms used for sending expression.
*/
@@ -34,25 +24,26 @@ export const WEBHOOK_SEND_TIME_INTERVAL = 15000;
*/
export const INIT_WORKER = 'INIT_WORKER';
/**
* Type of event sent on the data channel.
*/
export const FACE_BOX_EVENT_TYPE = 'face-box';
/**
* Milliseconds interval value for sending new image data to the worker.
*/
export const SEND_IMAGE_INTERVAL_MS = 1000;
/**
* Type of message sent from main thread to worker that contain image data and
* will set a timeout for sending back the expression if detected in the worker.
* will trigger a response message from the worker containing the detected face(s) info.
*/
export const SET_TIMEOUT = 'SET_TIMEOUT';
export const DETECT_FACE = 'DETECT_FACE';
/**
* Type of message sent from main thread to worker that will stop the recognition;
* the worker will clear the timeout and then will send nothing back.
* Available detection types.
*/
export const CLEAR_TIMEOUT = 'CLEAR_TIMEOUT';
/**
* Type of message sent from the worker to main thread that contains a facial expression or undefined.
*/
export const FACIAL_EXPRESSION_MESSAGE = 'FACIAL_EXPRESSION_MESSAGE_TYPE';
/**
* Type of message sent from the worker to main thread that contains the time interval chosen by the worker.
*/
export const INTERVAL_MESSAGE = 'INTERVAL_MESSAGE_TYPE';
export const DETECTION_TYPES = {
FACE_BOX: 'face-box',
FACE_EXPRESSIONS: 'face-expressions'
};
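Taken together, the constants above form a small main-thread/worker protocol: one INIT_WORKER message, then repeated DETECT_FACE messages, with the worker answering only when it found something. A hedged sketch of the three message shapes as they appear elsewhere in this diff:

    // Main thread -> worker, once at startup.
    worker.postMessage({
        type: INIT_WORKER,
        baseUrl,                                   // where the face-api models (and wasm files) are served from
        detectionTypes: [ DETECTION_TYPES.FACE_BOX, DETECTION_TYPES.FACE_EXPRESSIONS ]
    });

    // Main thread -> worker, one frame per detection interval.
    worker.postMessage({ type: DETECT_FACE, image, threshold });

    // Worker -> main thread, only posted when a face box and/or expression was detected, e.g.:
    // { faceBox: { left: 35, right: 62, width: 27 }, faceExpression: 'happy' }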

View File

@@ -2,7 +2,9 @@
// From: https://github.com/justadudewhohacks/face-api.js/issues/47
// This is needed because face-api.js does not support working in a WebWorker natively
// Updated Dec 1 2020 to work on latest Chrome (tested in WebWorkers on Chrome Mobile on Android / Google Pixel 3 as well)
self.useWasm = false;
if(!self.OffscreenCanvas) {
self.useWasm = true;
self.OffscreenCanvas = class OffscreenCanvas {
constructor() {

View File

@@ -1,120 +1,158 @@
// @flow
import './faceApiPatch';
import { setWasmPaths } from '@tensorflow/tfjs-backend-wasm';
import * as faceapi from '@vladmandic/face-api';
import {
CLEAR_TIMEOUT,
CPU_TIME_INTERVAL,
FACIAL_EXPRESSION_MESSAGE,
INIT_WORKER,
SET_TIMEOUT,
INTERVAL_MESSAGE,
WEBGL_TIME_INTERVAL
} from './constants';
import { DETECTION_TYPES, DETECT_FACE, INIT_WORKER } from './constants';
/**
* A flag that indicates whether the tensorflow models were loaded or not.
* Detection types to be applied.
*/
let faceDetectionTypes = [];
/**
* Indicates whether an init error occurred.
*/
let initError = false;
/**
* A flag that indicates whether the models are loaded or not.
*/
let modelsLoaded = false;
/**
* The url where the models for the facial detection of expressions are located.
*/
let modelsURL;
/**
* A flag that indicates whether the tensorflow backend is set or not.
*/
let backendSet = false;
/**
* A timer variable for set interval.
* Flag for indicating whether a face detection flow is in progress or not.
*/
let timer;
let detectionInProgress = false;
/**
* The duration of the set timeout.
* Contains the last valid face bounding box (passes threshold validation) which was sent to the main process.
*/
let timeoutDuration = -1;
let lastValidFaceBox;
/**
* A patch for having window object in the worker.
*/
const window = {
screen: {
width: 1280,
height: 720
const detectFaceBox = async ({ detections, threshold }) => {
if (!detections.length) {
return null;
}
const faceBox = {
// normalize to percentage based
left: Math.round(Math.min(...detections.map(d => d.relativeBox.left)) * 100),
right: Math.round(Math.max(...detections.map(d => d.relativeBox.right)) * 100)
};
faceBox.width = Math.round(faceBox.right - faceBox.left);
if (lastValidFaceBox && Math.abs(lastValidFaceBox.left - faceBox.left) < threshold) {
return null;
}
lastValidFaceBox = faceBox;
return faceBox;
};
onmessage = async function(message) {
switch (message.data.type) {
case INIT_WORKER : {
modelsURL = message.data.url;
if (message.data.windowScreenSize) {
window.screen = message.data.windowScreenSize;
}
break;
}
const detectFaceExpression = async ({ detections }) =>
detections[0]?.expressions.asSortedArray()[0].expression;
case SET_TIMEOUT : {
if (!message.data.imageBitmap || !modelsURL) {
self.postMessage({
type: FACIAL_EXPRESSION_MESSAGE,
value: null
});
}
const detect = async ({ image, threshold }) => {
let detections;
let faceExpression;
let faceBox;
// the models are loaded
if (!modelsLoaded) {
await faceapi.loadTinyFaceDetectorModel(modelsURL);
await faceapi.loadFaceExpressionModel(modelsURL);
modelsLoaded = true;
}
faceapi.tf.engine().startScope();
const tensor = faceapi.tf.browser.fromPixels(message.data.imageBitmap);
const detections = await faceapi.detectSingleFace(
tensor,
new faceapi.TinyFaceDetectorOptions()
detectionInProgress = true;
faceapi.tf.engine().startScope();
const imageTensor = faceapi.tf.browser.fromPixels(image);
if (faceDetectionTypes.includes(DETECTION_TYPES.FACE_EXPRESSIONS)) {
detections = await faceapi.detectAllFaces(
imageTensor,
new faceapi.TinyFaceDetectorOptions()
).withFaceExpressions();
// The backend is set
if (!backendSet) {
const backend = faceapi.tf.getBackend();
faceExpression = await detectFaceExpression({ detections });
}
if (backend) {
if (backend === 'webgl') {
timeoutDuration = WEBGL_TIME_INTERVAL;
} else if (backend === 'cpu') {
timeoutDuration = CPU_TIME_INTERVAL;
}
self.postMessage({
type: INTERVAL_MESSAGE,
value: timeoutDuration
});
backendSet = true;
if (faceDetectionTypes.includes(DETECTION_TYPES.FACE_BOX)) {
detections = detections
? detections.map(d => d.detection)
: await faceapi.detectAllFaces(imageTensor, new faceapi.TinyFaceDetectorOptions());
faceBox = await detectFaceBox({
detections,
threshold
});
}
faceapi.tf.engine().endScope();
if (faceBox || faceExpression) {
self.postMessage({
faceBox,
faceExpression
});
}
detectionInProgress = false;
};
const init = async ({ baseUrl, detectionTypes }) => {
faceDetectionTypes = detectionTypes;
if (!backendSet) {
try {
if (self.useWasm) {
setWasmPaths(baseUrl);
await faceapi.tf.setBackend('wasm');
} else {
await faceapi.tf.setBackend('webgl');
}
}
faceapi.tf.engine().endScope();
let facialExpression;
backendSet = true;
} catch (err) {
initError = true;
if (detections) {
facialExpression = detections.expressions.asSortedArray()[0].expression;
return;
}
timer = setTimeout(() => {
self.postMessage({
type: FACIAL_EXPRESSION_MESSAGE,
value: facialExpression
});
}, timeoutDuration);
}
// load face detection model
if (!modelsLoaded) {
try {
await faceapi.loadTinyFaceDetectorModel(baseUrl);
if (detectionTypes.includes(DETECTION_TYPES.FACE_EXPRESSIONS)) {
await faceapi.loadFaceExpressionModel(baseUrl);
}
modelsLoaded = true;
} catch (err) {
initError = true;
return;
}
}
};
onmessage = function(message) {
switch (message.data.type) {
case DETECT_FACE: {
if (!backendSet || !modelsLoaded || initError || detectionInProgress) {
return;
}
detect(message.data);
break;
}
case CLEAR_TIMEOUT: {
if (timer) {
clearTimeout(timer);
timer = null;
}
case INIT_WORKER: {
init(message.data);
break;
}
}
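A small worked example may help with the normalisation in detectFaceBox above (the detection values are invented): face-api returns relative coordinates in the 0-1 range, the worker scales them to whole percentages, and a new box is only reported when its left edge moved by at least threshold percentage points since the last reported one.

    // Sketch with made-up relativeBox values.
    const detections = [ { relativeBox: { left: 0.352, right: 0.621 } } ];
    const left = Math.round(Math.min(...detections.map(d => d.relativeBox.left)) * 100);   // 35
    const right = Math.round(Math.max(...detections.map(d => d.relativeBox.right)) * 100); // 62
    const width = Math.round(right - left);                                                // 27
    // With threshold = 10, a later box whose left edge lands at 41 (|41 - 35| < 10) is dropped,
    // while one at 47 (|47 - 35| >= 10) becomes the new lastValidFaceBox and is posted back.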

View File

@@ -2,9 +2,17 @@
import { getLocalParticipant } from '../base/participants';
import { extractFqnFromPath } from '../dynamic-branding';
import { SET_TIMEOUT } from './constants';
import { DETECT_FACE, FACE_BOX_EVENT_TYPE, SEND_IMAGE_INTERVAL_MS } from './constants';
import logger from './logger';
let canvas;
let context;
if (typeof OffscreenCanvas === 'undefined') {
canvas = document.createElement('canvas');
context = canvas.getContext('2d');
}
/**
* Sends the facial expression with its duration to all the other participants.
*
@@ -30,6 +38,27 @@ export function sendFacialExpressionToParticipants(
}
/**
* Sends the face box to all the other participants.
*
* @param {Object} conference - The current conference.
* @param {Object} faceBox - Face box to be sent.
* @returns {void}
*/
export function sendFaceBoxToParticipants(
conference: Object,
faceBox: Object
): void {
try {
conference.sendEndpointMessage('', {
type: FACE_BOX_EVENT_TYPE,
faceBox
});
} catch (err) {
logger.warn('Could not broadcast the face box to the other participants', err);
}
}
/**
* Sends the facial expression with its duration to xmpp server.
*
@@ -107,21 +136,26 @@ export async function sendFacialExpressionsWebhook(state: Object) {
return false;
}
/**
* Sends the image data a canvas from the track in the image capture to the facial expression worker.
* Sends the image data from the track in the image capture (drawn to a canvas when OffscreenCanvas is unavailable) to the face recognition worker.
*
* @param {Worker} worker - Facial expression worker.
* @param {Worker} worker - Face recognition worker.
* @param {Object} imageCapture - Image capture that contains the current track.
* @param {number} threshold - Movement threshold as percentage for sharing face coordinates.
* @returns {Promise<void>}
*/
export async function sendDataToWorker(
worker: Worker,
imageCapture: Object
imageCapture: Object,
threshold: number = 10
): Promise<void> {
if (imageCapture === null || imageCapture === undefined) {
return;
}
let imageBitmap;
let image;
try {
imageBitmap = await imageCapture.grabFrame();
@@ -131,8 +165,63 @@ export async function sendDataToWorker(
return;
}
if (typeof OffscreenCanvas === 'undefined') {
canvas.width = imageBitmap.width;
canvas.height = imageBitmap.height;
context.drawImage(imageBitmap, 0, 0);
image = context.getImageData(0, 0, imageBitmap.width, imageBitmap.height);
} else {
image = imageBitmap;
}
worker.postMessage({
type: SET_TIMEOUT,
imageBitmap
type: DETECT_FACE,
image,
threshold
});
imageBitmap.close();
}
/**
* Gets face box for a participant id.
*
* @param {string} id - The participant id.
* @param {Object} state - The redux state.
* @returns {Object}
*/
function getFaceBoxForId(id: string, state: Object) {
return state['features/facial-recognition'].faceBoxes[id];
}
/**
* Gets the video object position for a participant id.
*
* @param {Object} state - The redux state.
* @param {string} id - The participant id.
* @returns {string} - CSS object-position in the shape of '{horizontalPercentage}% {verticalPercentage}%'.
*/
export function getVideoObjectPosition(state: Object, id: string) {
const faceBox = getFaceBoxForId(id, state);
if (faceBox) {
const { right, width } = faceBox;
return `${right - (width / 2)}% 50%`;
}
return '50% 50%';
}
/**
* Gets the interval at which frames are sent to the worker for face detection.
*
* @param {Object} state - The redux state.
* @returns {number} - Milliseconds between two face detection runs.
*/
export function getDetectionInterval(state: Object) {
const { faceCoordinatesSharing } = state['features/base/config'];
return Math.min(faceCoordinatesSharing?.captureInterval || SEND_IMAGE_INTERVAL_MS);
}
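getVideoObjectPosition is what the thumbnail UI feeds into CSS (see the Thumbnail import change further down). A hedged usage sketch, assuming a React-style inline style object; the objectFit value and the participantId variable are illustrative only:

    // Sketch: keep the detected face centered inside the cropped video element.
    const objectPosition = getVideoObjectPosition(state, participantId);
    // e.g. faceBox = { left: 35, right: 62, width: 27 } -> '48.5% 50%'

    const videoStyle = {
        objectFit: 'cover',
        objectPosition
    };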

View File

@@ -5,36 +5,53 @@ import {
CONFERENCE_WILL_LEAVE,
getCurrentConference
} from '../base/conference';
import { JitsiConferenceEvents } from '../base/lib-jitsi-meet';
import { getParticipantCount } from '../base/participants';
import { MiddlewareRegistry } from '../base/redux';
import { TRACK_UPDATED, TRACK_ADDED, TRACK_REMOVED } from '../base/tracks';
import { VIRTUAL_BACKGROUND_TRACK_CHANGED } from '../virtual-background/actionTypes';
import { ADD_FACIAL_EXPRESSION } from './actionTypes';
import { ADD_FACIAL_EXPRESSION, UPDATE_FACE_COORDINATES } from './actionTypes';
import {
addToFacialExpressionsBuffer,
changeTrack,
loadWorker,
resetTrack,
stopFacialRecognition,
startFacialRecognition
} from './actions';
import { FACE_BOX_EVENT_TYPE } from './constants';
import { sendFacialExpressionToParticipants, sendFacialExpressionToServer } from './functions';
MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
const { enableFacialRecognition } = getState()['features/base/config'];
const { enableFacialRecognition, faceCoordinatesSharing } = getState()['features/base/config'];
const isEnabled = enableFacialRecognition || faceCoordinatesSharing?.enabled;
if (!enableFacialRecognition) {
return next(action);
}
if (action.type === CONFERENCE_JOINED) {
dispatch(loadWorker());
if (isEnabled) {
dispatch(loadWorker());
}
// allow using remote face centering data when local face centering is not enabled
action.conference.on(
JitsiConferenceEvents.ENDPOINT_MESSAGE_RECEIVED,
(participant, eventData) => {
if (!participant || !eventData) {
return;
}
if (eventData.type === FACE_BOX_EVENT_TYPE) {
dispatch({
type: UPDATE_FACE_COORDINATES,
faceBox: eventData.faceBox,
id: participant.getId()
});
}
});
return next(action);
}
if (!getCurrentConference(getState())) {
if (!isEnabled) {
return next(action);
}
@@ -44,51 +61,45 @@ MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
return next(action);
}
case TRACK_UPDATED: {
const { videoType, type } = action.track.jitsiTrack;
case TRACK_ADDED: {
const { jitsiTrack: { isLocal, videoType } } = action.track;
if (videoType === 'camera') {
const { muted, videoStarted } = action.track;
if (videoStarted === true) {
dispatch(startFacialRecognition());
}
if (muted !== undefined) {
if (muted) {
dispatch(stopFacialRecognition());
} else {
dispatch(startFacialRecognition());
type === 'presenter' && changeTrack(action.track);
}
}
if (videoType === 'camera' && isLocal()) {
// need to pass this since the track is not yet added in the store
dispatch(startFacialRecognition(action.track));
}
return next(action);
}
case TRACK_ADDED: {
const { mediaType, videoType } = action.track;
case TRACK_UPDATED: {
const { jitsiTrack: { isLocal, videoType } } = action.track;
if (mediaType === 'presenter' && videoType === 'camera') {
dispatch(startFacialRecognition());
changeTrack(action.track);
if (videoType !== 'camera' || !isLocal()) {
return next(action);
}
const { muted } = action.track;
if (muted !== undefined) {
// addresses video mute state changes
if (muted) {
dispatch(stopFacialRecognition());
} else {
dispatch(startFacialRecognition());
}
}
return next(action);
}
case TRACK_REMOVED: {
const { videoType } = action.track.jitsiTrack;
const { jitsiTrack: { isLocal, videoType } } = action.track;
if ([ 'camera', 'desktop' ].includes(videoType)) {
if (videoType === 'camera' && isLocal()) {
dispatch(stopFacialRecognition());
}
return next(action);
}
case VIRTUAL_BACKGROUND_TRACK_CHANGED: {
dispatch(resetTrack());
return next(action);
}
case ADD_FACIAL_EXPRESSION: {
const state = getState();
const conference = getCurrentConference(state);

View File

@@ -6,12 +6,13 @@ import {
ADD_FACIAL_EXPRESSION,
ADD_TO_FACIAL_EXPRESSIONS_BUFFER,
CLEAR_FACIAL_EXPRESSIONS_BUFFER,
SET_DETECTION_TIME_INTERVAL,
START_FACIAL_RECOGNITION,
STOP_FACIAL_RECOGNITION
STOP_FACIAL_RECOGNITION,
UPDATE_FACE_COORDINATES
} from './actionTypes';
const defaultState = {
faceBoxes: {},
facialExpressions: {
happy: 0,
neutral: 0,
@@ -22,7 +23,6 @@ const defaultState = {
sad: 0
},
facialExpressionsBuffer: [],
detectionTimeInterval: -1,
recognitionActive: false
};
@@ -45,25 +45,27 @@ ReducerRegistry.register('features/facial-recognition', (state = defaultState, a
facialExpressionsBuffer: []
};
}
case SET_DETECTION_TIME_INTERVAL: {
return {
...state,
detectionTimeInterval: action.time
};
}
case START_FACIAL_RECOGNITION: {
return {
...state,
recognitionActive: true
};
}
case STOP_FACIAL_RECOGNITION: {
return {
...state,
recognitionActive: false
};
}
case UPDATE_FACE_COORDINATES: {
return {
...state,
faceBoxes: {
...state.faceBoxes,
[action.id]: action.faceBox
}
};
}
}
return state;
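After a few UPDATE_FACE_COORDINATES actions the merged reducer holds both kinds of data in one slice. A hedged sketch of the resulting features/facial-recognition state (participant ids and numbers are invented for illustration):

    {
        faceBoxes: {
            'localParticipantId': { left: 35, right: 62, width: 27 },    // from the local worker
            'remoteParticipantId': { left: 48, right: 70, width: 22 }    // from an endpoint message
        },
        facialExpressions: { happy: 12, neutral: 40, /* ... */ sad: 0 },
        facialExpressionsBuffer: [],
        recognitionActive: true
    }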

View File

@@ -23,7 +23,7 @@ import {
getTrackByMediaTypeAndParticipant,
updateLastTrackVideoMediaEvent
} from '../../../base/tracks';
import { getVideoObjectPosition } from '../../../face-centering/functions';
import { getVideoObjectPosition } from '../../../facial-recognition/functions';
import { hideGif, showGif } from '../../../gifs/actions';
import { getGifDisplayMode, getGifForParticipant } from '../../../gifs/functions';
import { PresenceLabel } from '../../../presence-status';

View File

@@ -384,16 +384,6 @@ module.exports = (_env, argv) => {
],
performance: getPerformanceHints(perfHintOptions, 35 * 1024)
}),
Object.assign({}, config, {
entry: {
'face-centering-worker': './react/features/face-centering/faceCenteringWorker.js'
},
plugins: [
...config.plugins,
...getBundleAnalyzerPlugin(analyzeBundle, 'face-centering-worker')
],
performance: getPerformanceHints(perfHintOptions, 500 * 1024)
}),
Object.assign({}, config, {
entry: {
'facial-expressions-worker': './react/features/facial-recognition/facialExpressionsWorker.js'