feat(face-landmarks): integrate human library

This replaces face-api with the @vladmandic/human library for face detection and face expression recognition.

* feat(face-landmarks): integrate human library

* feat(face-landmarks): rewrite worker in typescript

* fix(face-landmarks): allow worker bundle size up to 2 mib

* fix: remove unwanted comment

* code review
Gabriel Borlea 2022-05-06 15:41:08 +03:00 committed by GitHub
parent adef5095da
commit 0c021868b5
11 changed files with 229 additions and 289 deletions

View File

@@ -7,7 +7,7 @@ TF_WASM_DIR = node_modules/@tensorflow/tfjs-backend-wasm/dist/
RNNOISE_WASM_DIR = node_modules/rnnoise-wasm/dist
TFLITE_WASM = react/features/stream-effects/virtual-background/vendor/tflite
MEET_MODELS_DIR = react/features/stream-effects/virtual-background/vendor/models
FACE_MODELS_DIR = node_modules/@vladmandic/face-api/model
FACE_MODELS_DIR = node_modules/@vladmandic/human-models/models
NODE_SASS = ./node_modules/.bin/sass
NPM = npm
OUTPUT_DIR = .
@@ -91,10 +91,10 @@ deploy-meet-models:
deploy-face-landmarks:
cp \
$(FACE_MODELS_DIR)/tiny_face_detector_model-weights_manifest.json \
$(FACE_MODELS_DIR)/tiny_face_detector_model.bin \
$(FACE_MODELS_DIR)/face_expression_model-weights_manifest.json \
$(FACE_MODELS_DIR)/face_expression_model.bin \
$(FACE_MODELS_DIR)/blazeface-front.bin \
$(FACE_MODELS_DIR)/blazeface-front.json \
$(FACE_MODELS_DIR)/emotion.bin \
$(FACE_MODELS_DIR)/emotion.json \
$(DEPLOY_DIR)
deploy-css:

package-lock.json generated
View File

@@ -53,7 +53,8 @@
"@svgr/webpack": "4.3.2",
"@tensorflow/tfjs-backend-wasm": "3.13.0",
"@tensorflow/tfjs-core": "3.13.0",
"@vladmandic/face-api": "1.6.4",
"@vladmandic/human": "2.6.5",
"@vladmandic/human-models": "2.5.9",
"@xmldom/xmldom": "0.7.5",
"amplitude-js": "8.2.1",
"base64-js": "1.3.1",
@@ -5587,14 +5588,22 @@
"resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-20.2.1.tgz",
"integrity": "sha512-7tFImggNeNBVMsn0vLrpn1H1uPrUBdnARPTpZoitY37ZrdJREzf7I16tMrlK3hen349gr1NYh8CmZQa7CTG6Aw=="
},
"node_modules/@vladmandic/face-api": {
"version": "1.6.4",
"resolved": "https://registry.npmjs.org/@vladmandic/face-api/-/face-api-1.6.4.tgz",
"integrity": "sha512-tVx8lCL1mKb44qeN5EEypJNXqxRYXh+7BcSzfY4iMaZIoF5Y+Jev20UiIn9JvxwGV2caWkdFIjpvw+OxsL/kdg==",
"node_modules/@vladmandic/human": {
"version": "2.6.5",
"resolved": "https://registry.npmjs.org/@vladmandic/human/-/human-2.6.5.tgz",
"integrity": "sha512-5fG5lICkJw17d8fT9ZHtioAPLmOKsQlNN4rbuA1t21HlV7KL3M2EMbYeO+ZE0t6VLSioZZH/KoOFvW5A0JP0Hg==",
"engines": {
"node": ">=14.0.0"
}
},
"node_modules/@vladmandic/human-models": {
"version": "2.5.9",
"resolved": "https://registry.npmjs.org/@vladmandic/human-models/-/human-models-2.5.9.tgz",
"integrity": "sha512-WhV0RIeELsA73LGMpemYJvpIEuG0nkh6rP1x7JctKDQkNaARgWolTq9r1RUvWw++rQfzfZ82bYcAauJcaGW6bw==",
"dependencies": {
"@vladmandic/human": "^2.5.8"
}
},
"node_modules/@webassemblyjs/ast": {
"version": "1.11.1",
"resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.1.tgz",
@@ -24180,10 +24189,18 @@
"resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-20.2.1.tgz",
"integrity": "sha512-7tFImggNeNBVMsn0vLrpn1H1uPrUBdnARPTpZoitY37ZrdJREzf7I16tMrlK3hen349gr1NYh8CmZQa7CTG6Aw=="
},
"@vladmandic/face-api": {
"version": "1.6.4",
"resolved": "https://registry.npmjs.org/@vladmandic/face-api/-/face-api-1.6.4.tgz",
"integrity": "sha512-tVx8lCL1mKb44qeN5EEypJNXqxRYXh+7BcSzfY4iMaZIoF5Y+Jev20UiIn9JvxwGV2caWkdFIjpvw+OxsL/kdg=="
"@vladmandic/human": {
"version": "2.6.5",
"resolved": "https://registry.npmjs.org/@vladmandic/human/-/human-2.6.5.tgz",
"integrity": "sha512-5fG5lICkJw17d8fT9ZHtioAPLmOKsQlNN4rbuA1t21HlV7KL3M2EMbYeO+ZE0t6VLSioZZH/KoOFvW5A0JP0Hg=="
},
"@vladmandic/human-models": {
"version": "2.5.9",
"resolved": "https://registry.npmjs.org/@vladmandic/human-models/-/human-models-2.5.9.tgz",
"integrity": "sha512-WhV0RIeELsA73LGMpemYJvpIEuG0nkh6rP1x7JctKDQkNaARgWolTq9r1RUvWw++rQfzfZ82bYcAauJcaGW6bw==",
"requires": {
"@vladmandic/human": "^2.5.8"
}
},
"@webassemblyjs/ast": {
"version": "1.11.1",

View File

@@ -58,7 +58,8 @@
"@svgr/webpack": "4.3.2",
"@tensorflow/tfjs-backend-wasm": "3.13.0",
"@tensorflow/tfjs-core": "3.13.0",
"@vladmandic/face-api": "1.6.4",
"@vladmandic/human": "2.6.5",
"@vladmandic/human-models": "2.5.9",
"@xmldom/xmldom": "0.7.5",
"amplitude-js": "8.2.1",
"base64-js": "1.3.1",

View File

@@ -1,5 +1,3 @@
// @flow
export const FACE_EXPRESSIONS_EMOJIS = {
happy: '😊',
neutral: '😐',
@@ -13,6 +11,16 @@ export const FACE_EXPRESSIONS_EMOJIS = {
export const FACE_EXPRESSIONS = [ 'happy', 'neutral', 'sad', 'surprised', 'angry', 'fearful' ];
export const FACE_EXPRESSIONS_NAMING_MAPPING = {
happy: 'happy',
neutral: 'neutral',
surprise: 'surprised',
angry: 'angry',
fear: 'fearful',
disgust: 'disgusted',
sad: 'sad'
};
/**
* Time in ms used for sending expression.
*/
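For illustration only, a minimal sketch of how this mapping is intended to be applied to the emotion labels reported by the human library; the `{ score, emotion }` result shape comes from @vladmandic/human, and the sample value is hypothetical:

```typescript
import { FACE_EXPRESSIONS_NAMING_MAPPING } from './constants';

// One emotion entry as reported by @vladmandic/human for a detected face.
type Emotion = { score: number; emotion: string };

// human uses its own label set ('fear', 'surprise', ...); the mapping translates
// those into the names the rest of the app already expects ('fearful', 'surprised', ...).
const mapExpression = (emotions: Emotion[]): string | undefined =>
    emotions.length
        ? (FACE_EXPRESSIONS_NAMING_MAPPING as Record<string, string>)[emotions[0].emotion]
        : undefined;

// Hypothetical sample: human reports 'surprise' with the highest score.
mapExpression([ { score: 0.92, emotion: 'surprise' } ]); // -> 'surprised'
```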

View File

@@ -1,108 +0,0 @@
/* eslint-disable */
// From: https://github.com/justadudewhohacks/face-api.js/issues/47
// This is needed because face-api.js does not support working in a WebWorker natively
// Updated Dec 1 2020 to work on latest Chrome (tested in WebWorkers on Chrome Mobile on Android / Google Pixel 3 as well)
self.useWasm = false;
if(!self.OffscreenCanvas) {
self.useWasm = true;
self.OffscreenCanvas = class OffscreenCanvas {
constructor() {
}
}
}
if(!self.OffscreenCanvasRenderingContext2D) {
self.OffscreenCanvasRenderingContext2D = class OffscreenCanvasRenderingContext2D {
constructor() {
}
}
}
self.Canvas = self.HTMLCanvasElement = OffscreenCanvas;
// self.HTMLCanvasElement.name = 'HTMLCanvasElement';
// self.Canvas.name = 'Canvas';
self.CanvasRenderingContext2D = OffscreenCanvasRenderingContext2D;
function HTMLImageElement(){}
function HTMLVideoElement(){}
self.Image = HTMLImageElement;
self.Video = HTMLVideoElement;
function Storage () {
let _data = {};
this.clear = function(){ return _data = {}; };
this.getItem = function(id){ return _data.hasOwnProperty(id) ? _data[id] : undefined; };
this.removeItem = function(id){ return delete _data[id]; };
this.setItem = function(id, val){ return _data[id] = String(val); };
}
class Document extends EventTarget {}
self.document = new Document();
self.window = self.Window = self;
self.localStorage = new Storage();
function createElement(element) {
switch(element) {
case 'canvas':
let canvas = new Canvas(1,1);
canvas.localName = 'canvas';
canvas.nodeName = 'CANVAS';
canvas.tagName = 'CANVAS';
canvas.nodeType = 1;
canvas.innerHTML = '';
canvas.remove = () => { console.log('nope'); };
return canvas;
default:
console.log('arg', element);
break;
}
}
document.createElement = createElement;
document.location = self.location;
// These are the same checks face-api.js/isBrowser does
if(!typeof window == 'object') {
console.warn("Check failed: window");
}
if(typeof document === 'undefined') {
console.warn("Check failed: document");
}
if(typeof HTMLImageElement === 'undefined') {
console.warn("Check failed: HTMLImageElement");
}
if(typeof HTMLCanvasElement === 'undefined') {
console.warn("Check failed: HTMLCanvasElement");
}
if(typeof HTMLVideoElement === 'undefined') {
console.warn("Check failed: HTMLVideoElement");
}
if(typeof ImageData === 'undefined') {
console.warn("Check failed: ImageData");
}
if(typeof CanvasRenderingContext2D === 'undefined') {
console.warn("Check failed: CanvasRenderingContext2D");
}
self.window = window;
self.document = document;
self.HTMLImageElement = HTMLImageElement;
self.HTMLVideoElement = HTMLVideoElement;
// These are the same checks face-api.js/isBrowser does
const isBrowserCheck = typeof window === 'object'
&& typeof document !== 'undefined'
&& typeof HTMLImageElement !== 'undefined'
&& typeof HTMLCanvasElement !== 'undefined'
&& typeof HTMLVideoElement !== 'undefined'
&& typeof ImageData !== 'undefined'
&& typeof CanvasRenderingContext2D !== 'undefined';
;
if(!isBrowserCheck) {
throw new Error("Failed to monkey patch for face-api, face-api will fail");
}

View File

@@ -1,159 +0,0 @@
import './faceApiPatch';
import { setWasmPaths } from '@tensorflow/tfjs-backend-wasm';
import * as faceapi from '@vladmandic/face-api';
import { DETECTION_TYPES, DETECT_FACE, INIT_WORKER } from './constants';
/**
* Detection types to be applied.
*/
let faceDetectionTypes = [];
/**
* Indicates whether an init error occured.
*/
let initError = false;
/**
* A flag that indicates whether the models are loaded or not.
*/
let modelsLoaded = false;
/**
* A flag that indicates whether the tensorflow backend is set or not.
*/
let backendSet = false;
/**
* Flag for indicating whether a face detection flow is in progress or not.
*/
let detectionInProgress = false;
/**
* Contains the last valid face bounding box (passes threshold validation) which was sent to the main process.
*/
let lastValidFaceBox;
const detectFaceBox = async ({ detections, threshold }) => {
if (!detections.length) {
return null;
}
const faceBox = {
// normalize to percentage based
left: Math.round(Math.min(...detections.map(d => d.relativeBox.left)) * 100),
right: Math.round(Math.max(...detections.map(d => d.relativeBox.right)) * 100)
};
faceBox.width = Math.round(faceBox.right - faceBox.left);
if (lastValidFaceBox && Math.abs(lastValidFaceBox.left - faceBox.left) < threshold) {
return null;
}
lastValidFaceBox = faceBox;
return faceBox;
};
const detectFaceExpression = async ({ detections }) =>
detections[0]?.expressions.asSortedArray()[0].expression;
const detect = async ({ image, threshold }) => {
let detections;
let faceExpression;
let faceBox;
detectionInProgress = true;
faceapi.tf.engine().startScope();
const imageTensor = faceapi.tf.browser.fromPixels(image);
if (faceDetectionTypes.includes(DETECTION_TYPES.FACE_EXPRESSIONS)) {
detections = await faceapi.detectAllFaces(
imageTensor,
new faceapi.TinyFaceDetectorOptions()
).withFaceExpressions();
faceExpression = await detectFaceExpression({ detections });
}
if (faceDetectionTypes.includes(DETECTION_TYPES.FACE_BOX)) {
detections = detections
? detections.map(d => d.detection)
: await faceapi.detectAllFaces(imageTensor, new faceapi.TinyFaceDetectorOptions());
faceBox = await detectFaceBox({
detections,
threshold
});
}
faceapi.tf.engine().endScope();
if (faceBox || faceExpression) {
self.postMessage({
faceBox,
faceExpression
});
}
detectionInProgress = false;
};
const init = async ({ baseUrl, detectionTypes }) => {
faceDetectionTypes = detectionTypes;
if (!backendSet) {
try {
if (self.useWasm) {
setWasmPaths(baseUrl);
await faceapi.tf.setBackend('wasm');
} else {
await faceapi.tf.setBackend('webgl');
}
backendSet = true;
} catch (err) {
initError = true;
return;
}
}
// load face detection model
if (!modelsLoaded) {
try {
await faceapi.loadTinyFaceDetectorModel(baseUrl);
if (detectionTypes.includes(DETECTION_TYPES.FACE_EXPRESSIONS)) {
await faceapi.loadFaceExpressionModel(baseUrl);
}
modelsLoaded = true;
} catch (err) {
initError = true;
return;
}
}
};
onmessage = function(message) {
switch (message.data.type) {
case DETECT_FACE: {
if (!backendSet || !modelsLoaded || initError || detectionInProgress) {
return;
}
detect(message.data);
break;
}
case INIT_WORKER: {
init(message.data);
break;
}
}
};

View File

@@ -0,0 +1,181 @@
import { setWasmPaths } from '@tensorflow/tfjs-backend-wasm';
import { Human, Config, FaceResult } from '@vladmandic/human';
import { DETECTION_TYPES, DETECT_FACE, INIT_WORKER, FACE_EXPRESSIONS_NAMING_MAPPING } from './constants';
type Detection = {
detections: Array<FaceResult>,
threshold?: number
};
type DetectInput = {
image: ImageBitmap | ImageData,
threshold: number
};
type FaceBox = {
left: number,
right: number,
width?: number
};
type InitInput = {
baseUrl: string,
detectionTypes: string[]
}
/**
* Instance of the human library used to run face detection and expression recognition.
*/
let human: Human;
/**
* Detection types to be applied.
*/
let faceDetectionTypes: string[] = [];
/**
* Flag for indicating whether a face detection flow is in progress or not.
*/
let detectionInProgress = false;
/**
* Contains the last valid face bounding box (passes threshold validation) which was sent to the main process.
*/
let lastValidFaceBox: FaceBox;
/**
* Configuration for human.
*/
const config: Partial<Config> = {
backend: 'humangl',
async: true,
warmup: 'none',
cacheModels: true,
cacheSensitivity: 0,
debug: false,
deallocate: true,
filter: { enabled: false },
face: {
enabled: true,
detector: {
enabled: true,
rotation: false
},
mesh: { enabled: false },
iris: { enabled: false },
emotion: { enabled: false },
description: { enabled: false }
},
hand: { enabled: false },
gesture: { enabled: false },
body: { enabled: false },
segmentation: { enabled: false }
};
const detectFaceBox = async ({ detections, threshold }: Detection) => {
if (!detections.length) {
return null;
}
const faceBox: FaceBox = {
// normalize to percentage based
left: Math.round(Math.min(...detections.map(d => d.boxRaw[0])) * 100),
right: Math.round(Math.max(...detections.map(d => d.boxRaw[0] + d.boxRaw[2])) * 100)
};
faceBox.width = Math.round(faceBox.right - faceBox.left);
if (lastValidFaceBox && threshold && Math.abs(lastValidFaceBox.left - faceBox.left) < threshold) {
return null;
}
lastValidFaceBox = faceBox;
return faceBox;
};
const detectFaceExpression = async ({ detections }: Detection) =>
detections[0]?.emotion && FACE_EXPRESSIONS_NAMING_MAPPING[detections[0]?.emotion[0].emotion];
const detect = async ({ image, threshold } : DetectInput) => {
let detections;
let faceExpression;
let faceBox;
detectionInProgress = true;
const imageTensor = human.tf.browser.fromPixels(image);
if (faceDetectionTypes.includes(DETECTION_TYPES.FACE_EXPRESSIONS)) {
const { face } = await human.detect(imageTensor, config);
detections = face;
faceExpression = await detectFaceExpression({ detections });
}
if (faceDetectionTypes.includes(DETECTION_TYPES.FACE_BOX)) {
if (!detections) {
const { face } = await human.detect(imageTensor, config);
detections = face;
}
faceBox = await detectFaceBox({
detections,
threshold
});
}
// Release the input tensor created with tf.browser.fromPixels above so repeated
// detections do not leak memory.
human.tf.dispose(imageTensor);
if (faceBox || faceExpression) {
self.postMessage({
faceBox,
faceExpression
});
}
detectionInProgress = false;
};
const init = async ({ baseUrl, detectionTypes }: InitInput) => {
faceDetectionTypes = detectionTypes;
if (!human) {
config.modelBasePath = baseUrl;
if (!self.OffscreenCanvas) {
config.backend = 'wasm';
config.wasmPath = baseUrl;
setWasmPaths(baseUrl);
}
if (detectionTypes.includes(DETECTION_TYPES.FACE_EXPRESSIONS) && config.face) {
config.face.emotion = { enabled: true };
}
const initialHuman = new Human(config);
try {
await initialHuman.load();
} catch (err) {
console.error(err);
}
human = initialHuman;
}
};
onmessage = function(message: MessageEvent<any>) {
switch (message.data.type) {
case DETECT_FACE: {
if (!human || detectionInProgress) {
return;
}
detect(message.data);
break;
}
case INIT_WORKER: {
init(message.data);
break;
}
}
};
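The main-thread consumer is not part of this diff, but a minimal usage sketch can be inferred from the message shapes handled above. The worker bundle path, baseUrl and threshold value below are assumptions; the message types and payloads mirror the INIT_WORKER and DETECT_FACE handlers:

```typescript
import { DETECTION_TYPES, DETECT_FACE, INIT_WORKER } from './constants';

// Hypothetical path to the bundle built by the 'face-landmarks-worker' webpack entry.
const worker = new Worker('libs/face-landmarks-worker.min.js', { name: 'Face landmarks worker' });

// The worker posts back { faceBox, faceExpression } whenever either is detected.
worker.onmessage = ({ data }: MessageEvent<{
    faceBox?: { left: number; right: number; width?: number };
    faceExpression?: string;
}>) => {
    console.log('face box:', data.faceBox, 'expression:', data.faceExpression);
};

// Point the worker at the directory where the human models were deployed
// (see deploy-face-landmarks in the Makefile); the URL here is an assumption.
worker.postMessage({
    type: INIT_WORKER,
    baseUrl: 'libs/',
    detectionTypes: [ DETECTION_TYPES.FACE_BOX, DETECTION_TYPES.FACE_EXPRESSIONS ]
});

// Send one video frame for detection; the ImageBitmap is transferred, not copied.
async function sendFrame(video: HTMLVideoElement, threshold = 10) {
    const image = await createImageBitmap(video);

    worker.postMessage({ type: DETECT_FACE, image, threshold }, [ image ]);
}
```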

View File

@@ -5,7 +5,7 @@ import React from 'react';
import { Avatar, StatelessAvatar } from '../../../base/avatar';
import { getInitials } from '../../../base/avatar/functions';
import BaseTheme from '../../../base/ui/components/BaseTheme';
import { FACE_EXPRESSIONS } from '../../../face-landmarks/constants.js';
import { FACE_EXPRESSIONS } from '../../../face-landmarks/constants';
import TimeElapsed from './TimeElapsed';

View File

@@ -4,7 +4,7 @@ import React from 'react';
import { useTranslation } from 'react-i18next';
import { Tooltip } from '../../../base/tooltip';
import { FACE_EXPRESSIONS_EMOJIS } from '../../../face-landmarks/constants.js';
import { FACE_EXPRESSIONS_EMOJIS } from '../../../face-landmarks/constants';
const useStyles = makeStyles(theme => {
return {

View File

@@ -5,7 +5,7 @@
"module": "es6",
"target": "es6",
"jsx": "react",
"lib": [ "ES2020", "DOM" ],
"lib": [ "webworker", "ES2020", "DOM" ],
"noEmit": false,
"moduleResolution": "Node",
"strict": true,

View File

@@ -384,13 +384,13 @@ module.exports = (_env, argv) => {
}),
Object.assign({}, config, {
entry: {
'face-landmarks-worker': './react/features/face-landmarks/faceLandmarksWorker.js'
'face-landmarks-worker': './react/features/face-landmarks/faceLandmarksWorker.ts'
},
plugins: [
...config.plugins,
...getBundleAnalyzerPlugin(analyzeBundle, 'face-landmarks-worker')
],
performance: getPerformanceHints(perfHintOptions, 1024 * 1024 * 1.5)
performance: getPerformanceHints(perfHintOptions, 1024 * 1024 * 2)
})
];
};
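For reference, the raised limit feeds webpack's performance hints for the worker bundle, which now has to accommodate the human library. The helper's real implementation lives elsewhere in webpack.config.js and is not shown in this diff; a sketch of the shape it presumably returns, assuming a hints/maxAssetSize/maxEntrypointSize mapping:

```typescript
// Hypothetical shape of getPerformanceHints: cap both the entrypoint and its
// assets at the given byte budget (2 MiB for the face-landmarks worker above).
function getPerformanceHints(options: { analyzeBundle?: boolean }, maxAllowedSize: number) {
    return {
        // Reporting is usually relaxed while analyzing the bundle; an assumption here.
        hints: options.analyzeBundle ? false : ('error' as const),
        maxAssetSize: maxAllowedSize,
        maxEntrypointSize: maxAllowedSize
    };
}
```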