feat(facial-expressions): add the facial expression feature and display them in speakerstats (#10006)

* Initial implementation; Happy flow

* Maybe revert this

* Functional prototype

* feat(facial-expressions): get stream when changing background effect and use presenter effect with camera

* add(facial-expressions): array that stores the expressions during the meeting

* refactor(facial-expressions): capture imagebitmap from stream with imagecapture api

* add(speaker-stats): expression label

* fix(facial-expression): expression store

* revert: expression label on speaker stats

* add(facial-expressions): broadcast of expression when it changes

* feat: facial expression handling on prosody

* fix(facial-expressions): get the right track when opening and closing camera

* add(speaker-stats): facial expression column

* fix(facial-expressions): allow starting facial recognition only after joining the conference

* fix(mod_speakerstats_component): storing last emotion in speaker stats component and sending it

* chore(facial-expressions): change detection from 2000ms to 1000ms

* add(facial-expressions): send expression to server when there is only one participant

* feat(facial-expressions): store expressions as a timeline

* feat(mod_speakerstats_component): store facial expressions as a timeline

* fix(facial-expressions): stop facial recognition only when muting video track

* fix(facial-expressions): presenter mode get right track to detect face

* add: polyfils for image capture for firefox and safari

* refactor(facial-expressions): store expressions by counting them in a map

* chore(facial-expressions): remove manually assigning the backend for tensorflowjs

* feat(facial-expressions): move face-api from main thread to web worker

* fix(facial-expressions): make feature work on firefox and safari

* feat(facial-expressions): camera time tracker

* feat(facial-expressions): camera time tracker in prosody

* add(facial-expressions): expressions time as TimeElapsed object in speaker stats

* fix(facial-expressions): lower the frequency of detection when tf uses cpu backend

* add(facial-expressions): duration to the expression and send it with duration when it is done

* fix(facial-expressions): prosody speaker stats convert from string to number and bool the values set by xmpp

* refactor(facial-expressions): change expressions labels from text to emoji

* refactor(facial-expressions): remove camera time tracker

* add(facial-expressions): detection time interval

* chore(facial-expressions): add docs and minor refactor of the code

* refactor(facial-expressions): put timeout in worker and remove set interval in main thread

* feat(facial-expressions): disable feature in the config

* add(facial-expressions): tooltips of labels in speaker stats

* refactor(facial-expressions): send facial expressions function and remove some unused functions and console logs

* refactor(facial-expressions): rename action type when a change is done to the track by the virtual backgrounds to be used in facial expressions middleware

* chore(facial-expressions): order imports and format some code

* fix(facial-expressions): rebase issues with newer master

* fix(facial-expressions): package-lock.json

* fix(facial-expression): add commented default value of disableFacialRecognition flag and short description

* fix(facial-expressions): change disableFacialRecognition to enableFacialRecognition flag in config

* fix: resources load-test package-lock.json

* fix(facial-expressions): set and get facial expressions only if facial recognition enabled

* add: facial recognition resources folder in .eslintignore

* chore: package-lock update

* fix: package-lock.json

* fix(facial-expressions): gpu memory leak in the web worker

* fix(facial-expressions): set cpu time interval for detection to 6000ms

* chore(speaker-stats): fix indentation

* chore(facial-expressions): remove empty lines between comments and type declarations

* fix(facial-expressions): remove camera timetracker

* fix(facial-expressions): remove facialRecognitionAllowed flag

* fix(facial-expressions): remove sending interval time to worker

* refactor(facial-expression): middleware

* fix(facial-expression): end tensor scope after setting backend

* fix(facial-expressions): sending info back to worker only on facial expression message

* fix: lint errors

* refactor(facial-expressions): bundle web worker using webpack

* fix: deploy-facial-expressions command in makefile

* chore: fix load test package-lock.json and package.json

* chore: sync package-lock.json

Co-authored-by: Mihai-Andrei Uscat <mihai.uscat@8x8.com>
Gabriel Borlea 2021-11-17 16:33:03 +02:00 committed by GitHub
parent e42db3c9c2
commit 61684b1071
33 changed files with 1222 additions and 39 deletions


@ -8,6 +8,7 @@ libs/*
resources/*
react/features/stream-effects/virtual-background/vendor/*
load-test/*
react/features/facial-recognition/resources/*
# ESLint will by default ignore its own configuration file. However, there does
# not seem to be a reason why we will want to risk being inconsistent with our


@ -7,6 +7,7 @@ OLM_DIR = node_modules/@matrix-org/olm
RNNOISE_WASM_DIR = node_modules/rnnoise-wasm/dist/
TFLITE_WASM = react/features/stream-effects/virtual-background/vendor/tflite
MEET_MODELS_DIR = react/features/stream-effects/virtual-background/vendor/models/
FACIAL_MODELS_DIR = react/features/facial-recognition/resources
NODE_SASS = ./node_modules/.bin/sass
NPM = npm
OUTPUT_DIR = .
@ -28,7 +29,7 @@ clean:
rm -fr $(BUILD_DIR)
.NOTPARALLEL:
deploy: deploy-init deploy-appbundle deploy-rnnoise-binary deploy-tflite deploy-meet-models deploy-lib-jitsi-meet deploy-libflac deploy-olm deploy-css deploy-local
deploy: deploy-init deploy-appbundle deploy-rnnoise-binary deploy-tflite deploy-meet-models deploy-lib-jitsi-meet deploy-libflac deploy-olm deploy-css deploy-local deploy-facial-expressions
deploy-init:
rm -fr $(DEPLOY_DIR)
@ -92,6 +93,13 @@ deploy-meet-models:
$(MEET_MODELS_DIR)/*.tflite \
$(DEPLOY_DIR)
deploy-facial-expressions:
cp \
$(FACIAL_MODELS_DIR)/* \
$(BUILD_DIR)/facial-expressions-worker.min.js \
$(BUILD_DIR)/facial-expressions-worker.min.js.map \
$(DEPLOY_DIR)
deploy-css:
$(NODE_SASS) $(STYLES_MAIN) $(STYLES_BUNDLE) && \
$(CLEANCSS) --skip-rebase $(STYLES_BUNDLE) > $(STYLES_DESTINATION) ; \
@ -101,7 +109,7 @@ deploy-local:
([ ! -x deploy-local.sh ] || ./deploy-local.sh)
.NOTPARALLEL:
dev: deploy-init deploy-css deploy-rnnoise-binary deploy-tflite deploy-meet-models deploy-lib-jitsi-meet deploy-libflac deploy-olm
dev: deploy-init deploy-css deploy-rnnoise-binary deploy-tflite deploy-meet-models deploy-lib-jitsi-meet deploy-libflac deploy-olm deploy-facial-expressions
$(WEBPACK_DEV_SERVER)
source-package:


@ -2998,6 +2998,15 @@ export default {
room.sendEndpointMessage(to, payload);
},
/**
* Sends a facial expression as a string and its duration as a number.
*
* @param {object} payload - Object containing the {string} facialExpression
* and {number} duration.
*/
sendFacialExpression(payload) {
room.sendFacialExpression(payload);
},
/**
* Adds new listener.
* @param {String} eventName the name of the event


@ -649,6 +649,9 @@ var config = {
// Enables sending participants' emails (if available) to callstats and other analytics
// enableEmailInStats: false,
// Enables detecting participants' faces, recognizing their facial expressions and sending them to the other participants
// enableFacialRecognition: true,
// Controls the percentage of automatic feedback shown to participants when callstats is enabled.
// The default value is 100%. If set to 0, no automatic feedback will be requested
// feedbackPercentage: 100,
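
To enable the feature for a deployment, the flag added above is uncommented in config.js. A minimal sketch, assuming the surrounding config object is otherwise unchanged:

    var config = {
        // ... other options ...

        // Turn on facial expression detection and reporting to speaker stats.
        enableFacialRecognition: true
    };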


@ -27,7 +27,10 @@
.speaker-stats-item__status,
.speaker-stats-item__name,
.speaker-stats-item__time {
.speaker-stats-item__time,
.speaker-stats-item__name_expressions_on,
.speaker-stats-item__time_expressions_on,
.speaker-stats-item__expression {
display: inline-block;
margin: 5px 0;
vertical-align: middle;
@ -41,9 +44,35 @@
.speaker-stats-item__time {
width: 55%;
}
.speaker-stats-item__name_expressions_on {
width: 20%;
}
.speaker-stats-item__time_expressions_on {
width: 25%;
}
.speaker-stats-item__expression {
width: 7%;
text-align: center;
}
@media(max-width: 750px) {
.speaker-stats-item__name_expressions_on {
width: 25%;
}
.speaker-stats-item__time_expressions_on {
width: 30%;
}
.speaker-stats-item__expression {
width: 10%;
}
}
.speaker-stats-item__name,
.speaker-stats-item__time {
.speaker-stats-item__time,
.speaker-stats-item__name_expressions_on,
.speaker-stats-item__time_expressions_on,
.speaker-stats-item__expression {
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;


@ -862,7 +862,14 @@
"name": "Name",
"seconds": "{{count}}s",
"speakerStats": "Speaker Stats",
"speakerTime": "Speaker Time"
"speakerTime": "Speaker Time",
"happy": "Happy",
"neutral": "Neutral",
"sad": "Sad",
"surprised": "Surprised",
"angry": "Angry",
"fearful": "Fearful",
"disgusted": "Disgusted"
},
"startupoverlay": {
"policyText": " ",

package-lock.json generated

@ -53,6 +53,7 @@
"clipboard-copy": "4.0.1",
"clsx": "1.1.1",
"dropbox": "10.7.0",
"face-api.js": "0.22.2",
"focus-visible": "5.1.0",
"i18n-iso-countries": "6.8.0",
"i18next": "17.0.6",
@ -4562,6 +4563,30 @@
"node": ">=8"
}
},
"node_modules/@tensorflow/tfjs-core": {
"version": "1.7.0",
"resolved": "https://registry.npmjs.org/@tensorflow/tfjs-core/-/tfjs-core-1.7.0.tgz",
"integrity": "sha512-uwQdiklNjqBnHPeseOdG0sGxrI3+d6lybaKu2+ou3ajVeKdPEwpWbgqA6iHjq1iylnOGkgkbbnQ6r2lwkiIIHw==",
"dependencies": {
"@types/offscreencanvas": "~2019.3.0",
"@types/seedrandom": "2.4.27",
"@types/webgl-ext": "0.0.30",
"@types/webgl2": "0.0.4",
"node-fetch": "~2.1.2",
"seedrandom": "2.4.3"
},
"engines": {
"yarn": ">= 1.3.2"
}
},
"node_modules/@tensorflow/tfjs-core/node_modules/node-fetch": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.1.2.tgz",
"integrity": "sha1-q4hOjn5X44qUR1POxwb3iNF2i7U=",
"engines": {
"node": "4.x || >=6.0.0"
}
},
"node_modules/@types/eslint": {
"version": "7.28.0",
"resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-7.28.0.tgz",
@ -4642,6 +4667,11 @@
"integrity": "sha512-ho3Ruq+fFnBrZhUYI46n/bV2GjwzSkwuT4dTf0GkuNFmnb8nq4ny2z9JEVemFi6bdEJanHLlYfy9c6FN9B9McQ==",
"dev": true
},
"node_modules/@types/offscreencanvas": {
"version": "2019.3.0",
"resolved": "https://registry.npmjs.org/@types/offscreencanvas/-/offscreencanvas-2019.3.0.tgz",
"integrity": "sha512-esIJx9bQg+QYF0ra8GnvfianIY8qWB0GBx54PK5Eps6m+xTj86KLavHv6qDhzKcu5UUOgNfJ2pWaIIV7TRUd9Q=="
},
"node_modules/@types/parse-json": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz",
@ -4691,11 +4721,26 @@
"resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.1.tgz",
"integrity": "sha512-EaCxbanVeyxDRTQBkdLb3Bvl/HK7PBK6UJjsSixB0iHKoWxE5uu2Q/DgtpOhPIojN0Zl1whvOd7PoHs2P0s5eA=="
},
"node_modules/@types/seedrandom": {
"version": "2.4.27",
"resolved": "https://registry.npmjs.org/@types/seedrandom/-/seedrandom-2.4.27.tgz",
"integrity": "sha1-nbVjk33YaRX2kJK8QyWdL0hXjkE="
},
"node_modules/@types/stack-utils": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-1.0.1.tgz",
"integrity": "sha512-l42BggppR6zLmpfU6fq9HEa2oGPEI8yrSPL3GITjfRInppYFahObbIQOQK3UGxEnyQpltZLaPe75046NOZQikw=="
},
"node_modules/@types/webgl-ext": {
"version": "0.0.30",
"resolved": "https://registry.npmjs.org/@types/webgl-ext/-/webgl-ext-0.0.30.tgz",
"integrity": "sha512-LKVgNmBxN0BbljJrVUwkxwRYqzsAEPcZOe6S2T6ZaBDIrFp0qu4FNlpc5sM1tGbXUYFgdVQIoeLk1Y1UoblyEg=="
},
"node_modules/@types/webgl2": {
"version": "0.0.4",
"resolved": "https://registry.npmjs.org/@types/webgl2/-/webgl2-0.0.4.tgz",
"integrity": "sha512-PACt1xdErJbMUOUweSrbVM7gSIYm1vTncW2hF6Os/EeWi6TXYAYMPp+8v6rzHmypE5gHrxaxZNXgMkJVIdZpHw=="
},
"node_modules/@types/yargs": {
"version": "13.0.8",
"resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-13.0.8.tgz",
@ -9328,6 +9373,15 @@
"node": ">=0.10.0"
}
},
"node_modules/face-api.js": {
"version": "0.22.2",
"resolved": "https://registry.npmjs.org/face-api.js/-/face-api.js-0.22.2.tgz",
"integrity": "sha512-9Bbv/yaBRTKCXjiDqzryeKhYxmgSjJ7ukvOvEBy6krA0Ah/vNBlsf7iBNfJljWiPA8Tys1/MnB3lyP2Hfmsuyw==",
"dependencies": {
"@tensorflow/tfjs-core": "1.7.0",
"tslib": "^1.11.1"
}
},
"node_modules/fancy-log": {
"version": "1.3.3",
"resolved": "https://registry.npmjs.org/fancy-log/-/fancy-log-1.3.3.tgz",
@ -16293,11 +16347,6 @@
}
}
},
"node_modules/react-uid/node_modules/tslib": {
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz",
"integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg=="
},
"node_modules/react-window": {
"version": "1.8.6",
"resolved": "https://registry.npmjs.org/react-window/-/react-window-1.8.6.tgz",
@ -16861,6 +16910,11 @@
"sdp-verify": "checker.js"
}
},
"node_modules/seedrandom": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/seedrandom/-/seedrandom-2.4.3.tgz",
"integrity": "sha1-JDhQTa0zkXMUv/GKxNeU8W1qrsw="
},
"node_modules/select-hose": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz",
@ -18421,9 +18475,9 @@
}
},
"node_modules/tslib": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-1.9.3.tgz",
"integrity": "sha512-4krF8scpejhaOgqzBEcGM7yDIEfi0/8+8zDRZhNZZ2kjmHJ4hv3zCbQWxoJGz1iw5U0Jl0nma13xzHXcncMavQ=="
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz",
"integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg=="
},
"node_modules/type-check": {
"version": "0.4.0",
@ -23625,6 +23679,26 @@
"loader-utils": "^1.2.3"
}
},
"@tensorflow/tfjs-core": {
"version": "1.7.0",
"resolved": "https://registry.npmjs.org/@tensorflow/tfjs-core/-/tfjs-core-1.7.0.tgz",
"integrity": "sha512-uwQdiklNjqBnHPeseOdG0sGxrI3+d6lybaKu2+ou3ajVeKdPEwpWbgqA6iHjq1iylnOGkgkbbnQ6r2lwkiIIHw==",
"requires": {
"@types/offscreencanvas": "~2019.3.0",
"@types/seedrandom": "2.4.27",
"@types/webgl-ext": "0.0.30",
"@types/webgl2": "0.0.4",
"node-fetch": "~2.1.2",
"seedrandom": "2.4.3"
},
"dependencies": {
"node-fetch": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.1.2.tgz",
"integrity": "sha1-q4hOjn5X44qUR1POxwb3iNF2i7U="
}
}
},
"@types/eslint": {
"version": "7.28.0",
"resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-7.28.0.tgz",
@ -23705,6 +23779,11 @@
"integrity": "sha512-ho3Ruq+fFnBrZhUYI46n/bV2GjwzSkwuT4dTf0GkuNFmnb8nq4ny2z9JEVemFi6bdEJanHLlYfy9c6FN9B9McQ==",
"dev": true
},
"@types/offscreencanvas": {
"version": "2019.3.0",
"resolved": "https://registry.npmjs.org/@types/offscreencanvas/-/offscreencanvas-2019.3.0.tgz",
"integrity": "sha512-esIJx9bQg+QYF0ra8GnvfianIY8qWB0GBx54PK5Eps6m+xTj86KLavHv6qDhzKcu5UUOgNfJ2pWaIIV7TRUd9Q=="
},
"@types/parse-json": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz",
@ -23756,11 +23835,26 @@
"resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.1.tgz",
"integrity": "sha512-EaCxbanVeyxDRTQBkdLb3Bvl/HK7PBK6UJjsSixB0iHKoWxE5uu2Q/DgtpOhPIojN0Zl1whvOd7PoHs2P0s5eA=="
},
"@types/seedrandom": {
"version": "2.4.27",
"resolved": "https://registry.npmjs.org/@types/seedrandom/-/seedrandom-2.4.27.tgz",
"integrity": "sha1-nbVjk33YaRX2kJK8QyWdL0hXjkE="
},
"@types/stack-utils": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-1.0.1.tgz",
"integrity": "sha512-l42BggppR6zLmpfU6fq9HEa2oGPEI8yrSPL3GITjfRInppYFahObbIQOQK3UGxEnyQpltZLaPe75046NOZQikw=="
},
"@types/webgl-ext": {
"version": "0.0.30",
"resolved": "https://registry.npmjs.org/@types/webgl-ext/-/webgl-ext-0.0.30.tgz",
"integrity": "sha512-LKVgNmBxN0BbljJrVUwkxwRYqzsAEPcZOe6S2T6ZaBDIrFp0qu4FNlpc5sM1tGbXUYFgdVQIoeLk1Y1UoblyEg=="
},
"@types/webgl2": {
"version": "0.0.4",
"resolved": "https://registry.npmjs.org/@types/webgl2/-/webgl2-0.0.4.tgz",
"integrity": "sha512-PACt1xdErJbMUOUweSrbVM7gSIYm1vTncW2hF6Os/EeWi6TXYAYMPp+8v6rzHmypE5gHrxaxZNXgMkJVIdZpHw=="
},
"@types/yargs": {
"version": "13.0.8",
"resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-13.0.8.tgz",
@ -27429,6 +27523,15 @@
}
}
},
"face-api.js": {
"version": "0.22.2",
"resolved": "https://registry.npmjs.org/face-api.js/-/face-api.js-0.22.2.tgz",
"integrity": "sha512-9Bbv/yaBRTKCXjiDqzryeKhYxmgSjJ7ukvOvEBy6krA0Ah/vNBlsf7iBNfJljWiPA8Tys1/MnB3lyP2Hfmsuyw==",
"requires": {
"@tensorflow/tfjs-core": "1.7.0",
"tslib": "^1.11.1"
}
},
"fancy-log": {
"version": "1.3.3",
"resolved": "https://registry.npmjs.org/fancy-log/-/fancy-log-1.3.3.tgz",
@ -32888,13 +32991,6 @@
"integrity": "sha512-6C5pwNYP1udgp5feQ9MTBZBKD4su9nhD2aYCFY1bB0Bpask8wYKYz0ZhAtAJ4lcmTDC5kY1ByGTQMFDHQW6p0w==",
"requires": {
"tslib": "^1.10.0"
},
"dependencies": {
"tslib": {
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz",
"integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg=="
}
}
},
"react-window": {
@ -33335,6 +33431,11 @@
"resolved": "https://registry.npmjs.org/sdp-transform/-/sdp-transform-2.3.0.tgz",
"integrity": "sha1-V6lXWUIEHYV3qGnXx01MOgvYiPY="
},
"seedrandom": {
"version": "2.4.3",
"resolved": "https://registry.npmjs.org/seedrandom/-/seedrandom-2.4.3.tgz",
"integrity": "sha1-JDhQTa0zkXMUv/GKxNeU8W1qrsw="
},
"select-hose": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz",
@ -34575,9 +34676,9 @@
}
},
"tslib": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-1.9.3.tgz",
"integrity": "sha512-4krF8scpejhaOgqzBEcGM7yDIEfi0/8+8zDRZhNZZ2kjmHJ4hv3zCbQWxoJGz1iw5U0Jl0nma13xzHXcncMavQ=="
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz",
"integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg=="
},
"type-check": {
"version": "0.4.0",


@ -58,6 +58,7 @@
"clipboard-copy": "4.0.1",
"clsx": "1.1.1",
"dropbox": "10.7.0",
"face-api.js": "0.22.2",
"focus-visible": "5.1.0",
"i18n-iso-countries": "6.8.0",
"i18next": "17.0.6",


@ -17,5 +17,6 @@ import '../screen-share/middleware';
import '../shared-video/middleware';
import '../talk-while-muted/middleware';
import '../virtual-background/middleware';
import '../facial-recognition/middleware';
import './middlewares.any';


@ -2,6 +2,7 @@
import '../base/devices/reducer';
import '../e2ee/reducer';
import '../facial-recognition/reducer';
import '../feedback/reducer';
import '../local-recording/reducer';
import '../no-audio-signal/reducer';


@ -0,0 +1,41 @@
// @flow
/**
* Redux action type dispatched in order to add a facial expression.
*
* {
* type: ADD_FACIAL_EXPRESSION,
* facialExpression: string,
* duration: number
* }
*/
export const ADD_FACIAL_EXPRESSION = 'ADD_FACIAL_EXPRESSION';
/**
* Redux action type dispatched in order to set the time interval at which
* messages are sent to the facial expression worker.
*
* {
* type: SET_DETECTION_TIME_INTERVAL,
* time: number
* }
*/
export const SET_DETECTION_TIME_INTERVAL = 'SET_DETECTION_TIME_INTERVAL';
/**
* Redux action type dispatched in order to set recognition active in the state.
*
* {
* type: START_FACIAL_RECOGNITION
* }
*/
export const START_FACIAL_RECOGNITION = 'START_FACIAL_RECOGNITION';
/**
* Redux action type dispatched in order to set recognition inactive in the state.
*
* {
* type: STOP_FACIAL_RECOGNITION
* }
*/
export const STOP_FACIAL_RECOGNITION = 'STOP_FACIAL_RECOGNITION';


@ -0,0 +1,229 @@
// @flow
import { getLocalVideoTrack } from '../base/tracks';
import 'image-capture';
import './createImageBitmap';
import {
ADD_FACIAL_EXPRESSION,
SET_DETECTION_TIME_INTERVAL,
START_FACIAL_RECOGNITION,
STOP_FACIAL_RECOGNITION
} from './actionTypes';
import { sendDataToWorker } from './functions';
import logger from './logger';
/**
* Time used for detection interval when facial expressions worker uses webgl backend.
*/
const WEBGL_TIME_INTERVAL = 1000;
/**
* Time used for detection interval when facial expression worker uses cpu backend.
*/
const CPU_TIME_INTERVAL = 6000;
/**
* Object containing an image capture of the local track.
*/
let imageCapture;
/**
* Object where the facial expression worker is stored.
*/
let worker;
/**
* The last facial expression received from the worker.
*/
let lastFacialExpression;
/**
* How many consecutive duplicate expressions have occurred.
* It is reset to 0 when an expression different from the last one is detected.
*/
let duplicateConsecutiveExpressions = 0;
/**
* Loads the worker that predicts the facial expression.
*
* @returns {void}
*/
export function loadWorker() {
return function(dispatch: Function) {
if (!window.Worker) {
logger.warn('Browser does not support web workers');
return;
}
worker = new Worker('libs/facial-expressions-worker.min.js', { name: 'Facial Expression Worker' });
worker.onmessage = function(e: Object) {
const { type, value } = e.data;
// Receives a message indicating which backend tfjs decided to use.
// It is sent as a response to the first message posted to the worker.
if (type === 'tf-backend' && value) {
let detectionTimeInterval = -1;
if (value === 'webgl') {
detectionTimeInterval = WEBGL_TIME_INTERVAL;
} else if (value === 'cpu') {
detectionTimeInterval = CPU_TIME_INTERVAL;
}
dispatch(setDetectionTimeInterval(detectionTimeInterval));
}
// receives a message with the predicted facial expression.
if (type === 'facial-expression') {
sendDataToWorker(worker, imageCapture);
if (!value) {
return;
}
if (value === lastFacialExpression) {
duplicateConsecutiveExpressions++;
} else {
lastFacialExpression
&& dispatch(addFacialExpression(lastFacialExpression, duplicateConsecutiveExpressions + 1));
lastFacialExpression = value;
duplicateConsecutiveExpressions = 0;
}
}
};
};
}
/**
* Starts the recognition and detection of face expressions.
*
* @returns {Function}
*/
export function startFacialRecognition() {
return async function(dispatch: Function, getState: Function) {
if (worker === undefined || worker === null) {
return;
}
const state = getState();
const { recognitionActive } = state['features/facial-recognition'];
if (recognitionActive) {
return;
}
const localVideoTrack = getLocalVideoTrack(state['features/base/tracks']);
if (localVideoTrack === undefined) {
return;
}
const stream = localVideoTrack.jitsiTrack.getOriginalStream();
if (stream === null) {
return;
}
dispatch({ type: START_FACIAL_RECOGNITION });
logger.log('Start face recognition');
const firstVideoTrack = stream.getVideoTracks()[0];
// $FlowFixMe
imageCapture = new ImageCapture(firstVideoTrack);
sendDataToWorker(worker, imageCapture);
};
}
/**
* Stops the recognition and detection of face expressions.
*
* @returns {void}
*/
export function stopFacialRecognition() {
return function(dispatch: Function, getState: Function) {
const state = getState();
const { recognitionActive } = state['features/facial-recognition'];
if (!recognitionActive) {
imageCapture = null;
return;
}
imageCapture = null;
worker.postMessage({
id: 'CLEAR_TIMEOUT'
});
lastFacialExpression
&& dispatch(addFacialExpression(lastFacialExpression, duplicateConsecutiveExpressions + 1));
duplicateConsecutiveExpressions = 0;
dispatch({ type: STOP_FACIAL_RECOGNITION });
logger.log('Stop face recognition');
};
}
/**
* Resets the track in the image capture.
*
* @returns {void}
*/
export function resetTrack() {
return function(dispatch: Function, getState: Function) {
const state = getState();
const { jitsiTrack: localVideoTrack } = getLocalVideoTrack(state['features/base/tracks']);
const stream = localVideoTrack.getOriginalStream();
const firstVideoTrack = stream.getVideoTracks()[0];
// $FlowFixMe
imageCapture = new ImageCapture(firstVideoTrack);
};
}
/**
* Changes the track from the image capture with a given one.
*
* @param {Object} track - The track that will be in the new image capture.
* @returns {void}
*/
export function changeTrack(track: Object) {
const { jitsiTrack } = track;
const stream = jitsiTrack.getOriginalStream();
const firstVideoTrack = stream.getVideoTracks()[0];
// $FlowFixMe
imageCapture = new ImageCapture(firstVideoTrack);
}
/**
* Adds a new facial expression and its duration.
*
* @param {string} facialExpression - Facial expression to be added.
* @param {number} duration - Duration in seconds of the facial expression.
* @returns {Object}
*/
function addFacialExpression(facialExpression: string, duration: number) {
return function(dispatch: Function, getState: Function) {
const { detectionTimeInterval } = getState()['features/facial-recognition'];
let finalDuration = duration;
if (detectionTimeInterval !== -1) {
finalDuration *= detectionTimeInterval / 1000;
}
dispatch({
type: ADD_FACIAL_EXPRESSION,
facialExpression,
duration: finalDuration
});
};
}
/**
* Sets the time interval for the detection worker post message.
*
* @param {number} time - The time interval.
* @returns {Object}
*/
function setDetectionTimeInterval(time: number) {
return {
type: SET_DETECTION_TIME_INTERVAL,
time
};
}
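
To make the duration accounting in addFacialExpression concrete, a short worked example (values assumed; the constants are the ones defined at the top of this file):

    // Assume the worker reported the webgl backend, so detectionTimeInterval = WEBGL_TIME_INTERVAL = 1000 ms,
    // and it then predicts 'happy' three times in a row, followed by 'neutral'.
    // The two repeats raise duplicateConsecutiveExpressions to 2, so when 'neutral' arrives:
    //     duration      = duplicateConsecutiveExpressions + 1 = 3            // detections
    //     finalDuration = duration * (detectionTimeInterval / 1000) = 3      // seconds
    // and ADD_FACIAL_EXPRESSION is dispatched with { facialExpression: 'happy', duration: 3 }.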


@ -0,0 +1,11 @@
// @flow
export const FACIAL_EXPRESSION_EMOJIS = {
happy: '😊',
neutral: '😐',
sad: '🙁',
surprised: '😮',
angry: '😠',
fearful: '😨',
disgusted: '🤢'
};


@ -0,0 +1,25 @@
/*
* Safari polyfill for createImageBitmap
* https://developer.mozilla.org/en-US/docs/Web/API/WindowOrWorkerGlobalScope/createImageBitmap
*
* Support source image types: Canvas.
*/
if (!('createImageBitmap' in window)) {
window.createImageBitmap = async function(data) {
return new Promise((resolve, reject) => {
let dataURL;
if (data instanceof HTMLCanvasElement) {
dataURL = data.toDataURL();
} else {
reject(new Error('createImageBitmap does not handle the provided image source type'));

return;
}
const img = document.createElement('img');
img.addEventListener('load', () => {
resolve(img);
});
img.src = dataURL;
});
};
}


@ -0,0 +1,106 @@
/* eslint-disable */
// From: https://github.com/justadudewhohacks/face-api.js/issues/47
// This is needed because face-api.js does not support working in a WebWorker natively
// Updated Dec 1 2020 to work on latest Chrome (tested in WebWorkers on Chrome Mobile on Android / Google Pixel 3 as well)
if(!self.OffscreenCanvas) {
self.OffscreenCanvas = class OffscreenCanvas {
constructor() {
}
}
}
if(!self.OffscreenCanvasRenderingContext2D) {
self.OffscreenCanvasRenderingContext2D = class OffscreenCanvasRenderingContext2D {
constructor() {
}
}
}
self.Canvas = self.HTMLCanvasElement = OffscreenCanvas;
// self.HTMLCanvasElement.name = 'HTMLCanvasElement';
// self.Canvas.name = 'Canvas';
self.CanvasRenderingContext2D = OffscreenCanvasRenderingContext2D;
function HTMLImageElement(){}
function HTMLVideoElement(){}
self.Image = HTMLImageElement;
self.Video = HTMLVideoElement;
function Storage () {
let _data = {};
this.clear = function(){ return _data = {}; };
this.getItem = function(id){ return _data.hasOwnProperty(id) ? _data[id] : undefined; };
this.removeItem = function(id){ return delete _data[id]; };
this.setItem = function(id, val){ return _data[id] = String(val); };
}
class Document extends EventTarget {}
self.document = new Document();
self.window = self.Window = self;
self.localStorage = new Storage();
function createElement(element) {
switch(element) {
case 'canvas':
let canvas = new Canvas(1,1);
canvas.localName = 'canvas';
canvas.nodeName = 'CANVAS';
canvas.tagName = 'CANVAS';
canvas.nodeType = 1;
canvas.innerHTML = '';
canvas.remove = () => { console.log('nope'); };
return canvas;
default:
console.log('arg', element);
break;
}
}
document.createElement = createElement;
document.location = self.location;
// These are the same checks face-api.js/isBrowser does
if(typeof window !== 'object') {
console.warn("Check failed: window");
}
if(typeof document === 'undefined') {
console.warn("Check failed: document");
}
if(typeof HTMLImageElement === 'undefined') {
console.warn("Check failed: HTMLImageElement");
}
if(typeof HTMLCanvasElement === 'undefined') {
console.warn("Check failed: HTMLCanvasElement");
}
if(typeof HTMLVideoElement === 'undefined') {
console.warn("Check failed: HTMLVideoElement");
}
if(typeof ImageData === 'undefined') {
console.warn("Check failed: ImageData");
}
if(typeof CanvasRenderingContext2D === 'undefined') {
console.warn("Check failed: CanvasRenderingContext2D");
}
self.window = window;
self.document = document;
self.HTMLImageElement = HTMLImageElement;
self.HTMLVideoElement = HTMLVideoElement;
// These are the same checks face-api.js/isBrowser does
const isBrowserCheck = typeof window === 'object'
&& typeof document !== 'undefined'
&& typeof HTMLImageElement !== 'undefined'
&& typeof HTMLCanvasElement !== 'undefined'
&& typeof HTMLVideoElement !== 'undefined'
&& typeof ImageData !== 'undefined'
&& typeof CanvasRenderingContext2D !== 'undefined';
if(!isBrowserCheck) {
throw new Error("Failed to monkey patch for face-api, face-api will fail");
}


@ -0,0 +1,116 @@
// @flow
import './faceApiPatch';
import * as faceapi from 'face-api.js';
/**
* A flag that indicates whether the tensorflow models were loaded or not.
*/
let modelsLoaded = false;
/**
* A flag that indicates whether the tensorflow backend is set or not.
*/
let backendSet = false;
/**
* A timer variable for the detection timeout.
*/
let timer;
/**
* The duration of the set timeout.
*/
let timeoutDuration = -1;
/**
* Time used for detection interval when facial expressions worker uses webgl backend.
*/
const WEBGL_TIME_INTERVAL = 1000;
/**
* Time used for detection interval when facial expression worker uses cpu backend.
*/
const CPU_TIME_INTERVAL = 6000;
// eslint-disable-next-line no-unused-vars
const window = {
screen: {
width: 1280,
height: 720
}
};
onmessage = async function(message) {
// Receives image data
if (message.data.id === 'SET_TIMEOUT') {
if (message.data.imageData === null || message.data.imageData === undefined) {
return;
}
// Load the models if they have not been loaded yet.
if (!modelsLoaded) {
await faceapi.loadTinyFaceDetectorModel('.');
await faceapi.loadFaceExpressionModel('.');
modelsLoaded = true;
}
faceapi.tf.engine().startScope();
const tensor = faceapi.tf.browser.fromPixels(message.data.imageData);
const detections = await faceapi.detectSingleFace(
tensor,
new faceapi.TinyFaceDetectorOptions()
).withFaceExpressions();
// Determine which backend tfjs picked and report it back once.
if (!backendSet) {
const backend = faceapi.tf.getBackend();
if (backend !== undefined) {
if (backend === 'webgl') {
timeoutDuration = WEBGL_TIME_INTERVAL;
} else if (backend === 'cpu') {
timeoutDuration = CPU_TIME_INTERVAL;
}
self.postMessage({
type: 'tf-backend',
value: backend
});
backendSet = true;
}
}
faceapi.tf.engine().endScope();
let facialExpression;
if (detections) {
facialExpression = detections.expressions.asSortedArray()[0].expression;
}
if (timeoutDuration === -1) {
self.postMessage({
type: 'facial-expression',
value: facialExpression
});
} else {
timer = setTimeout(() => {
self.postMessage({
type: 'facial-expression',
value: facialExpression
});
}, timeoutDuration);
}
} else if (message.data.id === 'CLEAR_TIMEOUT') {
// Clear the timeout.
if (timer) {
clearTimeout(timer);
timer = null;
}
}
};
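
For reference, a sketch of one exchange between the main thread and this worker; the message shapes are the ones used above and in sendDataToWorker, while the pixel data is an ImageData grabbed from the local video track:

    // main thread -> worker (posted by sendDataToWorker):
    //     { id: 'SET_TIMEOUT', imageData: <ImageData> }
    // worker -> main thread, once, after the backend has been decided:
    //     { type: 'tf-backend', value: 'webgl' }          // or 'cpu'
    // worker -> main thread, after every detection (delayed by timeoutDuration once the backend is known):
    //     { type: 'facial-expression', value: 'happy' }   // value is undefined when no face was detected
    // main thread -> worker, when recognition stops:
    //     { id: 'CLEAR_TIMEOUT' }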


@ -0,0 +1,90 @@
// @flow
import logger from './logger';
/**
* Sends the facial expression with its duration to all the other participants.
*
* @param {Object} conference - The current conference.
* @param {string} facialExpression - Facial expression to be sent.
* @param {number} duration - The duration of the facial expression in seconds.
* @returns {void}
*/
export function sendFacialExpressionToParticipants(
conference: Object,
facialExpression: string,
duration: number
): void {
try {
conference.sendEndpointMessage('', {
type: 'facial_expression',
facialExpression,
duration
});
} catch (err) {
logger.warn('Could not broadcast the facial expression to the other participants', err);
}
}
/**
* Sends the facial expression with its duration to the xmpp server.
*
* @param {Object} conference - The current conference.
* @param {string} facialExpression - Facial expression to be sent.
* @param {number} duration - The duration of the facial expression in seconds.
* @returns {void}
*/
export function sendFacialExpressionToServer(
conference: Object,
facialExpression: string,
duration: number
): void {
try {
conference.sendFacialExpression({
facialExpression,
duration
});
} catch (err) {
logger.warn('Could not send the facial expression to xmpp server', err);
}
}
/**
* Grabs a frame from the image capture, draws it on a canvas and sends the resulting image data to the facial expression worker.
*
* @param {Worker} worker - Facial expression worker.
* @param {Object} imageCapture - Image capture that contains the current track.
* @returns {Promise<void>}
*/
export async function sendDataToWorker(
worker: Worker,
imageCapture: Object
): Promise<void> {
if (imageCapture === null || imageCapture === undefined) {
return;
}
let imageBitmap;
try {
imageBitmap = await imageCapture.grabFrame();
} catch (err) {
logger.warn(err);
return;
}
const canvas = document.createElement('canvas');
const context = canvas.getContext('2d');
canvas.width = imageBitmap.width;
canvas.height = imageBitmap.height;
context.drawImage(imageBitmap, 0, 0);
const imageData = context.getImageData(0, 0, imageBitmap.width, imageBitmap.height);
worker.postMessage({
id: 'SET_TIMEOUT',
imageData
});
}


@ -0,0 +1 @@
export * from './actions';


@ -0,0 +1,5 @@
// @flow
import { getLogger } from '../base/logging/functions';
export default getLogger('features/facial-recognition');


@ -0,0 +1,106 @@
// @flow
import {
CONFERENCE_JOINED,
CONFERENCE_WILL_LEAVE,
getCurrentConference
} from '../base/conference';
import { getParticipantCount } from '../base/participants';
import { MiddlewareRegistry } from '../base/redux';
import { TRACK_UPDATED, TRACK_ADDED, TRACK_REMOVED } from '../base/tracks';
import { VIRTUAL_BACKGROUND_TRACK_CHANGED } from '../virtual-background/actionTypes';
import { ADD_FACIAL_EXPRESSION } from './actionTypes';
import {
changeTrack,
loadWorker,
resetTrack,
stopFacialRecognition,
startFacialRecognition
} from './actions';
import { sendFacialExpressionToParticipants, sendFacialExpressionToServer } from './functions';
MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
const { enableFacialRecognition } = getState()['features/base/config'];
if (!enableFacialRecognition) {
return next(action);
}
if (action.type === CONFERENCE_JOINED) {
dispatch(loadWorker());
dispatch(startFacialRecognition());
return next(action);
}
if (!getCurrentConference(getState())) {
return next(action);
}
switch (action.type) {
case CONFERENCE_WILL_LEAVE : {
dispatch(stopFacialRecognition());
return next(action);
}
case TRACK_UPDATED: {
const { videoType, type } = action.track.jitsiTrack;
if (videoType === 'camera') {
const { muted, videoStarted } = action.track;
if (videoStarted === true) {
dispatch(startFacialRecognition());
}
if (muted !== undefined) {
if (muted) {
dispatch(stopFacialRecognition());
} else {
dispatch(startFacialRecognition());
type === 'presenter' && changeTrack(action.track);
}
}
}
return next(action);
}
case TRACK_ADDED: {
const { mediaType, videoType } = action.track;
if (mediaType === 'presenter' && videoType === 'camera') {
dispatch(startFacialRecognition());
changeTrack(action.track);
}
return next(action);
}
case TRACK_REMOVED: {
const { videoType } = action.track.jitsiTrack;
if ([ 'camera', 'desktop' ].includes(videoType)) {
dispatch(stopFacialRecognition());
}
return next(action);
}
case VIRTUAL_BACKGROUND_TRACK_CHANGED: {
dispatch(resetTrack());
return next(action);
}
case ADD_FACIAL_EXPRESSION: {
const state = getState();
const conference = getCurrentConference(state);
if (getParticipantCount(state) > 1) {
sendFacialExpressionToParticipants(conference, action.facialExpression, action.duration);
}
sendFacialExpressionToServer(conference, action.facialExpression, action.duration);
return next(action);
}
}
return next(action);
});


@ -0,0 +1,55 @@
// @flow
import { ReducerRegistry } from '../base/redux';
import {
ADD_FACIAL_EXPRESSION,
SET_DETECTION_TIME_INTERVAL,
START_FACIAL_RECOGNITION,
STOP_FACIAL_RECOGNITION
} from './actionTypes';
const defaultState = {
facialExpressions: {
happy: 0,
neutral: 0,
surprised: 0,
angry: 0,
fearful: 0,
disgusted: 0,
sad: 0
},
detectionTimeInterval: -1,
recognitionActive: false
};
ReducerRegistry.register('features/facial-recognition', (state = defaultState, action) => {
switch (action.type) {
case ADD_FACIAL_EXPRESSION: {
const facialExpressions = { ...state.facialExpressions };

facialExpressions[action.facialExpression] += action.duration;

return { ...state, facialExpressions };
}
case SET_DETECTION_TIME_INTERVAL: {
return {
...state,
detectionTimeInterval: action.time
};
}
case START_FACIAL_RECOGNITION: {
return {
...state,
recognitionActive: true
};
}
case STOP_FACIAL_RECOGNITION: {
return {
...state,
recognitionActive: false
};
}
}
return state;
});

File diff suppressed because one or more lines are too long


@ -0,0 +1 @@
[{"weights":[{"name":"conv0/filters","shape":[3,3,3,16],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.009007044399485869,"min":-1.2069439495311063}},{"name":"conv0/bias","shape":[16],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.005263455241334205,"min":-0.9211046672334858}},{"name":"conv1/depthwise_filter","shape":[3,3,16,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.004001977630690033,"min":-0.5042491814669441}},{"name":"conv1/pointwise_filter","shape":[1,1,16,32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.013836609615999109,"min":-1.411334180831909}},{"name":"conv1/bias","shape":[32],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0015159862590771096,"min":-0.30926119685173037}},{"name":"conv2/depthwise_filter","shape":[3,3,32,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.002666276225856706,"min":-0.317286870876948}},{"name":"conv2/pointwise_filter","shape":[1,1,32,64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.015265831292844286,"min":-1.6792414422128714}},{"name":"conv2/bias","shape":[64],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0020280554598453,"min":-0.37113414915168985}},{"name":"conv3/depthwise_filter","shape":[3,3,64,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006100742489683862,"min":-0.8907084034938438}},{"name":"conv3/pointwise_filter","shape":[1,1,64,128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.016276211832083907,"min":-2.0508026908425725}},{"name":"conv3/bias","shape":[128],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.003394414279975143,"min":-0.7637432129944072}},{"name":"conv4/depthwise_filter","shape":[3,3,128,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.006716050119961009,"min":-0.8059260143953211}},{"name":"conv4/pointwise_filter","shape":[1,1,128,256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.021875603993733724,"min":-2.8875797271728514}},{"name":"conv4/bias","shape":[256],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.0041141652009066415,"min":-0.8187188749804216}},{"name":"conv5/depthwise_filter","shape":[3,3,256,1],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008423839597141042,"min":-0.9013508368940915}},{"name":"conv5/pointwise_filter","shape":[1,1,256,512],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.030007277283014035,"min":-3.8709387695088107}},{"name":"conv5/bias","shape":[512],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.008402082966823203,"min":-1.4871686851277068}},{"name":"conv8/filters","shape":[1,1,512,25],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.028336129469030042,"min":-4.675461362389957}},{"name":"conv8/bias","shape":[25],"dtype":"float32","quantization":{"dtype":"uint8","scale":0.002268134028303857,"min":-0.41053225912299807}}],"paths":["tiny_face_detector_model-shard1"]}]


@ -18,6 +18,8 @@ import SpeakerStatsSearch from './SpeakerStatsSearch';
declare var interfaceConfig: Object;
declare var APP;
/**
* The type of the React {@code Component} props of {@link SpeakerStats}.
*/
@ -28,6 +30,23 @@ type Props = {
*/
_localDisplayName: string,
/**
* The flag which shows if facial recognition is enabled, obtained from the redux store.
* If enabled, facial expressions are shown.
*/
_enableFacialRecognition: boolean,
/**
* The facial expressions for the local participant obtained from the redux store.
*/
_localFacialExpressions: Array<Object>,
/**
* The flag which shows if all the facial expressions are shown or only 4.
* If true, show only 4; if false, show all.
*/
_reduceExpressions: boolean,
/**
* The speaker participant stats.
*/
@ -51,7 +70,10 @@ type Props = {
/**
* The function to translate human-readable text.
*/
t: Function
t: Function,
stats: Object,
lastFacialExpression: string,
};
/**
@ -111,10 +133,13 @@ class SpeakerStats extends Component<Props> {
<Dialog
cancelKey = 'dialog.close'
submitDisabled = { true }
titleKey = 'speakerStats.speakerStats'>
titleKey = 'speakerStats.speakerStats'
width = { this.props._enableFacialRecognition ? 'large' : 'medium' }>
<div className = 'speaker-stats'>
<SpeakerStatsSearch onSearch = { this._onSearch } />
<SpeakerStatsLabels />
<SpeakerStatsLabels
reduceExpressions = { this.props._reduceExpressions }
showFacialExpressions = { this.props._enableFacialRecognition } />
{ items }
</div>
</Dialog>
@ -139,14 +164,22 @@ class SpeakerStats extends Component<Props> {
const isDominantSpeaker = statsModel.isDominantSpeaker();
const dominantSpeakerTime = statsModel.getTotalDominantSpeakerTime();
const hasLeft = statsModel.hasLeft();
let facialExpressions;
if (this.props._enableFacialRecognition) {
facialExpressions = statsModel.getFacialExpressions();
}
return (
<SpeakerStatsItem
displayName = { statsModel.getDisplayName() }
dominantSpeakerTime = { dominantSpeakerTime }
facialExpressions = { facialExpressions }
hasLeft = { hasLeft }
isDominantSpeaker = { isDominantSpeaker }
key = { userId } />
key = { userId }
reduceExpressions = { this.props._reduceExpressions }
showFacialExpressions = { this.props._enableFacialRecognition } />
);
}
@ -195,6 +228,9 @@ class SpeakerStats extends Component<Props> {
? `${this.props._localDisplayName} (${meString})`
: meString
);
if (this.props._enableFacialRecognition) {
stats[userId].setFacialExpressions(this.props._localFacialExpressions);
}
}
if (!stats[userId].getDisplayName()) {
@ -222,6 +258,10 @@ class SpeakerStats extends Component<Props> {
*/
function _mapStateToProps(state) {
const localParticipant = getLocalParticipant(state);
const { enableFacialRecognition } = state['features/base/config'];
const { facialExpressions: localFacialExpressions } = state['features/facial-recognition'];
const { clientWidth } = state['features/base/responsive-ui'];
return {
/**
@ -232,7 +272,11 @@ function _mapStateToProps(state) {
*/
_localDisplayName: localParticipant && localParticipant.name,
_stats: getSpeakerStats(state),
_criteria: getSearchCriteria(state)
_criteria: getSearchCriteria(state),
_enableFacialRecognition: enableFacialRecognition,
_localFacialExpressions: localFacialExpressions,
_reduceExpressions: clientWidth < 750
};
}


@ -21,6 +21,12 @@ type Props = {
*/
dominantSpeakerTime: number,
/**
* The object that has the participant's facial expressions as keys
* and, as values, a number representing how long each expression was detected.
*/
facialExpressions: Object,
/**
* True if the participant is no longer in the meeting.
*/
@ -31,6 +37,16 @@ type Props = {
*/
isDominantSpeaker: boolean,
/**
* True if the client width is less than 750.
*/
reduceExpressions: boolean,
/**
* True if the facial recognition is not disabled.
*/
showFacialExpressions: boolean,
/**
* Invoked to obtain translated strings.
*/
@ -64,15 +80,66 @@ class SpeakerStatsItem extends Component<Props> {
</div>
<div
aria-label = { this.props.t('speakerStats.speakerStats') }
className = 'speaker-stats-item__name'>
className = { `speaker-stats-item__name${
this.props.showFacialExpressions ? '_expressions_on' : ''
}` }>
{ this.props.displayName }
</div>
<div
aria-label = { this.props.t('speakerStats.speakerTime') }
className = 'speaker-stats-item__time'>
className = { `speaker-stats-item__time${
this.props.showFacialExpressions ? '_expressions_on' : ''
}` }>
<TimeElapsed
time = { this.props.dominantSpeakerTime } />
</div>
{ this.props.showFacialExpressions
&& (
<>
<div
aria-label = { 'Happy' }
className = 'speaker-stats-item__expression'>
{ this.props.facialExpressions.happy }
</div>
<div
aria-label = { 'Neutral' }
className = 'speaker-stats-item__expression'>
{ this.props.facialExpressions.neutral }
</div>
<div
aria-label = { 'Sad' }
className = 'speaker-stats-item__expression'>
{ this.props.facialExpressions.sad }
</div>
<div
aria-label = { 'Surprised' }
className = 'speaker-stats-item__expression'>
{ this.props.facialExpressions.surprised }
</div>
{!this.props.reduceExpressions && (
<>
<div
aria-label = { 'Angry' }
className = 'speaker-stats-item__expression'>
{ this.props.facialExpressions.angry }
</div>
<div
aria-label = { 'Fearful' }
className = 'speaker-stats-item__expression'>
{ this.props.facialExpressions.fearful }
</div>
<div
aria-label = { 'Disgusted' }
className = 'speaker-stats-item__expression'>
{ this.props.facialExpressions.disgusted }
</div>
</>
)}
</>
)
}
</div>
);
}


@ -3,16 +3,28 @@
import React, { Component } from 'react';
import { translate } from '../../base/i18n';
import { Tooltip } from '../../base/tooltip';
import { FACIAL_EXPRESSION_EMOJIS } from '../../facial-recognition/constants.js';
/**
* The type of the React {@code Component} props of {@link SpeakerStatsLabels}.
*/
type Props = {
/**
* True if the client width is less than 750.
*/
reduceExpressions: boolean,
/**
* True if the facial recognition is not disabled.
*/
showFacialExpressions: boolean,
/**
* The function to translate human-readable text.
*/
t: Function
t: Function,
};
/**
@ -33,12 +45,43 @@ class SpeakerStatsLabels extends Component<Props> {
return (
<div className = 'speaker-stats-item__labels'>
<div className = 'speaker-stats-item__status' />
<div className = 'speaker-stats-item__name'>
<div
className = { `speaker-stats-item__name${
this.props.showFacialExpressions ? '_expressions_on' : ''
}` }>
{ t('speakerStats.name') }
</div>
<div className = 'speaker-stats-item__time'>
<div
className = { `speaker-stats-item__time${
this.props.showFacialExpressions ? '_expressions_on' : ''
}` }>
{ t('speakerStats.speakerTime') }
</div>
{this.props.showFacialExpressions
&& (this.props.reduceExpressions
? Object.keys(FACIAL_EXPRESSION_EMOJIS)
.filter(expression => ![ 'angry', 'fearful', 'disgusted' ].includes(expression))
: Object.keys(FACIAL_EXPRESSION_EMOJIS)
).map(
expression => (
<div
className = 'speaker-stats-item__expression'
key = { expression }>
<Tooltip
content = { t(`speakerStats.${expression}`) }
position = { 'top' } >
<div
// eslint-disable-next-line react-native/no-inline-styles
style = {{ fontSize: 17 }}>
{ FACIAL_EXPRESSION_EMOJIS[expression] }
</div>
</Tooltip>
</div>
))
}
</div>
);
}


@ -23,3 +23,13 @@ export const BACKGROUND_ENABLED = 'BACKGROUND_ENABLED';
* }}
*/
export const SET_VIRTUAL_BACKGROUND = 'SET_VIRTUAL_BACKGROUND';
/**
* The type which signals that the local track was changed due to a change of the virtual background.
*
* @returns {{
* type: VIRTUAL_BACKGROUND_TRACK_CHANGED
*}}
*/
export const VIRTUAL_BACKGROUND_TRACK_CHANGED = 'VIRTUAL_BACKGROUND_TRACK_CHANGED';


@ -2,7 +2,7 @@
import { createVirtualBackgroundEffect } from '../stream-effects/virtual-background';
import { BACKGROUND_ENABLED, SET_VIRTUAL_BACKGROUND } from './actionTypes';
import { BACKGROUND_ENABLED, SET_VIRTUAL_BACKGROUND, VIRTUAL_BACKGROUND_TRACK_CHANGED } from './actionTypes';
import logger from './logger';
/**
@ -71,3 +71,16 @@ export function backgroundEnabled(backgroundEffectEnabled: boolean) {
backgroundEffectEnabled
};
}
/**
* Signals that the local track was changed due to a change of the virtual background.
*
* @returns {{
* type: VIRTUAL_BACKGROUND_TRACK_CHANGED
*}}.
*/
export function virtualBackgroundTrackChanged() {
return {
type: VIRTUAL_BACKGROUND_TRACK_CHANGED
};
}


@ -17,7 +17,7 @@ import { updateSettings } from '../../base/settings';
import { Tooltip } from '../../base/tooltip';
import { getLocalVideoTrack } from '../../base/tracks';
import { showErrorNotification } from '../../notifications';
import { toggleBackgroundEffect } from '../actions';
import { toggleBackgroundEffect, virtualBackgroundTrackChanged } from '../actions';
import { IMAGES, BACKGROUNDS_LIMIT, VIRTUAL_BACKGROUND_TYPE, type Image } from '../constants';
import { toDataURL } from '../functions';
import logger from '../logger';
@ -351,6 +351,7 @@ function VirtualBackground({
dispatch(hideDialog());
logger.info(`Virtual background type: '${typeof options.backgroundType === 'undefined'
? 'none' : options.backgroundType}' applied!`);
dispatch(virtualBackgroundTrackChanged());
}, [ dispatch, options, _localFlipX ]);
// Prevent the selection of a new virtual background if it has not been applied by default


@ -45,7 +45,7 @@ function on_message(event)
log("warn", "No room found %s", roomAddress);
return false;
end
if not room.speakerStats then
log("warn", "No speakerStats found for %s", roomAddress);
return false;
@ -77,6 +77,32 @@ function on_message(event)
room.speakerStats['dominantSpeakerId'] = occupant.jid;
end
local facialExpression = event.stanza:get_child('facialExpression', 'http://jitsi.org/jitmeet');
if facialExpression then
local roomAddress = facialExpression.attr.room;
local room = get_room_from_jid(room_jid_match_rewrite(roomAddress));
if not room then
log("warn", "No room found %s", roomAddress);
return false;
end
if not room.speakerStats then
log("warn", "No speakerStats found for %s", roomAddress);
return false;
end
local from = event.stanza.attr.from;
local occupant = room:get_occupant_by_real_jid(from);
if not occupant then
log("warn", "No occupant %s found for %s", from, roomAddress);
return false;
end
local facialExpressions = room.speakerStats[occupant.jid].facialExpressions;
facialExpressions[facialExpression.attr.expression] =
facialExpressions[facialExpression.attr.expression] + tonumber(facialExpression.attr.duration);
end
return true
end
@ -91,6 +117,15 @@ function new_SpeakerStats(nick, context_user)
nick = nick;
context_user = context_user;
displayName = nil;
facialExpressions = {
happy = 0,
neutral = 0,
surprised = 0,
angry = 0,
fearful = 0,
disgusted = 0,
sad = 0
};
}, SpeakerStats);
end
@ -151,7 +186,9 @@ function occupant_joined(event)
-- and skip focus if sneaked into the table
if values.nick ~= nil and values.nick ~= 'focus' then
local totalDominantSpeakerTime = values.totalDominantSpeakerTime;
if totalDominantSpeakerTime > 0 or room:get_occupant_jid(jid) == nil or values:isDominantSpeaker() then
local facialExpressions = values.facialExpressions;
if totalDominantSpeakerTime > 0 or room:get_occupant_jid(jid) == nil or values:isDominantSpeaker()
or get_participant_expressions_count(facialExpressions) > 0 then
-- before sending we need to calculate current dominant speaker state
if values:isDominantSpeaker() then
local timeElapsed = math.floor(socket.gettime()*1000 - values._dominantSpeakerStart);
@ -160,7 +197,8 @@ function occupant_joined(event)
users_json[values.nick] = {
displayName = values.displayName,
totalDominantSpeakerTime = totalDominantSpeakerTime
totalDominantSpeakerTime = totalDominantSpeakerTime,
facialExpressions = facialExpressions
};
end
end
@ -194,7 +232,7 @@ function occupant_leaving(event)
if is_healthcheck_room(room.jid) then
return;
end
if not room.speakerStats then
return;
end
@ -246,3 +284,12 @@ if prosody.hosts[muc_component_host] == nil then
else
process_host(muc_component_host);
end
function get_participant_expressions_count(facialExpressions)
local count = 0;
for expression, value in pairs(facialExpressions) do
count = count + value;
end
return count;
end


@ -383,6 +383,16 @@ module.exports = (_env, argv) => {
...getBundleAnalyzerPlugin(analyzeBundle, 'external_api')
],
performance: getPerformanceHints(perfHintOptions, 35 * 1024)
}),
Object.assign({}, config, {
entry: {
'facial-expressions-worker': './react/features/facial-recognition/facialExpressionsWorker.js'
},
plugins: [
...config.plugins,
...getBundleAnalyzerPlugin(analyzeBundle, 'facial-expressions-worker')
],
performance: getPerformanceHints(perfHintOptions, 1024 * 1024)
})
];
};