jitsi-meet/react/features/speaker-stats/components/web/SpeakerStatsLabels.js

/* @flow */

import { makeStyles } from '@material-ui/core/styles';
import React from 'react';
import { useTranslation } from 'react-i18next';

import { Tooltip } from '../../../base/tooltip';
import { FACIAL_EXPRESSION_EMOJIS } from '../../../facial-recognition/constants.js';

const useStyles = makeStyles(theme => {
    return {
        labels: {
            padding: '22px 0 7px 0',
            height: 20
        },
        emojis: {
            paddingLeft: 27,
            ...theme.typography.bodyShortRegularLarge,
            lineHeight: `${theme.typography.bodyShortRegularLarge.lineHeight}px`
        }
    };
});

/**
* The type of the React {@code Component} props of {@link SpeakerStatsLabels}.
*/
type Props = {

    /**
     * True if facial recognition is not disabled.
     */
    showFacialExpressions: boolean,
};

const SpeakerStatsLabels = (props: Props) => {
    const { t } = useTranslation();
    const classes = useStyles();

    // Renders a tooltip-wrapped emoji label for every facial expression.
    const FacialExpressionsLabels = () => Object.keys(FACIAL_EXPRESSION_EMOJIS).map(
        expression => (
            <div
                className = 'expression'
                key = { expression }>
                <Tooltip
                    content = { t(`speakerStats.${expression}`) }
                    position = { 'top' }>
                    <div>
                        { FACIAL_EXPRESSION_EMOJIS[expression] }
                    </div>
                </Tooltip>
            </div>
        )
    );

    const nameTimeClass = `name-time${
        props.showFacialExpressions ? ' name-time_expressions-on' : ''
    }`;

    return (
        <div className = { `row ${classes.labels}` }>
            <div className = 'avatar' />
            <div className = { nameTimeClass }>
                <div>
                    { t('speakerStats.name') }
                </div>
                <div>
                    { t('speakerStats.speakerTime') }
                </div>
            </div>
            {
                props.showFacialExpressions
                    && <div className = { `expressions ${classes.emojis}` }>
                        <FacialExpressionsLabels />
                    </div>
            }
        </div>
    );
};

export default SpeakerStatsLabels;
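
// A minimal usage sketch (an assumption for illustration, not part of this
// module): the parent speaker-stats view would render the labels row and pass
// the config-driven flag, roughly like:
//
//     <SpeakerStatsLabels
//         showFacialExpressions = { facialRecognitionEnabled } />
//
// where `facialRecognitionEnabled` is a hypothetical boolean derived from the
// deployment config option that enables facial recognition.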