feat(virtual-backgrounds) use new Open Source model
https://google.github.io/mediapipe/solutions/models.html#selfie-segmentation
parent e3d38b197f
commit 09441c2632
@@ -166,14 +166,11 @@ export default class JitsiStreamBackgroundEffect {
         const outputMemoryOffset = this._model._getOutputMemoryOffset() / 4;
 
         for (let i = 0; i < this._segmentationPixelCount; i++) {
-            const background = this._model.HEAPF32[outputMemoryOffset + (i * 2)];
-            const person = this._model.HEAPF32[outputMemoryOffset + (i * 2) + 1];
-            const shift = Math.max(background, person);
-            const backgroundExp = Math.exp(background - shift);
-            const personExp = Math.exp(person - shift);
+            const person = this._model.HEAPF32[outputMemoryOffset + i];
 
             // Sets only the alpha component of each pixel.
-            this._segmentationMask.data[(i * 4) + 3] = (255 * personExp) / (backgroundExp + personExp);
+            this._segmentationMask.data[(i * 4) + 3] = 255 * person;
         }
         this._segmentationMaskCtx.putImageData(this._segmentationMask, 0, 0);
     }
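The mask computation changes because the model's output format changes: the previous models emit two scores per pixel (background and person), which the effect normalises with a numerically stable softmax, while the new Selfie Segmentation model emits a single per-pixel person confidence that the diff treats as already lying in the 0..1 range. A minimal sketch of the two per-pixel alpha computations follows; it is not part of the diff and the function names are illustrative only.

// Sketch only: how each model's raw output becomes the alpha value written to
// this._segmentationMask. Function names are illustrative, not from the code base.

// Old two-channel output: [background, person] scores per pixel, turned into a
// probability with a numerically stable softmax.
function alphaFromTwoChannelOutput(background, person) {
    const shift = Math.max(background, person);
    const backgroundExp = Math.exp(background - shift);
    const personExp = Math.exp(person - shift);

    return (255 * personExp) / (backgroundExp + personExp);
}

// New single-channel output: a person confidence in [0, 1], scaled to 0..255.
function alphaFromSingleChannelOutput(person) {
    return 255 * person;
}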
@@ -9,8 +9,7 @@ import JitsiStreamBackgroundEffect from './JitsiStreamBackgroundEffect';
 import createTFLiteModule from './vendor/tflite/tflite';
 import createTFLiteSIMDModule from './vendor/tflite/tflite-simd';
 
 const models = {
-    model96: 'libs/segm_lite_v681.tflite',
-    model144: 'libs/segm_full_v679.tflite'
+    modelLandscape: 'libs/selfie_segmentation_landscape.tflite'
 };
 
 let tflite;
@@ -18,11 +17,7 @@ let wasmCheck;
 let isWasmDisabled = false;
 
 const segmentationDimensions = {
-    model96: {
-        height: 96,
-        width: 160
-    },
-    model144: {
+    modelLandscape: {
         height: 144,
         width: 256
     }
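With only the landscape model left, the segmentation input is a fixed 256x144 buffer regardless of WASM SIMD support. A small sketch, not part of the diff, of the pixel count that the mask loop in the first hunk iterates over; the assumption that the count is width times height comes from the surrounding effect code, not from these hunks.

// Sketch only: the landscape model's fixed input size and the resulting
// per-frame mask pixel count (assumed to be width * height).
const modelLandscape = { height: 144, width: 256 };
const segmentationPixelCount = modelLandscape.width * modelLandscape.height;

console.log(segmentationPixelCount); // 36864 alpha values written per frame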
@@ -83,7 +78,7 @@ export async function createVirtualBackgroundEffect(virtualBackground: Object, d
     }
 
     const modelBufferOffset = tflite._getModelBufferMemoryOffset();
-    const modelResponse = await fetch(wasmCheck.feature.simd ? models.model144 : models.model96);
+    const modelResponse = await fetch(models.modelLandscape);
 
     if (!modelResponse.ok) {
         throw new Error('Failed to download tflite model!');
@@ -96,7 +91,7 @@ export async function createVirtualBackgroundEffect(virtualBackground: Object, d
     tflite._loadModel(model.byteLength);
 
     const options = {
-        ...wasmCheck.feature.simd ? segmentationDimensions.model144 : segmentationDimensions.model96,
+        ...segmentationDimensions.modelLandscape,
         virtualBackground
     };
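Taken together, the last two hunks drop the SIMD-dependent model selection: there is now one model file and one input size. Below is a sketch of the resulting loading path, not part of the diff; the helper name and the HEAPU8 copy are assumptions based on the surrounding code, since only the fetch and options lines appear in the hunks above.

// Sketch only: the simplified model-loading path after this change.
const models = {
    modelLandscape: 'libs/selfie_segmentation_landscape.tflite'
};

const segmentationDimensions = {
    modelLandscape: {
        height: 144,
        width: 256
    }
};

// Hypothetical helper; in the real code this logic lives inside
// createVirtualBackgroundEffect().
async function loadLandscapeModel(tflite, virtualBackground) {
    const modelBufferOffset = tflite._getModelBufferMemoryOffset();
    const modelResponse = await fetch(models.modelLandscape);

    if (!modelResponse.ok) {
        throw new Error('Failed to download tflite model!');
    }

    const model = await modelResponse.arrayBuffer();

    // Assumed from the surrounding code: copy the model bytes into the TFLite
    // WASM heap before asking the module to load them.
    tflite.HEAPU8.set(new Uint8Array(model), modelBufferOffset);
    tflite._loadModel(model.byteLength);

    // One fixed 256x144 landscape input, with or without WASM SIMD support.
    return {
        ...segmentationDimensions.modelLandscape,
        virtualBackground
    };
}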
@@ -1,6 +1,6 @@
 # Virtual Background on stream effects
 
-> Inspired from https://ai.googleblog.com/2020/10/background-features-in-google-meet.html and https://github.com/Volcomix/virtual-background.git
+> From https://google.github.io/mediapipe/solutions/models.html#selfie-segmentation
 
 #### Canvas 2D + CPU
@@ -22,15 +22,3 @@ More details:
 - [WebAssembly](https://webassembly.org/)
 - [WebAssembly SIMD](https://github.com/WebAssembly/simd)
 - [TFLite](https://blog.tensorflow.org/2020/07/accelerating-tensorflow-lite-xnnpack-integration.html)
-
-## LICENSE
-
-The models vendored here were downloaded in early January (they were available as early as the 4th), before Google switched the license away from Apache 2. Thus we understand they are not covered by the new license, which according to the [model card](https://drive.google.com/file/d/1lnP1bRi9CSqQQXUHa13159vLELYDgDu0/view) dates from the 21st of January.
-
-We are not lawyers, so do get legal advice if in doubt.
-
-References:
-
-- Model license discussion: https://github.com/tensorflow/tfjs/issues/4177
-- Current vendored model is discovered: https://github.com/tensorflow/tfjs/issues/4177#issuecomment-753934631
-- License change is noticed: https://github.com/tensorflow/tfjs/issues/4177#issuecomment-771536641
Binary file not shown.
Binary file not shown.
BIN react/features/stream-effects/virtual-background/vendor/models/selfie_segmentation_landscape.tflite (vendored, executable file): Binary file not shown.