diff --git a/dist/build/selfie_segmentation_landscape.tflite b/dist/build/selfie_segmentation_landscape.tflite new file mode 100644 index 0000000..4ea3f8a Binary files /dev/null and b/dist/build/selfie_segmentation_landscape.tflite differ diff --git a/dist/build/tflite-1-0-0.js b/dist/build/tflite-1-0-0.js new file mode 100644 index 0000000..7d2e41c --- /dev/null +++ b/dist/build/tflite-1-0-0.js @@ -0,0 +1,21 @@ + +var createTwilioTFLiteModule = (function() { + var _scriptDir = typeof document !== 'undefined' && document.currentScript ? document.currentScript.src : undefined; + if (typeof __filename !== 'undefined') _scriptDir = _scriptDir || __filename; + return ( +function(createTwilioTFLiteModule) { + createTwilioTFLiteModule = createTwilioTFLiteModule || {}; + +var Module=typeof createTwilioTFLiteModule!=="undefined"?createTwilioTFLiteModule:{};var readyPromiseResolve,readyPromiseReject;Module["ready"]=new Promise(function(resolve,reject){readyPromiseResolve=resolve;readyPromiseReject=reject});var moduleOverrides={};var key;for(key in Module){if(Module.hasOwnProperty(key)){moduleOverrides[key]=Module[key]}}var arguments_=[];var thisProgram="./this.program";var quit_=function(status,toThrow){throw toThrow};var ENVIRONMENT_IS_WEB=false;var ENVIRONMENT_IS_WORKER=false;var ENVIRONMENT_IS_NODE=false;var ENVIRONMENT_IS_SHELL=false;ENVIRONMENT_IS_WEB=typeof window==="object";ENVIRONMENT_IS_WORKER=typeof importScripts==="function";ENVIRONMENT_IS_NODE=typeof process==="object"&&typeof process.versions==="object"&&typeof process.versions.node==="string";ENVIRONMENT_IS_SHELL=!ENVIRONMENT_IS_WEB&&!ENVIRONMENT_IS_NODE&&!ENVIRONMENT_IS_WORKER;var scriptDirectory="";function locateFile(path){if(Module["locateFile"]){return Module["locateFile"](path,scriptDirectory)}return scriptDirectory+path}var read_,readAsync,readBinary,setWindowTitle;var nodeFS;var nodePath;if(ENVIRONMENT_IS_NODE){if(ENVIRONMENT_IS_WORKER){scriptDirectory=require("path").dirname(scriptDirectory)+"/"}else{scriptDirectory=__dirname+"/"}read_=function shell_read(filename,binary){if(!nodeFS)nodeFS=require("fs");if(!nodePath)nodePath=require("path");filename=nodePath["normalize"](filename);return nodeFS["readFileSync"](filename,binary?null:"utf8")};readBinary=function readBinary(filename){var ret=read_(filename,true);if(!ret.buffer){ret=new Uint8Array(ret)}assert(ret.buffer);return ret};if(process["argv"].length>1){thisProgram=process["argv"][1].replace(/\\/g,"/")}arguments_=process["argv"].slice(2);process["on"]("uncaughtException",function(ex){if(!(ex instanceof ExitStatus)){throw ex}});process["on"]("unhandledRejection",abort);quit_=function(status){process["exit"](status)};Module["inspect"]=function(){return"[Emscripten Module object]"}}else if(ENVIRONMENT_IS_SHELL){if(typeof read!="undefined"){read_=function shell_read(f){return read(f)}}readBinary=function readBinary(f){var data;if(typeof readbuffer==="function"){return new Uint8Array(readbuffer(f))}data=read(f,"binary");assert(typeof data==="object");return data};if(typeof scriptArgs!="undefined"){arguments_=scriptArgs}else if(typeof arguments!="undefined"){arguments_=arguments}if(typeof quit==="function"){quit_=function(status){quit(status)}}if(typeof print!=="undefined"){if(typeof console==="undefined")console={};console.log=print;console.warn=console.error=typeof printErr!=="undefined"?printErr:print}}else if(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER){if(ENVIRONMENT_IS_WORKER){scriptDirectory=self.location.href}else if(typeof 
document!=="undefined"&&document.currentScript){scriptDirectory=document.currentScript.src}if(_scriptDir){scriptDirectory=_scriptDir}if(scriptDirectory.indexOf("blob:")!==0){scriptDirectory=scriptDirectory.substr(0,scriptDirectory.lastIndexOf("/")+1)}else{scriptDirectory=""}{read_=function(url){var xhr=new XMLHttpRequest;xhr.open("GET",url,false);xhr.send(null);return xhr.responseText};if(ENVIRONMENT_IS_WORKER){readBinary=function(url){var xhr=new XMLHttpRequest;xhr.open("GET",url,false);xhr.responseType="arraybuffer";xhr.send(null);return new Uint8Array(xhr.response)}}readAsync=function(url,onload,onerror){var xhr=new XMLHttpRequest;xhr.open("GET",url,true);xhr.responseType="arraybuffer";xhr.onload=function(){if(xhr.status==200||xhr.status==0&&xhr.response){onload(xhr.response);return}onerror()};xhr.onerror=onerror;xhr.send(null)}}setWindowTitle=function(title){document.title=title}}else{}var out=Module["print"]||console.log.bind(console);var err=Module["printErr"]||console.warn.bind(console);for(key in moduleOverrides){if(moduleOverrides.hasOwnProperty(key)){Module[key]=moduleOverrides[key]}}moduleOverrides=null;if(Module["arguments"])arguments_=Module["arguments"];if(Module["thisProgram"])thisProgram=Module["thisProgram"];if(Module["quit"])quit_=Module["quit"];var wasmBinary;if(Module["wasmBinary"])wasmBinary=Module["wasmBinary"];var noExitRuntime=Module["noExitRuntime"]||true;if(typeof WebAssembly!=="object"){abort("no native wasm support detected")}var wasmMemory;var ABORT=false;var EXITSTATUS;function assert(condition,text){if(!condition){abort("Assertion failed: "+text)}}var UTF8Decoder=typeof TextDecoder!=="undefined"?new TextDecoder("utf8"):undefined;function UTF8ArrayToString(heap,idx,maxBytesToRead){var endIdx=idx+maxBytesToRead;var endPtr=idx;while(heap[endPtr]&&!(endPtr>=endIdx))++endPtr;if(endPtr-idx>16&&heap.subarray&&UTF8Decoder){return UTF8Decoder.decode(heap.subarray(idx,endPtr))}else{var str="";while(idx>10,56320|ch&1023)}}}return str}function UTF8ToString(ptr,maxBytesToRead){return ptr?UTF8ArrayToString(HEAPU8,ptr,maxBytesToRead):""}function writeAsciiToMemory(str,buffer,dontAddNull){for(var i=0;i>0]=str.charCodeAt(i)}if(!dontAddNull)HEAP8[buffer>>0]=0}function alignUp(x,multiple){if(x%multiple>0){x+=multiple-x%multiple}return x}var buffer,HEAP8,HEAPU8,HEAP16,HEAPU16,HEAP32,HEAPU32,HEAPF32,HEAPF64;function updateGlobalBufferAndViews(buf){buffer=buf;Module["HEAP8"]=HEAP8=new Int8Array(buf);Module["HEAP16"]=HEAP16=new Int16Array(buf);Module["HEAP32"]=HEAP32=new Int32Array(buf);Module["HEAPU8"]=HEAPU8=new Uint8Array(buf);Module["HEAPU16"]=HEAPU16=new Uint16Array(buf);Module["HEAPU32"]=HEAPU32=new Uint32Array(buf);Module["HEAPF32"]=HEAPF32=new Float32Array(buf);Module["HEAPF64"]=HEAPF64=new Float64Array(buf)}var INITIAL_MEMORY=Module["INITIAL_MEMORY"]||16777216;var wasmTable;var __ATPRERUN__=[];var __ATINIT__=[];var __ATMAIN__=[];var __ATPOSTRUN__=[];var runtimeInitialized=false;var runtimeExited=false;function preRun(){if(Module["preRun"]){if(typeof Module["preRun"]=="function")Module["preRun"]=[Module["preRun"]];while(Module["preRun"].length){addOnPreRun(Module["preRun"].shift())}}callRuntimeCallbacks(__ATPRERUN__)}function initRuntime(){runtimeInitialized=true;callRuntimeCallbacks(__ATINIT__)}function preMain(){callRuntimeCallbacks(__ATMAIN__)}function exitRuntime(){runtimeExited=true}function postRun(){if(Module["postRun"]){if(typeof 
Module["postRun"]=="function")Module["postRun"]=[Module["postRun"]];while(Module["postRun"].length){addOnPostRun(Module["postRun"].shift())}}callRuntimeCallbacks(__ATPOSTRUN__)}function addOnPreRun(cb){__ATPRERUN__.unshift(cb)}function addOnInit(cb){__ATINIT__.unshift(cb)}function addOnPostRun(cb){__ATPOSTRUN__.unshift(cb)}var runDependencies=0;var runDependencyWatcher=null;var dependenciesFulfilled=null;function addRunDependency(id){runDependencies++;if(Module["monitorRunDependencies"]){Module["monitorRunDependencies"](runDependencies)}}function removeRunDependency(id){runDependencies--;if(Module["monitorRunDependencies"]){Module["monitorRunDependencies"](runDependencies)}if(runDependencies==0){if(runDependencyWatcher!==null){clearInterval(runDependencyWatcher);runDependencyWatcher=null}if(dependenciesFulfilled){var callback=dependenciesFulfilled;dependenciesFulfilled=null;callback()}}}Module["preloadedImages"]={};Module["preloadedAudios"]={};function abort(what){if(Module["onAbort"]){Module["onAbort"](what)}what+="";err(what);ABORT=true;EXITSTATUS=1;what="abort("+what+"). Build with -s ASSERTIONS=1 for more info.";var e=new WebAssembly.RuntimeError(what);readyPromiseReject(e);throw e}function hasPrefix(str,prefix){return String.prototype.startsWith?str.startsWith(prefix):str.indexOf(prefix)===0}var dataURIPrefix="data:application/octet-stream;base64,";function isDataURI(filename){return hasPrefix(filename,dataURIPrefix)}var fileURIPrefix="file://";function isFileURI(filename){return hasPrefix(filename,fileURIPrefix)}var wasmBinaryFile="tflite-1-0-0.wasm";if(!isDataURI(wasmBinaryFile)){wasmBinaryFile=locateFile(wasmBinaryFile)}function getBinary(file){try{if(file==wasmBinaryFile&&wasmBinary){return new Uint8Array(wasmBinary)}if(readBinary){return readBinary(file)}else{throw"both async and sync fetching of the wasm failed"}}catch(err){abort(err)}}function getBinaryPromise(){if(!wasmBinary&&(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER)){if(typeof fetch==="function"&&!isFileURI(wasmBinaryFile)){return fetch(wasmBinaryFile,{credentials:"same-origin"}).then(function(response){if(!response["ok"]){throw"failed to load wasm binary file at '"+wasmBinaryFile+"'"}return response["arrayBuffer"]()}).catch(function(){return getBinary(wasmBinaryFile)})}else{if(readAsync){return new Promise(function(resolve,reject){readAsync(wasmBinaryFile,function(response){resolve(new Uint8Array(response))},reject)})}}}return Promise.resolve().then(function(){return getBinary(wasmBinaryFile)})}function createWasm(){var info={"a":asmLibraryArg};function receiveInstance(instance,module){var exports=instance.exports;Module["asm"]=exports;wasmMemory=Module["asm"]["q"];updateGlobalBufferAndViews(wasmMemory.buffer);wasmTable=Module["asm"]["D"];addOnInit(Module["asm"]["r"]);removeRunDependency("wasm-instantiate")}addRunDependency("wasm-instantiate");function receiveInstantiatedSource(output){receiveInstance(output["instance"])}function instantiateArrayBuffer(receiver){return getBinaryPromise().then(function(binary){var result=WebAssembly.instantiate(binary,info);return result}).then(receiver,function(reason){err("failed to asynchronously prepare wasm: "+reason);abort(reason)})}function instantiateAsync(){if(!wasmBinary&&typeof WebAssembly.instantiateStreaming==="function"&&!isDataURI(wasmBinaryFile)&&!isFileURI(wasmBinaryFile)&&typeof fetch==="function"){return fetch(wasmBinaryFile,{credentials:"same-origin"}).then(function(response){var result=WebAssembly.instantiateStreaming(response,info);return 
result.then(receiveInstantiatedSource,function(reason){err("wasm streaming compile failed: "+reason);err("falling back to ArrayBuffer instantiation");return instantiateArrayBuffer(receiveInstantiatedSource)})})}else{return instantiateArrayBuffer(receiveInstantiatedSource)}}if(Module["instantiateWasm"]){try{var exports=Module["instantiateWasm"](info,receiveInstance);return exports}catch(e){err("Module.instantiateWasm callback failed with error: "+e);return false}}instantiateAsync().catch(readyPromiseReject);return{}}function callRuntimeCallbacks(callbacks){while(callbacks.length>0){var callback=callbacks.shift();if(typeof callback=="function"){callback(Module);continue}var func=callback.func;if(typeof func==="number"){if(callback.arg===undefined){wasmTable.get(func)()}else{wasmTable.get(func)(callback.arg)}}else{func(callback.arg===undefined?null:callback.arg)}}}var runtimeKeepaliveCounter=0;function keepRuntimeAlive(){return noExitRuntime||runtimeKeepaliveCounter>0}function _abort(){abort()}var _emscripten_get_now;if(ENVIRONMENT_IS_NODE){_emscripten_get_now=function(){var t=process["hrtime"]();return t[0]*1e3+t[1]/1e6}}else if(typeof dateNow!=="undefined"){_emscripten_get_now=dateNow}else _emscripten_get_now=function(){return performance.now()};var _emscripten_get_now_is_monotonic=true;function setErrNo(value){HEAP32[___errno_location()>>2]=value;return value}function _clock_gettime(clk_id,tp){var now;if(clk_id===0){now=Date.now()}else if((clk_id===1||clk_id===4)&&_emscripten_get_now_is_monotonic){now=_emscripten_get_now()}else{setErrNo(28);return-1}HEAP32[tp>>2]=now/1e3|0;HEAP32[tp+4>>2]=now%1e3*1e3*1e3|0;return 0}function _dlopen(filename,flag){abort("To use dlopen, you need to use Emscripten's linking support, see https://github.com/emscripten-core/emscripten/wiki/Linking")}function _dlsym(handle,symbol){abort("To use dlopen, you need to use Emscripten's linking support, see https://github.com/emscripten-core/emscripten/wiki/Linking")}function _emscripten_get_heap_max(){return 2147483648}function _emscripten_memcpy_big(dest,src,num){HEAPU8.copyWithin(dest,src,src+num)}function emscripten_realloc_buffer(size){try{wasmMemory.grow(size-buffer.byteLength+65535>>>16);updateGlobalBufferAndViews(wasmMemory.buffer);return 1}catch(e){}}function _emscripten_resize_heap(requestedSize){var oldSize=HEAPU8.length;var maxHeapSize=2147483648;if(requestedSize>maxHeapSize){return false}for(var cutDown=1;cutDown<=4;cutDown*=2){var overGrownHeapSize=oldSize*(1+.2/cutDown);overGrownHeapSize=Math.min(overGrownHeapSize,requestedSize+100663296);var newSize=Math.min(maxHeapSize,alignUp(Math.max(requestedSize,overGrownHeapSize),65536));var replacement=emscripten_realloc_buffer(newSize);if(replacement){return true}}return false}function _emscripten_thread_sleep(msecs){var start=_emscripten_get_now();while(_emscripten_get_now()-start0){return}preRun();if(runDependencies>0){return}function doRun(){if(calledRun)return;calledRun=true;Module["calledRun"]=true;if(ABORT)return;initRuntime();preMain();readyPromiseResolve(Module);if(Module["onRuntimeInitialized"])Module["onRuntimeInitialized"]();postRun()}if(Module["setStatus"]){Module["setStatus"]("Running...");setTimeout(function(){setTimeout(function(){Module["setStatus"]("")},1);doRun()},1)}else{doRun()}}Module["run"]=run;function exit(status,implicit){EXITSTATUS=status;if(implicit&&keepRuntimeAlive()&&status===0){return}if(keepRuntimeAlive()){}else{exitRuntime();if(Module["onExit"])Module["onExit"](status);ABORT=true}quit_(status,new 
ExitStatus(status))}if(Module["preInit"]){if(typeof Module["preInit"]=="function")Module["preInit"]=[Module["preInit"]];while(Module["preInit"].length>0){Module["preInit"].pop()()}}run(); + + + return createTwilioTFLiteModule.ready +} +); +})(); +if (typeof exports === 'object' && typeof module === 'object') + module.exports = createTwilioTFLiteModule; +else if (typeof define === 'function' && define['amd']) + define([], function() { return createTwilioTFLiteModule; }); +else if (typeof exports === 'object') + exports["createTwilioTFLiteModule"] = createTwilioTFLiteModule; diff --git a/dist/build/tflite-1-0-0.wasm b/dist/build/tflite-1-0-0.wasm new file mode 100755 index 0000000..ead8e52 Binary files /dev/null and b/dist/build/tflite-1-0-0.wasm differ diff --git a/dist/build/tflite-simd-1-0-0.js b/dist/build/tflite-simd-1-0-0.js new file mode 100644 index 0000000..d68e168 --- /dev/null +++ b/dist/build/tflite-simd-1-0-0.js @@ -0,0 +1,21 @@ + +var createTwilioTFLiteSIMDModule = (function() { + var _scriptDir = typeof document !== 'undefined' && document.currentScript ? document.currentScript.src : undefined; + if (typeof __filename !== 'undefined') _scriptDir = _scriptDir || __filename; + return ( +function(createTwilioTFLiteSIMDModule) { + createTwilioTFLiteSIMDModule = createTwilioTFLiteSIMDModule || {}; + +var Module=typeof createTwilioTFLiteSIMDModule!=="undefined"?createTwilioTFLiteSIMDModule:{};var readyPromiseResolve,readyPromiseReject;Module["ready"]=new Promise(function(resolve,reject){readyPromiseResolve=resolve;readyPromiseReject=reject});var moduleOverrides={};var key;for(key in Module){if(Module.hasOwnProperty(key)){moduleOverrides[key]=Module[key]}}var arguments_=[];var thisProgram="./this.program";var quit_=function(status,toThrow){throw toThrow};var ENVIRONMENT_IS_WEB=false;var ENVIRONMENT_IS_WORKER=false;var ENVIRONMENT_IS_NODE=false;var ENVIRONMENT_IS_SHELL=false;ENVIRONMENT_IS_WEB=typeof window==="object";ENVIRONMENT_IS_WORKER=typeof importScripts==="function";ENVIRONMENT_IS_NODE=typeof process==="object"&&typeof process.versions==="object"&&typeof process.versions.node==="string";ENVIRONMENT_IS_SHELL=!ENVIRONMENT_IS_WEB&&!ENVIRONMENT_IS_NODE&&!ENVIRONMENT_IS_WORKER;var scriptDirectory="";function locateFile(path){if(Module["locateFile"]){return Module["locateFile"](path,scriptDirectory)}return scriptDirectory+path}var read_,readAsync,readBinary,setWindowTitle;var nodeFS;var nodePath;if(ENVIRONMENT_IS_NODE){if(ENVIRONMENT_IS_WORKER){scriptDirectory=require("path").dirname(scriptDirectory)+"/"}else{scriptDirectory=__dirname+"/"}read_=function shell_read(filename,binary){if(!nodeFS)nodeFS=require("fs");if(!nodePath)nodePath=require("path");filename=nodePath["normalize"](filename);return nodeFS["readFileSync"](filename,binary?null:"utf8")};readBinary=function readBinary(filename){var ret=read_(filename,true);if(!ret.buffer){ret=new Uint8Array(ret)}assert(ret.buffer);return ret};if(process["argv"].length>1){thisProgram=process["argv"][1].replace(/\\/g,"/")}arguments_=process["argv"].slice(2);process["on"]("uncaughtException",function(ex){if(!(ex instanceof ExitStatus)){throw ex}});process["on"]("unhandledRejection",abort);quit_=function(status){process["exit"](status)};Module["inspect"]=function(){return"[Emscripten Module object]"}}else if(ENVIRONMENT_IS_SHELL){if(typeof read!="undefined"){read_=function shell_read(f){return read(f)}}readBinary=function readBinary(f){var data;if(typeof readbuffer==="function"){return new 
Uint8Array(readbuffer(f))}data=read(f,"binary");assert(typeof data==="object");return data};if(typeof scriptArgs!="undefined"){arguments_=scriptArgs}else if(typeof arguments!="undefined"){arguments_=arguments}if(typeof quit==="function"){quit_=function(status){quit(status)}}if(typeof print!=="undefined"){if(typeof console==="undefined")console={};console.log=print;console.warn=console.error=typeof printErr!=="undefined"?printErr:print}}else if(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER){if(ENVIRONMENT_IS_WORKER){scriptDirectory=self.location.href}else if(typeof document!=="undefined"&&document.currentScript){scriptDirectory=document.currentScript.src}if(_scriptDir){scriptDirectory=_scriptDir}if(scriptDirectory.indexOf("blob:")!==0){scriptDirectory=scriptDirectory.substr(0,scriptDirectory.lastIndexOf("/")+1)}else{scriptDirectory=""}{read_=function(url){var xhr=new XMLHttpRequest;xhr.open("GET",url,false);xhr.send(null);return xhr.responseText};if(ENVIRONMENT_IS_WORKER){readBinary=function(url){var xhr=new XMLHttpRequest;xhr.open("GET",url,false);xhr.responseType="arraybuffer";xhr.send(null);return new Uint8Array(xhr.response)}}readAsync=function(url,onload,onerror){var xhr=new XMLHttpRequest;xhr.open("GET",url,true);xhr.responseType="arraybuffer";xhr.onload=function(){if(xhr.status==200||xhr.status==0&&xhr.response){onload(xhr.response);return}onerror()};xhr.onerror=onerror;xhr.send(null)}}setWindowTitle=function(title){document.title=title}}else{}var out=Module["print"]||console.log.bind(console);var err=Module["printErr"]||console.warn.bind(console);for(key in moduleOverrides){if(moduleOverrides.hasOwnProperty(key)){Module[key]=moduleOverrides[key]}}moduleOverrides=null;if(Module["arguments"])arguments_=Module["arguments"];if(Module["thisProgram"])thisProgram=Module["thisProgram"];if(Module["quit"])quit_=Module["quit"];var wasmBinary;if(Module["wasmBinary"])wasmBinary=Module["wasmBinary"];var noExitRuntime=Module["noExitRuntime"]||true;if(typeof WebAssembly!=="object"){abort("no native wasm support detected")}var wasmMemory;var ABORT=false;var EXITSTATUS;function assert(condition,text){if(!condition){abort("Assertion failed: "+text)}}var UTF8Decoder=typeof TextDecoder!=="undefined"?new TextDecoder("utf8"):undefined;function UTF8ArrayToString(heap,idx,maxBytesToRead){var endIdx=idx+maxBytesToRead;var endPtr=idx;while(heap[endPtr]&&!(endPtr>=endIdx))++endPtr;if(endPtr-idx>16&&heap.subarray&&UTF8Decoder){return UTF8Decoder.decode(heap.subarray(idx,endPtr))}else{var str="";while(idx>10,56320|ch&1023)}}}return str}function UTF8ToString(ptr,maxBytesToRead){return ptr?UTF8ArrayToString(HEAPU8,ptr,maxBytesToRead):""}function writeAsciiToMemory(str,buffer,dontAddNull){for(var i=0;i>0]=str.charCodeAt(i)}if(!dontAddNull)HEAP8[buffer>>0]=0}function alignUp(x,multiple){if(x%multiple>0){x+=multiple-x%multiple}return x}var buffer,HEAP8,HEAPU8,HEAP16,HEAPU16,HEAP32,HEAPU32,HEAPF32,HEAPF64;function updateGlobalBufferAndViews(buf){buffer=buf;Module["HEAP8"]=HEAP8=new Int8Array(buf);Module["HEAP16"]=HEAP16=new Int16Array(buf);Module["HEAP32"]=HEAP32=new Int32Array(buf);Module["HEAPU8"]=HEAPU8=new Uint8Array(buf);Module["HEAPU16"]=HEAPU16=new Uint16Array(buf);Module["HEAPU32"]=HEAPU32=new Uint32Array(buf);Module["HEAPF32"]=HEAPF32=new Float32Array(buf);Module["HEAPF64"]=HEAPF64=new Float64Array(buf)}var INITIAL_MEMORY=Module["INITIAL_MEMORY"]||16777216;var wasmTable;var __ATPRERUN__=[];var __ATINIT__=[];var __ATMAIN__=[];var __ATPOSTRUN__=[];var runtimeInitialized=false;var runtimeExited=false;function 
preRun(){if(Module["preRun"]){if(typeof Module["preRun"]=="function")Module["preRun"]=[Module["preRun"]];while(Module["preRun"].length){addOnPreRun(Module["preRun"].shift())}}callRuntimeCallbacks(__ATPRERUN__)}function initRuntime(){runtimeInitialized=true;callRuntimeCallbacks(__ATINIT__)}function preMain(){callRuntimeCallbacks(__ATMAIN__)}function exitRuntime(){runtimeExited=true}function postRun(){if(Module["postRun"]){if(typeof Module["postRun"]=="function")Module["postRun"]=[Module["postRun"]];while(Module["postRun"].length){addOnPostRun(Module["postRun"].shift())}}callRuntimeCallbacks(__ATPOSTRUN__)}function addOnPreRun(cb){__ATPRERUN__.unshift(cb)}function addOnInit(cb){__ATINIT__.unshift(cb)}function addOnPostRun(cb){__ATPOSTRUN__.unshift(cb)}var runDependencies=0;var runDependencyWatcher=null;var dependenciesFulfilled=null;function addRunDependency(id){runDependencies++;if(Module["monitorRunDependencies"]){Module["monitorRunDependencies"](runDependencies)}}function removeRunDependency(id){runDependencies--;if(Module["monitorRunDependencies"]){Module["monitorRunDependencies"](runDependencies)}if(runDependencies==0){if(runDependencyWatcher!==null){clearInterval(runDependencyWatcher);runDependencyWatcher=null}if(dependenciesFulfilled){var callback=dependenciesFulfilled;dependenciesFulfilled=null;callback()}}}Module["preloadedImages"]={};Module["preloadedAudios"]={};function abort(what){if(Module["onAbort"]){Module["onAbort"](what)}what+="";err(what);ABORT=true;EXITSTATUS=1;what="abort("+what+"). Build with -s ASSERTIONS=1 for more info.";var e=new WebAssembly.RuntimeError(what);readyPromiseReject(e);throw e}function hasPrefix(str,prefix){return String.prototype.startsWith?str.startsWith(prefix):str.indexOf(prefix)===0}var dataURIPrefix="data:application/octet-stream;base64,";function isDataURI(filename){return hasPrefix(filename,dataURIPrefix)}var fileURIPrefix="file://";function isFileURI(filename){return hasPrefix(filename,fileURIPrefix)}var wasmBinaryFile="tflite-simd-1-0-0.wasm";if(!isDataURI(wasmBinaryFile)){wasmBinaryFile=locateFile(wasmBinaryFile)}function getBinary(file){try{if(file==wasmBinaryFile&&wasmBinary){return new Uint8Array(wasmBinary)}if(readBinary){return readBinary(file)}else{throw"both async and sync fetching of the wasm failed"}}catch(err){abort(err)}}function getBinaryPromise(){if(!wasmBinary&&(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER)){if(typeof fetch==="function"&&!isFileURI(wasmBinaryFile)){return fetch(wasmBinaryFile,{credentials:"same-origin"}).then(function(response){if(!response["ok"]){throw"failed to load wasm binary file at '"+wasmBinaryFile+"'"}return response["arrayBuffer"]()}).catch(function(){return getBinary(wasmBinaryFile)})}else{if(readAsync){return new Promise(function(resolve,reject){readAsync(wasmBinaryFile,function(response){resolve(new Uint8Array(response))},reject)})}}}return Promise.resolve().then(function(){return getBinary(wasmBinaryFile)})}function createWasm(){var info={"a":asmLibraryArg};function receiveInstance(instance,module){var exports=instance.exports;Module["asm"]=exports;wasmMemory=Module["asm"]["q"];updateGlobalBufferAndViews(wasmMemory.buffer);wasmTable=Module["asm"]["D"];addOnInit(Module["asm"]["r"]);removeRunDependency("wasm-instantiate")}addRunDependency("wasm-instantiate");function receiveInstantiatedSource(output){receiveInstance(output["instance"])}function instantiateArrayBuffer(receiver){return getBinaryPromise().then(function(binary){var result=WebAssembly.instantiate(binary,info);return 
result}).then(receiver,function(reason){err("failed to asynchronously prepare wasm: "+reason);abort(reason)})}function instantiateAsync(){if(!wasmBinary&&typeof WebAssembly.instantiateStreaming==="function"&&!isDataURI(wasmBinaryFile)&&!isFileURI(wasmBinaryFile)&&typeof fetch==="function"){return fetch(wasmBinaryFile,{credentials:"same-origin"}).then(function(response){var result=WebAssembly.instantiateStreaming(response,info);return result.then(receiveInstantiatedSource,function(reason){err("wasm streaming compile failed: "+reason);err("falling back to ArrayBuffer instantiation");return instantiateArrayBuffer(receiveInstantiatedSource)})})}else{return instantiateArrayBuffer(receiveInstantiatedSource)}}if(Module["instantiateWasm"]){try{var exports=Module["instantiateWasm"](info,receiveInstance);return exports}catch(e){err("Module.instantiateWasm callback failed with error: "+e);return false}}instantiateAsync().catch(readyPromiseReject);return{}}function callRuntimeCallbacks(callbacks){while(callbacks.length>0){var callback=callbacks.shift();if(typeof callback=="function"){callback(Module);continue}var func=callback.func;if(typeof func==="number"){if(callback.arg===undefined){wasmTable.get(func)()}else{wasmTable.get(func)(callback.arg)}}else{func(callback.arg===undefined?null:callback.arg)}}}var runtimeKeepaliveCounter=0;function keepRuntimeAlive(){return noExitRuntime||runtimeKeepaliveCounter>0}function _abort(){abort()}var _emscripten_get_now;if(ENVIRONMENT_IS_NODE){_emscripten_get_now=function(){var t=process["hrtime"]();return t[0]*1e3+t[1]/1e6}}else if(typeof dateNow!=="undefined"){_emscripten_get_now=dateNow}else _emscripten_get_now=function(){return performance.now()};var _emscripten_get_now_is_monotonic=true;function setErrNo(value){HEAP32[___errno_location()>>2]=value;return value}function _clock_gettime(clk_id,tp){var now;if(clk_id===0){now=Date.now()}else if((clk_id===1||clk_id===4)&&_emscripten_get_now_is_monotonic){now=_emscripten_get_now()}else{setErrNo(28);return-1}HEAP32[tp>>2]=now/1e3|0;HEAP32[tp+4>>2]=now%1e3*1e3*1e3|0;return 0}function _dlopen(filename,flag){abort("To use dlopen, you need to use Emscripten's linking support, see https://github.com/emscripten-core/emscripten/wiki/Linking")}function _dlsym(handle,symbol){abort("To use dlopen, you need to use Emscripten's linking support, see https://github.com/emscripten-core/emscripten/wiki/Linking")}function _emscripten_get_heap_max(){return 2147483648}function _emscripten_memcpy_big(dest,src,num){HEAPU8.copyWithin(dest,src,src+num)}function emscripten_realloc_buffer(size){try{wasmMemory.grow(size-buffer.byteLength+65535>>>16);updateGlobalBufferAndViews(wasmMemory.buffer);return 1}catch(e){}}function _emscripten_resize_heap(requestedSize){var oldSize=HEAPU8.length;var maxHeapSize=2147483648;if(requestedSize>maxHeapSize){return false}for(var cutDown=1;cutDown<=4;cutDown*=2){var overGrownHeapSize=oldSize*(1+.2/cutDown);overGrownHeapSize=Math.min(overGrownHeapSize,requestedSize+100663296);var newSize=Math.min(maxHeapSize,alignUp(Math.max(requestedSize,overGrownHeapSize),65536));var replacement=emscripten_realloc_buffer(newSize);if(replacement){return true}}return false}function _emscripten_thread_sleep(msecs){var start=_emscripten_get_now();while(_emscripten_get_now()-start0){return}preRun();if(runDependencies>0){return}function 
doRun(){if(calledRun)return;calledRun=true;Module["calledRun"]=true;if(ABORT)return;initRuntime();preMain();readyPromiseResolve(Module);if(Module["onRuntimeInitialized"])Module["onRuntimeInitialized"]();postRun()}if(Module["setStatus"]){Module["setStatus"]("Running...");setTimeout(function(){setTimeout(function(){Module["setStatus"]("")},1);doRun()},1)}else{doRun()}}Module["run"]=run;function exit(status,implicit){EXITSTATUS=status;if(implicit&&keepRuntimeAlive()&&status===0){return}if(keepRuntimeAlive()){}else{exitRuntime();if(Module["onExit"])Module["onExit"](status);ABORT=true}quit_(status,new ExitStatus(status))}if(Module["preInit"]){if(typeof Module["preInit"]=="function")Module["preInit"]=[Module["preInit"]];while(Module["preInit"].length>0){Module["preInit"].pop()()}}run(); + + + return createTwilioTFLiteSIMDModule.ready +} +); +})(); +if (typeof exports === 'object' && typeof module === 'object') + module.exports = createTwilioTFLiteSIMDModule; +else if (typeof define === 'function' && define['amd']) + define([], function() { return createTwilioTFLiteSIMDModule; }); +else if (typeof exports === 'object') + exports["createTwilioTFLiteSIMDModule"] = createTwilioTFLiteSIMDModule; diff --git a/dist/build/tflite-simd-1-0-0.wasm b/dist/build/tflite-simd-1-0-0.wasm new file mode 100755 index 0000000..bb24d2d Binary files /dev/null and b/dist/build/tflite-simd-1-0-0.wasm differ diff --git a/dist/build/twilio-video-processors.js b/dist/build/twilio-video-processors.js new file mode 100644 index 0000000..952b68a --- /dev/null +++ b/dist/build/twilio-video-processors.js @@ -0,0 +1,69178 @@ +/*! twilio-video-processors.js 1.0.1 + +The following license applies to all parts of this software except as +documented below. + + Copyright (C) 2021 Twilio Inc. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + 3. Neither the name of Twilio nor the names of its contributors may + be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ +(function(){function r(e,n,t){function o(i,f){if(!n[i]){if(!e[i]){var c="function"==typeof require&&require;if(!f&&c)return c(i,!0);if(u)return u(i,!0);var a=new Error("Cannot find module '"+i+"'");throw a.code="MODULE_NOT_FOUND",a}var p=n[i]={exports:{}};e[i][0].call(p.exports,function(r){var n=e[i][1][r];return o(n||r)},p,p.exports,r,e,n,t)}return n[i].exports}for(var u="function"==typeof require&&require,i=0;i 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; } + if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; } + if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; } + if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; } + if (t[2]) _.ops.pop(); + _.trys.pop(); continue; + } + op = body.call(thisArg, _); + } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; } + if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true }; + } +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.BackgroundProcessor = void 0; +require("@tensorflow/tfjs-backend-webgl"); +require("@tensorflow/tfjs-backend-cpu"); +var body_pix_1 = require("@tensorflow-models/body-pix"); +var Processor_1 = require("../Processor"); +var Benchmark_1 = require("../../utils/Benchmark"); +var version_1 = require("../../utils/version"); +var constants_1 = require("../../constants"); +/** + * @private + */ +var BackgroundProcessor = /** @class */ (function (_super) { + __extends(BackgroundProcessor, _super); + function BackgroundProcessor(options) { + var _this = _super.call(this) || this; + _this._currentMask = new Uint8ClampedArray(); + _this._debounce = constants_1.DEBOUNCE; + _this._dummyImageData = new ImageData(1, 1); + _this._historyCount = constants_1.HISTORY_COUNT; + _this._inferenceConfig = constants_1.INFERENCE_CONFIG; + _this._inferenceDimensions = constants_1.WASM_INFERENCE_DIMENSIONS; + _this._inputMemoryOffset = 0; + // tslint:disable-next-line no-unused-variable + _this._isSimdEnabled = null; + _this._maskBlurRadius = constants_1.MASK_BLUR_RADIUS; + _this._maskUsageCounter = 0; + _this._outputMemoryOffset = 0; + _this._personProbabilityThreshold = constants_1.PERSON_PROBABILITY_THRESHOLD; + // tslint:disable-next-line no-unused-variable + _this._version = version_1.version; + if (typeof options.assetsPath !== 'string') { + throw new Error('assetsPath parameter is missing'); + } + var assetsPath = options.assetsPath; + if (assetsPath && assetsPath[assetsPath.length - 1] !== '/') { + assetsPath += '/'; + } + _this.maskBlurRadius = options.maskBlurRadius; + _this._assetsPath = assetsPath; + _this._debounce = options.debounce || constants_1.DEBOUNCE; + _this._historyCount = options.historyCount || constants_1.HISTORY_COUNT; + _this._inferenceConfig = options.inferenceConfig || constants_1.INFERENCE_CONFIG; + _this._personProbabilityThreshold = options.personProbabilityThreshold || constants_1.PERSON_PROBABILITY_THRESHOLD; + _this._useWasm = typeof options.useWasm === 'boolean' ? options.useWasm : true; + _this._inferenceDimensions = options.inferenceDimensions || + (_this._useWasm ? 
constants_1.WASM_INFERENCE_DIMENSIONS : constants_1.BODYPIX_INFERENCE_DIMENSIONS); + _this._benchmark = new Benchmark_1.Benchmark(); + _this._inputCanvas = document.createElement('canvas'); + _this._inputContext = _this._inputCanvas.getContext('2d'); + _this._maskCanvas = new OffscreenCanvas(1, 1); + _this._maskContext = _this._maskCanvas.getContext('2d'); + _this._outputCanvas = document.createElement('canvas'); + _this._outputContext = _this._outputCanvas.getContext('2d'); + _this._masks = []; + return _this; + } + BackgroundProcessor._loadModel = function (config) { + if (config === void 0) { config = constants_1.MODEL_CONFIG; } + return __awaiter(this, void 0, void 0, function () { + var _a; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: + _a = BackgroundProcessor; + return [4 /*yield*/, body_pix_1.load(config) + .catch(function (error) { return console.error('Unable to load model.', error); })]; + case 1: + _a._model = (_b.sent()) || null; + return [2 /*return*/]; + } + }); + }); + }; + Object.defineProperty(BackgroundProcessor.prototype, "maskBlurRadius", { + /** + * The current blur radius when smoothing out the edges of the person's mask. + */ + get: function () { + return this._maskBlurRadius; + }, + /** + * Set a new blur radius to be used when smoothing out the edges of the person's mask. + */ + set: function (radius) { + if (typeof radius !== 'number' || radius < 0) { + console.warn("Valid mask blur radius not found. Using " + constants_1.MASK_BLUR_RADIUS + " as default."); + radius = constants_1.MASK_BLUR_RADIUS; + } + this._maskBlurRadius = radius; + }, + enumerable: false, + configurable: true + }); + /** + * Load the segmentation model. + * Call this method before attaching the processor to ensure + * video frames are processed correctly. + */ + BackgroundProcessor.prototype.loadModel = function () { + return __awaiter(this, void 0, void 0, function () { + var _a, tflite, modelResponse, model, modelBufferOffset; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: return [4 /*yield*/, Promise.all([ + BackgroundProcessor._loadModel(), + this._loadTwilioTfLite(), + fetch(this._assetsPath + constants_1.MODEL_NAME), + ])]; + case 1: + _a = _b.sent(), tflite = _a[1], modelResponse = _a[2]; + return [4 /*yield*/, modelResponse.arrayBuffer()]; + case 2: + model = _b.sent(); + modelBufferOffset = tflite._getModelBufferMemoryOffset(); + tflite.HEAPU8.set(new Uint8Array(model), modelBufferOffset); + tflite._loadModel(model.byteLength); + this._inputMemoryOffset = tflite._getInputMemoryOffset() / 4; + this._outputMemoryOffset = tflite._getOutputMemoryOffset() / 4; + this._tflite = tflite; + return [2 /*return*/]; + } + }); + }); + }; + /** + * Apply a transform to the background of an input video frame and leaving + * the foreground (person(s)) untouched. Any exception detected will + * result in the frame being dropped. + * @param inputFrameBuffer - The source of the input frame to process. + * @param outputFrameBuffer - The output frame buffer to use to draw the processed frame. 
+ */ + BackgroundProcessor.prototype.processFrame = function (inputFrameBuffer, outputFrameBuffer) { + return __awaiter(this, void 0, void 0, function () { + var inputFrame, captureWidth, captureHeight, _a, inferenceWidth, inferenceHeight, reInitDummyImage, personMask; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: + if (!BackgroundProcessor._model || !this._tflite) { + return [2 /*return*/]; + } + if (!inputFrameBuffer || !outputFrameBuffer) { + throw new Error('Missing input or output frame buffer'); + } + this._benchmark.end('captureFrameDelay'); + this._benchmark.start('processFrameDelay'); + inputFrame = inputFrameBuffer; + captureWidth = inputFrame.width, captureHeight = inputFrame.height; + _a = this._inferenceDimensions, inferenceWidth = _a.width, inferenceHeight = _a.height; + if (this._outputCanvas !== outputFrameBuffer) { + this._outputCanvas = outputFrameBuffer; + this._outputContext = outputFrameBuffer.getContext('2d'); + } + reInitDummyImage = false; + if (this._inputCanvas.width !== inferenceWidth) { + this._inputCanvas.width = inferenceWidth; + this._maskCanvas.width = inferenceWidth; + reInitDummyImage = true; + } + if (this._inputCanvas.height !== inferenceHeight) { + this._inputCanvas.height = inferenceHeight; + this._maskCanvas.height = inferenceHeight; + reInitDummyImage = true; + } + if (reInitDummyImage) { + this._dummyImageData = new ImageData(new Uint8ClampedArray(inferenceWidth * inferenceHeight * 4), inferenceWidth, inferenceHeight); + } + return [4 /*yield*/, this._createPersonMask(inputFrame)]; + case 1: + personMask = _b.sent(); + this._benchmark.start('imageCompositionDelay'); + this._maskContext.putImageData(personMask, 0, 0); + this._outputContext.save(); + this._outputContext.filter = "blur(" + this._maskBlurRadius + "px)"; + this._outputContext.globalCompositeOperation = 'copy'; + this._outputContext.drawImage(this._maskCanvas, 0, 0, captureWidth, captureHeight); + this._outputContext.filter = 'none'; + this._outputContext.globalCompositeOperation = 'source-in'; + this._outputContext.drawImage(inputFrame, 0, 0, captureWidth, captureHeight); + this._outputContext.globalCompositeOperation = 'destination-over'; + this._setBackground(inputFrame); + this._outputContext.restore(); + this._benchmark.end('imageCompositionDelay'); + this._benchmark.end('processFrameDelay'); + this._benchmark.end('totalProcessingDelay'); + // NOTE (csantos): Start the benchmark from here so we can include the delay from the Video sdk + // for a more accurate fps + this._benchmark.start('totalProcessingDelay'); + this._benchmark.start('captureFrameDelay'); + return [2 /*return*/]; + } + }); + }); + }; + BackgroundProcessor.prototype._addMask = function (mask) { + if (this._masks.length >= this._historyCount) { + this._masks.splice(0, this._masks.length - this._historyCount + 1); + } + this._masks.push(mask); + }; + BackgroundProcessor.prototype._applyAlpha = function (imageData) { + var weightedSum = this._masks.reduce(function (sum, mask, j) { return sum + (j + 1) * (j + 1); }, 0); + var pixels = imageData.height * imageData.width; + var _loop_1 = function (i) { + var w = this_1._masks.reduce(function (sum, mask, j) { return sum + mask[i] * (j + 1) * (j + 1); }, 0) / weightedSum; + imageData.data[i * 4 + 3] = Math.round(w * 255); + }; + var this_1 = this; + for (var i = 0; i < pixels; i++) { + _loop_1(i); + } + }; + BackgroundProcessor.prototype._createPersonMask = function (inputFrame) { + return __awaiter(this, void 0, void 0, function () { + var 
imageData, shouldRunInference, _a, _b; + return __generator(this, function (_c) { + switch (_c.label) { + case 0: + imageData = this._dummyImageData; + shouldRunInference = this._maskUsageCounter < 1; + this._benchmark.start('inputImageResizeDelay'); + if (shouldRunInference) { + imageData = this._getResizedInputImageData(inputFrame); + } + this._benchmark.end('inputImageResizeDelay'); + this._benchmark.start('segmentationDelay'); + if (!shouldRunInference) return [3 /*break*/, 4]; + _a = this; + if (!this._useWasm) return [3 /*break*/, 1]; + _b = this._runTwilioTfLiteInference(imageData); + return [3 /*break*/, 3]; + case 1: return [4 /*yield*/, this._runBodyPixInference(imageData)]; + case 2: + _b = _c.sent(); + _c.label = 3; + case 3: + _a._currentMask = _b; + this._maskUsageCounter = this._debounce; + _c.label = 4; + case 4: + this._addMask(this._currentMask); + this._applyAlpha(imageData); + this._maskUsageCounter--; + this._benchmark.end('segmentationDelay'); + return [2 /*return*/, imageData]; + } + }); + }); + }; + BackgroundProcessor.prototype._getResizedInputImageData = function (inputFrame) { + var _a = this._inputCanvas, width = _a.width, height = _a.height; + this._inputContext.drawImage(inputFrame, 0, 0, width, height); + var imageData = this._inputContext.getImageData(0, 0, width, height); + return imageData; + }; + BackgroundProcessor.prototype._loadJs = function (url) { + return new Promise(function (resolve, reject) { + var script = document.createElement('script'); + script.onload = function () { return resolve(); }; + script.onerror = reject; + document.head.append(script); + script.src = url; + }); + }; + BackgroundProcessor.prototype._loadTwilioTfLite = function () { + return __awaiter(this, void 0, void 0, function () { + var tflite, _a; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: return [4 /*yield*/, this._loadJs(this._assetsPath + constants_1.TFLITE_SIMD_LOADER_NAME)]; + case 1: + _b.sent(); + _b.label = 2; + case 2: + _b.trys.push([2, 4, , 7]); + return [4 /*yield*/, window.createTwilioTFLiteSIMDModule()]; + case 3: + tflite = _b.sent(); + this._isSimdEnabled = true; + return [3 /*break*/, 7]; + case 4: + _a = _b.sent(); + console.warn('SIMD not supported. 
You may experience poor quality of background replacement.'); + return [4 /*yield*/, this._loadJs(this._assetsPath + constants_1.TFLITE_LOADER_NAME)]; + case 5: + _b.sent(); + return [4 /*yield*/, window.createTwilioTFLiteModule()]; + case 6: + tflite = _b.sent(); + this._isSimdEnabled = false; + return [3 /*break*/, 7]; + case 7: return [2 /*return*/, tflite]; + } + }); + }); + }; + BackgroundProcessor.prototype._runBodyPixInference = function (inputImage) { + return __awaiter(this, void 0, void 0, function () { + var segment; + return __generator(this, function (_a) { + switch (_a.label) { + case 0: return [4 /*yield*/, BackgroundProcessor._model.segmentPerson(inputImage, this._inferenceConfig)]; + case 1: + segment = _a.sent(); + return [2 /*return*/, segment.data]; + } + }); + }); + }; + BackgroundProcessor.prototype._runTwilioTfLiteInference = function (inputImage) { + var _a = this, _b = _a._inferenceDimensions, width = _b.width, height = _b.height, offset = _a._inputMemoryOffset, tflite = _a._tflite; + var pixels = width * height; + for (var i = 0; i < pixels; i++) { + tflite.HEAPF32[offset + i * 3] = inputImage.data[i * 4] / 255; + tflite.HEAPF32[offset + i * 3 + 1] = inputImage.data[i * 4 + 1] / 255; + tflite.HEAPF32[offset + i * 3 + 2] = inputImage.data[i * 4 + 2] / 255; + } + tflite._runInference(); + var inferenceData = new Uint8ClampedArray(pixels * 4); + for (var i = 0; i < pixels; i++) { + var personProbability = tflite.HEAPF32[this._outputMemoryOffset + i]; + inferenceData[i] = Number(personProbability >= this._personProbabilityThreshold) * personProbability; + } + return inferenceData; + }; + BackgroundProcessor._model = null; + return BackgroundProcessor; +}(Processor_1.Processor)); +exports.BackgroundProcessor = BackgroundProcessor; + +},{"../../constants":1,"../../utils/Benchmark":8,"../../utils/version":10,"../Processor":3,"@tensorflow-models/body-pix":15,"@tensorflow/tfjs-backend-cpu":32,"@tensorflow/tfjs-backend-webgl":33}],5:[function(require,module,exports){ +"use strict"; +var __extends = (this && this.__extends) || (function () { + var extendStatics = function (d, b) { + extendStatics = Object.setPrototypeOf || + ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || + function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; }; + return extendStatics(d, b); + }; + return function (d, b) { + extendStatics(d, b); + function __() { this.constructor = d; } + d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); + }; +})(); +Object.defineProperty(exports, "__esModule", { value: true }); +exports.GaussianBlurBackgroundProcessor = void 0; +var BackgroundProcessor_1 = require("./BackgroundProcessor"); +var constants_1 = require("../../constants"); +/** + * The GaussianBlurBackgroundProcessor, when added to a VideoTrack, + * applies a gaussian blur filter on the background in each video frame + * and leaves the foreground (person(s)) untouched. Each instance of + * GaussianBlurBackgroundProcessor should be added to only one VideoTrack + * at a time to prevent overlapping of image data from multiple VideoTracks. 
+ * + * @example + * + * ```ts + * import { createLocalVideoTrack } from 'twilio-video'; + * import { GaussianBlurBackgroundProcessor } from '@twilio/video-processors'; + * + * const blurBackground = new GaussianBlurBackgroundProcessor({ + * assetsPath: 'https://my-server-path/assets' + * }); + * + * blurBackground.loadModel().then(() => { + * createLocalVideoTrack({ + * width: 640, + * height: 480, + * frameRate: 24 + * }).then(track => { + * track.addProcessor(blurBackground); + * }); + * }); + * ``` + */ +var GaussianBlurBackgroundProcessor = /** @class */ (function (_super) { + __extends(GaussianBlurBackgroundProcessor, _super); + /** + * Construct a GaussianBlurBackgroundProcessor. Default values will be used for + * any missing properties in [[GaussianBlurBackgroundProcessorOptions]], and + * invalid properties will be ignored. + */ + function GaussianBlurBackgroundProcessor(options) { + var _this = _super.call(this, options) || this; + _this._blurFilterRadius = constants_1.BLUR_FILTER_RADIUS; + // tslint:disable-next-line no-unused-variable + _this._name = 'GaussianBlurBackgroundProcessor'; + _this.blurFilterRadius = options.blurFilterRadius; + return _this; + } + Object.defineProperty(GaussianBlurBackgroundProcessor.prototype, "blurFilterRadius", { + /** + * The current background blur filter radius in pixels. + */ + get: function () { + return this._blurFilterRadius; + }, + /** + * Set a new background blur filter radius in pixels. + */ + set: function (radius) { + if (!radius) { + console.warn("Valid blur filter radius not found. Using " + constants_1.BLUR_FILTER_RADIUS + " as default."); + radius = constants_1.BLUR_FILTER_RADIUS; + } + this._blurFilterRadius = radius; + }, + enumerable: false, + configurable: true + }); + GaussianBlurBackgroundProcessor.prototype._setBackground = function (inputFrame) { + this._outputContext.filter = "blur(" + this._blurFilterRadius + "px)"; + this._outputContext.drawImage(inputFrame, 0, 0); + }; + return GaussianBlurBackgroundProcessor; +}(BackgroundProcessor_1.BackgroundProcessor)); +exports.GaussianBlurBackgroundProcessor = GaussianBlurBackgroundProcessor; + +},{"../../constants":1,"./BackgroundProcessor":4}],6:[function(require,module,exports){ +"use strict"; +var __extends = (this && this.__extends) || (function () { + var extendStatics = function (d, b) { + extendStatics = Object.setPrototypeOf || + ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || + function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; }; + return extendStatics(d, b); + }; + return function (d, b) { + extendStatics(d, b); + function __() { this.constructor = d; } + d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); + }; +})(); +Object.defineProperty(exports, "__esModule", { value: true }); +exports.VirtualBackgroundProcessor = void 0; +var BackgroundProcessor_1 = require("./BackgroundProcessor"); +var types_1 = require("../../types"); +/** + * The VirtualBackgroundProcessor, when added to a VideoTrack, + * replaces the background in each video frame with a given image, + * and leaves the foreground (person(s)) untouched. Each instance of + * VirtualBackgroundProcessor should be added to only one VideoTrack + * at a time to prevent overlapping of image data from multiple VideoTracks. 
+ * + * @example + * + * ```ts + * import { createLocalVideoTrack } from 'twilio-video'; + * import { VirtualBackgroundProcessor } from '@twilio/video-processors'; + * + * let virtualBackground; + * const img = new Image(); + * + * img.onload = () => { + * virtualBackground = new VirtualBackgroundProcessor({ + * assetsPath: 'https://my-server-path/assets', + * backgroundImage: img, + * }); + * + * virtualBackground.loadModel().then(() => { + * createLocalVideoTrack({ + * width: 640, + * height: 480, + * frameRate: 24 + * }).then(track => { + * track.addProcessor(virtualBackground); + * }); + * }); + * }; + * img.src = '/background.jpg'; + * ``` + */ +var VirtualBackgroundProcessor = /** @class */ (function (_super) { + __extends(VirtualBackgroundProcessor, _super); + /** + * Construct a VirtualBackgroundProcessor. Default values will be used for + * any missing optional properties in [[VirtualBackgroundProcessorOptions]], + * and invalid properties will be ignored. + */ + function VirtualBackgroundProcessor(options) { + var _this = _super.call(this, options) || this; + // tslint:disable-next-line no-unused-variable + _this._name = 'VirtualBackgroundProcessor'; + _this.backgroundImage = options.backgroundImage; + _this.fitType = options.fitType; + return _this; + } + Object.defineProperty(VirtualBackgroundProcessor.prototype, "backgroundImage", { + /** + * The HTMLImageElement representing the current background image. + */ + get: function () { + return this._backgroundImage; + }, + /** + * Set an HTMLImageElement as the new background image. + * An error will be raised if the image hasn't been fully loaded yet. Additionally, the image must follow + * [security guidelines](https://developer.mozilla.org/en-US/docs/Web/HTML/CORS_enabled_image) + * when loading the image from a different origin. Failing to do so will result to an empty output frame. + */ + set: function (image) { + if (!image || !image.complete || !image.naturalHeight) { + throw new Error('Invalid image. Make sure that the image is an HTMLImageElement and has been successfully loaded'); + } + this._backgroundImage = image; + }, + enumerable: false, + configurable: true + }); + Object.defineProperty(VirtualBackgroundProcessor.prototype, "fitType", { + /** + * The current [[ImageFit]] for positioning of the background image in the viewport. + */ + get: function () { + return this._fitType; + }, + /** + * Set a new [[ImageFit]] to be used for positioning the background image in the viewport. + */ + set: function (fitType) { + var validTypes = Object.keys(types_1.ImageFit); + if (!validTypes.includes(fitType)) { + console.warn("Valid fitType not found. 
Using '" + types_1.ImageFit.Fill + "' as default."); + fitType = types_1.ImageFit.Fill; + } + this._fitType = fitType; + }, + enumerable: false, + configurable: true + }); + VirtualBackgroundProcessor.prototype._setBackground = function () { + var img = this._backgroundImage; + var imageWidth = img.naturalWidth; + var imageHeight = img.naturalHeight; + var canvasWidth = this._outputCanvas.width; + var canvasHeight = this._outputCanvas.height; + if (this._fitType === types_1.ImageFit.Fill) { + this._outputContext.drawImage(img, 0, 0, imageWidth, imageHeight, 0, 0, canvasWidth, canvasHeight); + } + else if (this._fitType === types_1.ImageFit.None) { + this._outputContext.drawImage(img, 0, 0, imageWidth, imageHeight); + } + else if (this._fitType === types_1.ImageFit.Contain) { + var _a = this._getFitPosition(imageWidth, imageHeight, canvasWidth, canvasHeight, types_1.ImageFit.Contain), x = _a.x, y = _a.y, w = _a.w, h = _a.h; + this._outputContext.drawImage(img, 0, 0, imageWidth, imageHeight, x, y, w, h); + } + else if (this._fitType === types_1.ImageFit.Cover) { + var _b = this._getFitPosition(imageWidth, imageHeight, canvasWidth, canvasHeight, types_1.ImageFit.Cover), x = _b.x, y = _b.y, w = _b.w, h = _b.h; + this._outputContext.drawImage(img, 0, 0, imageWidth, imageHeight, x, y, w, h); + } + }; + VirtualBackgroundProcessor.prototype._getFitPosition = function (contentWidth, contentHeight, viewportWidth, viewportHeight, type) { + // Calculate new content width to fit viewport width + var factor = viewportWidth / contentWidth; + var newContentWidth = viewportWidth; + var newContentHeight = factor * contentHeight; + // Scale down the resulting height and width more + // to fit viewport height if the content still exceeds it + if ((type === types_1.ImageFit.Contain && newContentHeight > viewportHeight) + || (type === types_1.ImageFit.Cover && viewportHeight > newContentHeight)) { + factor = viewportHeight / newContentHeight; + newContentWidth = factor * newContentWidth; + newContentHeight = viewportHeight; + } + // Calculate the destination top left corner to center the content + var x = (viewportWidth - newContentWidth) / 2; + var y = (viewportHeight - newContentHeight) / 2; + return { + x: x, y: y, + w: newContentWidth, + h: newContentHeight, + }; + }; + return VirtualBackgroundProcessor; +}(BackgroundProcessor_1.BackgroundProcessor)); +exports.VirtualBackgroundProcessor = VirtualBackgroundProcessor; + +},{"../../types":7,"./BackgroundProcessor":4}],7:[function(require,module,exports){ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.ImageFit = void 0; +/** + * ImageFit specifies the positioning of an image inside a viewport. + */ +var ImageFit; +(function (ImageFit) { + /** + * Scale the image up or down to fill the viewport while preserving the aspect ratio. + * The image will be fully visible but will add empty space in the viewport if + * aspect ratios do not match. + */ + ImageFit["Contain"] = "Contain"; + /** + * Scale the image to fill both height and width of the viewport while preserving + * the aspect ratio, but will crop the image if aspect ratios do not match. + */ + ImageFit["Cover"] = "Cover"; + /** + * Stretches the image to fill the viewport regardless of aspect ratio. + */ + ImageFit["Fill"] = "Fill"; + /** + * Ignore height and width and use the original size. 
+ */ + ImageFit["None"] = "None"; +})(ImageFit = exports.ImageFit || (exports.ImageFit = {})); + +},{}],8:[function(require,module,exports){ +"use strict"; +var __assign = (this && this.__assign) || function () { + __assign = Object.assign || function(t) { + for (var s, i = 1, n = arguments.length; i < n; i++) { + s = arguments[i]; + for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) + t[p] = s[p]; + } + return t; + }; + return __assign.apply(this, arguments); +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.Benchmark = void 0; +/** + * @private + */ +var Benchmark = /** @class */ (function () { + function Benchmark() { + this._timingCache = new Map(); + this._timings = new Map(); + } + Benchmark.prototype.end = function (name) { + var timing = this._timings.get(name); + if (!timing) { + return; + } + timing.end = Date.now(); + timing.delay = timing.end - timing.start; + this._save(name, __assign({}, timing)); + }; + Benchmark.prototype.getAverageDelay = function (name) { + var timingCache = this._timingCache.get(name); + if (!timingCache || !timingCache.length) { + return; + } + return timingCache.map(function (timing) { return timing.delay; }) + .reduce(function (total, value) { return total += value; }, 0) / timingCache.length; + }; + Benchmark.prototype.getNames = function () { + return Array.from(this._timingCache.keys()); + }; + Benchmark.prototype.getRate = function (name) { + var timingCache = this._timingCache.get(name); + if (!timingCache || timingCache.length < 2) { + return; + } + var totalDelay = timingCache[timingCache.length - 1].end - timingCache[0].start; + return (timingCache.length / totalDelay) * 1000; + }; + Benchmark.prototype.start = function (name) { + var timing = this._timings.get(name); + if (!timing) { + timing = {}; + this._timings.set(name, timing); + } + timing.start = Date.now(); + delete timing.end; + delete timing.delay; + }; + Benchmark.prototype._save = function (name, timing) { + var timingCache = this._timingCache.get(name); + if (!timingCache) { + timingCache = []; + this._timingCache.set(name, timingCache); + } + timingCache.push(timing); + if (timingCache.length > Benchmark.cacheSize) { + timingCache.splice(0, timingCache.length - Benchmark.cacheSize); + } + }; + // NOTE (csantos): How many timing information to save per benchmark. + // This is about the amount of timing info generated on a 24fps input. + // Enough samples to calculate fps + Benchmark.cacheSize = 41; + return Benchmark; +}()); +exports.Benchmark = Benchmark; + +},{}],9:[function(require,module,exports){ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.isSupported = exports.isBrowserSupported = void 0; +/** + * @private + */ +function isBrowserSupported() { + return !!window.OffscreenCanvas && !(/Mobi/i.test(window.navigator.userAgent)) && !!window.chrome; +} +exports.isBrowserSupported = isBrowserSupported; +/** + * Check if the current browser is officially supported by twilio-video-procesors.js. + * This is set to `true` for chromium-based desktop browsers. + * @example + * ```ts + * import { isSupported } from '@twilio/video-processors'; + * + * if (isSupported) { + * // Initialize the background processors + * } + * ``` + */ +exports.isSupported = isBrowserSupported(); + +},{}],10:[function(require,module,exports){ +"use strict"; +// This file is generated on build. 
To make changes, see scripts/version.js +Object.defineProperty(exports, "__esModule", { value: true }); +exports.version = void 0; +/** + * The current version of the library. + */ +exports.version = '1.0.1'; + +},{}],11:[function(require,module,exports){ +"use strict"; +/** + * @license + * Copyright 2019 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +Object.defineProperty(exports, "__esModule", { value: true }); +exports.BaseModel = void 0; +var tf = require("@tensorflow/tfjs-core"); +/** + * BodyPix supports using various convolution neural network models + * (e.g. ResNet and MobileNetV1) as its underlying base model. + * The following BaseModel interface defines a unified interface for + * creating such BodyPix base models. Currently both MobileNet (in + * ./mobilenet.ts) and ResNet (in ./resnet.ts) implements the BaseModel + * interface. New base models that conform to the BaseModel interface can be + * added to BodyPix. + */ +var BaseModel = /** @class */ (function () { + function BaseModel(model, outputStride) { + this.model = model; + this.outputStride = outputStride; + var inputShape = this.model.inputs[0].shape; + tf.util.assert((inputShape[1] === -1) && (inputShape[2] === -1), function () { return "Input shape [" + inputShape[1] + ", " + inputShape[2] + "] " + + "must both be equal to or -1"; }); + } + /** + * Predicts intermediate Tensor representations. + * + * @param input The input RGB image of the base model. + * A Tensor of shape: [`inputResolution`, `inputResolution`, 3]. + * + * @return A dictionary of base model's intermediate predictions. + * The returned dictionary should contains the following elements: + * - heatmapScores: A Tensor3D that represents the keypoint heatmap scores. + * - offsets: A Tensor3D that represents the offsets. + * - displacementFwd: A Tensor3D that represents the forward displacement. + * - displacementBwd: A Tensor3D that represents the backward displacement. + * - segmentation: A Tensor3D that represents the segmentation of all + * people. + * - longOffsets: A Tensor3D that represents the long offsets used for + * instance grouping. + * - partHeatmaps: A Tensor3D that represents the body part segmentation. 
+ */ + BaseModel.prototype.predict = function (input) { + var _this = this; + return tf.tidy(function () { + var asFloat = _this.preprocessInput(tf.cast(input, 'float32')); + var asBatch = tf.expandDims(asFloat, 0); + var results = _this.model.predict(asBatch); + var results3d = results.map(function (y) { return tf.squeeze(y, [0]); }); + var namedResults = _this.nameOutputResults(results3d); + return { + heatmapScores: tf.sigmoid(namedResults.heatmap), + offsets: namedResults.offsets, + displacementFwd: namedResults.displacementFwd, + displacementBwd: namedResults.displacementBwd, + segmentation: namedResults.segmentation, + partHeatmaps: namedResults.partHeatmaps, + longOffsets: namedResults.longOffsets, + partOffsets: namedResults.partOffsets + }; + }); + }; + /** + * Releases the CPU and GPU memory allocated by the model. + */ + BaseModel.prototype.dispose = function () { + this.model.dispose(); + }; + return BaseModel; +}()); +exports.BaseModel = BaseModel; + +},{"@tensorflow/tfjs-core":35}],12:[function(require,module,exports){ +"use strict"; +/** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +Object.defineProperty(exports, "__esModule", { value: true }); +exports.cpuBlur = void 0; +// method copied from bGlur in https://codepen.io/zhaojun/pen/zZmRQe +function cpuBlur(canvas, image, blur) { + var ctx = canvas.getContext('2d'); + var sum = 0; + var delta = 5; + var alphaLeft = 1 / (2 * Math.PI * delta * delta); + var step = blur < 3 ? 1 : 2; + for (var y = -blur; y <= blur; y += step) { + for (var x = -blur; x <= blur; x += step) { + var weight = alphaLeft * Math.exp(-(x * x + y * y) / (2 * delta * delta)); + sum += weight; + } + } + for (var y = -blur; y <= blur; y += step) { + for (var x = -blur; x <= blur; x += step) { + ctx.globalAlpha = alphaLeft * + Math.exp(-(x * x + y * y) / (2 * delta * delta)) / sum * blur; + ctx.drawImage(image, x, y); + } + } + ctx.globalAlpha = 1; +} +exports.cpuBlur = cpuBlur; + +},{}],13:[function(require,module,exports){ +"use strict"; +/** + * @license + * Copyright 2019 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ +var __assign = (this && this.__assign) || function () { + __assign = Object.assign || function(t) { + for (var s, i = 1, n = arguments.length; i < n; i++) { + s = arguments[i]; + for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) + t[p] = s[p]; + } + return t; + }; + return __assign.apply(this, arguments); +}; +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +var __generator = (this && this.__generator) || function (thisArg, body) { + var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g; + return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g; + function verb(n) { return function (v) { return step([n, v]); }; } + function step(op) { + if (f) throw new TypeError("Generator is already executing."); + while (_) try { + if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t; + if (y = 0, t) op = [op[0] & 2, t.value]; + switch (op[0]) { + case 0: case 1: t = op; break; + case 4: _.label++; return { value: op[1], done: false }; + case 5: _.label++; y = op[1]; op = [0]; continue; + case 7: op = _.ops.pop(); _.trys.pop(); continue; + default: + if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; } + if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; } + if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; } + if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; } + if (t[2]) _.ops.pop(); + _.trys.pop(); continue; + } + op = body.call(thisArg, _); + } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; } + if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true }; + } +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.load = exports.BodyPix = exports.MULTI_PERSON_INSTANCE_INFERENCE_CONFIG = exports.PERSON_INFERENCE_CONFIG = void 0; +var tfconv = require("@tensorflow/tfjs-converter"); +var tf = require("@tensorflow/tfjs-core"); +var decode_part_map_1 = require("./decode_part_map"); +var mobilenet_1 = require("./mobilenet"); +var decode_instance_masks_1 = require("./multi_person/decode_instance_masks"); +var decode_multiple_poses_1 = require("./multi_person/decode_multiple_poses"); +var resnet_1 = require("./resnet"); +var saved_models_1 = require("./saved_models"); +var util_1 = require("./util"); +var APPLY_SIGMOID_ACTIVATION = true; +var FLIP_POSES_AFTER_SCALING = false; +// The default configuration for loading MobileNetV1 based BodyPix. +// +// (And for references, the default configuration for loading ResNet +// based PoseNet is also included). 
+// +// ``` +// const RESNET_CONFIG = { +// architecture: 'ResNet50', +// outputStride: 32, +// quantBytes: 4, +// } as ModelConfig; +// ``` +var MOBILENET_V1_CONFIG = { + architecture: 'MobileNetV1', + outputStride: 16, + quantBytes: 4, + multiplier: 0.75, +}; +var VALID_ARCHITECTURE = ['MobileNetV1', 'ResNet50']; +var VALID_STRIDE = { + 'MobileNetV1': [8, 16, 32], + 'ResNet50': [32, 16] +}; +var VALID_MULTIPLIER = { + 'MobileNetV1': [0.50, 0.75, 1.0], + 'ResNet50': [1.0] +}; +var VALID_QUANT_BYTES = [1, 2, 4]; +function validateModelConfig(config) { + config = config || MOBILENET_V1_CONFIG; + if (config.architecture == null) { + config.architecture = 'MobileNetV1'; + } + if (VALID_ARCHITECTURE.indexOf(config.architecture) < 0) { + throw new Error("Invalid architecture " + config.architecture + ". " + + ("Should be one of " + VALID_ARCHITECTURE)); + } + if (config.outputStride == null) { + config.outputStride = 16; + } + if (VALID_STRIDE[config.architecture].indexOf(config.outputStride) < 0) { + throw new Error("Invalid outputStride " + config.outputStride + ". " + + ("Should be one of " + VALID_STRIDE[config.architecture] + " ") + + ("for architecture " + config.architecture + ".")); + } + if (config.multiplier == null) { + config.multiplier = 1.0; + } + if (VALID_MULTIPLIER[config.architecture].indexOf(config.multiplier) < 0) { + throw new Error("Invalid multiplier " + config.multiplier + ". " + + ("Should be one of " + VALID_MULTIPLIER[config.architecture] + " ") + + ("for architecture " + config.architecture + ".")); + } + if (config.quantBytes == null) { + config.quantBytes = 4; + } + if (VALID_QUANT_BYTES.indexOf(config.quantBytes) < 0) { + throw new Error("Invalid quantBytes " + config.quantBytes + ". " + + ("Should be one of " + VALID_QUANT_BYTES + " ") + + ("for architecture " + config.architecture + ".")); + } + return config; +} +exports.PERSON_INFERENCE_CONFIG = { + flipHorizontal: false, + internalResolution: 'medium', + segmentationThreshold: 0.7, + maxDetections: 10, + scoreThreshold: 0.4, + nmsRadius: 20, +}; +exports.MULTI_PERSON_INSTANCE_INFERENCE_CONFIG = { + flipHorizontal: false, + internalResolution: 'medium', + segmentationThreshold: 0.7, + maxDetections: 10, + scoreThreshold: 0.4, + nmsRadius: 20, + minKeypointScore: 0.3, + refineSteps: 10 +}; +function validatePersonInferenceConfig(config) { + var segmentationThreshold = config.segmentationThreshold, maxDetections = config.maxDetections, scoreThreshold = config.scoreThreshold, nmsRadius = config.nmsRadius; + if (segmentationThreshold < 0.0 || segmentationThreshold > 1.0) { + throw new Error("segmentationThreshold " + segmentationThreshold + ". " + + "Should be in range [0.0, 1.0]"); + } + if (maxDetections <= 0) { + throw new Error("Invalid maxDetections " + maxDetections + ". " + + "Should be > 0"); + } + if (scoreThreshold < 0.0 || scoreThreshold > 1.0) { + throw new Error("Invalid scoreThreshold " + scoreThreshold + ". " + + "Should be in range [0.0, 1.0]"); + } + if (nmsRadius <= 0) { + throw new Error("Invalid nmsRadius " + nmsRadius + "."); + } +} +function validateMultiPersonInstanceInferenceConfig(config) { + var segmentationThreshold = config.segmentationThreshold, maxDetections = config.maxDetections, scoreThreshold = config.scoreThreshold, nmsRadius = config.nmsRadius, minKeypointScore = config.minKeypointScore, refineSteps = config.refineSteps; + if (segmentationThreshold < 0.0 || segmentationThreshold > 1.0) { + throw new Error("segmentationThreshold " + segmentationThreshold + ". 
" + + "Should be in range [0.0, 1.0]"); + } + if (maxDetections <= 0) { + throw new Error("Invalid maxDetections " + maxDetections + ". " + + "Should be > 0"); + } + if (scoreThreshold < 0.0 || scoreThreshold > 1.0) { + throw new Error("Invalid scoreThreshold " + scoreThreshold + ". " + + "Should be in range [0.0, 1.0]"); + } + if (nmsRadius <= 0) { + throw new Error("Invalid nmsRadius " + nmsRadius + "."); + } + if (minKeypointScore < 0 || minKeypointScore > 1) { + throw new Error("Invalid minKeypointScore " + minKeypointScore + "." + + "Should be in range [0.0, 1.0]"); + } + if (refineSteps <= 0 || refineSteps > 20) { + throw new Error("Invalid refineSteps " + refineSteps + "." + + "Should be in range [1, 20]"); + } +} +var BodyPix = /** @class */ (function () { + function BodyPix(net) { + this.baseModel = net; + } + BodyPix.prototype.predictForPersonSegmentation = function (input) { + var _a = this.baseModel.predict(input), segmentation = _a.segmentation, heatmapScores = _a.heatmapScores, offsets = _a.offsets, displacementFwd = _a.displacementFwd, displacementBwd = _a.displacementBwd; + return { + segmentLogits: segmentation, + heatmapScores: heatmapScores, + offsets: offsets, + displacementFwd: displacementFwd, + displacementBwd: displacementBwd, + }; + }; + BodyPix.prototype.predictForPersonSegmentationAndPart = function (input) { + var _a = this.baseModel.predict(input), segmentation = _a.segmentation, partHeatmaps = _a.partHeatmaps, heatmapScores = _a.heatmapScores, offsets = _a.offsets, displacementFwd = _a.displacementFwd, displacementBwd = _a.displacementBwd; + return { + segmentLogits: segmentation, + partHeatmapLogits: partHeatmaps, + heatmapScores: heatmapScores, + offsets: offsets, + displacementFwd: displacementFwd, + displacementBwd: displacementBwd, + }; + }; + BodyPix.prototype.predictForMultiPersonInstanceSegmentationAndPart = function (input) { + var _a = this.baseModel.predict(input), segmentation = _a.segmentation, longOffsets = _a.longOffsets, heatmapScores = _a.heatmapScores, offsets = _a.offsets, displacementFwd = _a.displacementFwd, displacementBwd = _a.displacementBwd, partHeatmaps = _a.partHeatmaps; + return { + segmentLogits: segmentation, + longOffsets: longOffsets, + heatmapScores: heatmapScores, + offsets: offsets, + displacementFwd: displacementFwd, + displacementBwd: displacementBwd, + partHeatmaps: partHeatmaps + }; + }; + /** + * Given an image with people, returns a dictionary of all intermediate + * tensors including: 1) a binary array with 1 for the pixels that are part of + * the person, and 0 otherwise, 2) heatmapScores, 3) offsets, and 4) paddings. + * + * @param input ImageData|HTMLImageElement|HTMLCanvasElement|HTMLVideoElement) + * The input image to feed through the network. + * + * @param internalResolution Defaults to 'medium'. The internal resolution + * that the input is resized to before inference. The larger the + * internalResolution the more accurate the model at the cost of slower + * prediction times. Available values are 'low', 'medium', 'high', 'full', or + * a percentage value between 0 and 1. The values 'low', 'medium', 'high', and + * 'full' map to 0.25, 0.5, 0.75, and 1.0 correspondingly. + * + * @param segmentationThreshold The minimum that segmentation values must have + * to be considered part of the person. Affects the generation of the + * segmentation mask. 
+ * + * @return A dictionary containing `segmentation`, `heatmapScores`, `offsets`, + * and `padding`: + * - `segmentation`: A 2d Tensor with 1 for the pixels that are part of the + * person, and 0 otherwise. The width and height correspond to the same + * dimensions of the input image. + * - `heatmapScores`: A 3d Tensor of the keypoint heatmaps used by + * pose estimation decoding. + * - `offsets`: A 3d Tensor of the keypoint offsets used by pose + * estimation decoding. + * - `displacementFwd`: A 3d Tensor of the keypoint forward displacement used + * by pose estimation decoding. + * - `displacementBwd`: A 3d Tensor of the keypoint backward displacement used + * by pose estimation decoding. + * - `padding`: The padding (unit pixels) being applied to the input image + * before it is fed into the model. + */ + BodyPix.prototype.segmentPersonActivation = function (input, internalResolution, segmentationThreshold) { + var _this = this; + if (segmentationThreshold === void 0) { segmentationThreshold = 0.5; } + var _a = util_1.getInputSize(input), height = _a[0], width = _a[1]; + var internalResolutionHeightAndWidth = util_1.toInputResolutionHeightAndWidth(internalResolution, this.baseModel.outputStride, [height, width]); + var _b = util_1.padAndResizeTo(input, internalResolutionHeightAndWidth), resized = _b.resized, padding = _b.padding; + var _c = tf.tidy(function () { + var _a = _this.predictForPersonSegmentation(resized), segmentLogits = _a.segmentLogits, heatmapScores = _a.heatmapScores, offsets = _a.offsets, displacementFwd = _a.displacementFwd, displacementBwd = _a.displacementBwd; + var _b = resized.shape, resizedHeight = _b[0], resizedWidth = _b[1]; + var scaledSegmentScores = util_1.scaleAndCropToInputTensorShape(segmentLogits, [height, width], [resizedHeight, resizedWidth], [[padding.top, padding.bottom], [padding.left, padding.right]], APPLY_SIGMOID_ACTIVATION); + return { + segmentation: decode_part_map_1.toMaskTensor(tf.squeeze(scaledSegmentScores), segmentationThreshold), + heatmapScores: heatmapScores, + offsets: offsets, + displacementFwd: displacementFwd, + displacementBwd: displacementBwd, + }; + }), segmentation = _c.segmentation, heatmapScores = _c.heatmapScores, offsets = _c.offsets, displacementFwd = _c.displacementFwd, displacementBwd = _c.displacementBwd; + resized.dispose(); + return { + segmentation: segmentation, + heatmapScores: heatmapScores, + offsets: offsets, + displacementFwd: displacementFwd, + displacementBwd: displacementBwd, + padding: padding, + internalResolutionHeightAndWidth: internalResolutionHeightAndWidth + }; + }; + /** + * Given an image with many people, returns a PersonSegmentation dictionary + * that contains the segmentation mask for all people and a single pose. + * + * Note: The segmentation mask returned by this method covers all people but + * the pose works well for one person. If you want to estimate instance-level + * multiple person segmentation & pose for each person, use + * `segmentMultiPerson` instead. + * + * @param input ImageData|HTMLImageElement|HTMLCanvasElement|HTMLVideoElement) + * The input image to feed through the network. + * + * @param config PersonInferenceConfig object that contains + * parameters for the BodyPix inference using person decoding. + * + * @return A SemanticPersonSegmentation dictionary that contains height, + * width, the flattened binary segmentation mask and the poses for all people. + * The width and height correspond to the same dimensions of the input image. 
+ * - `height`: The height of the segmentation data in pixel unit. + * - `width`: The width of the segmentation data in pixel unit. + * - `data`: The flattened Uint8Array of segmentation data. 1 means the pixel + * belongs to a person and 0 means the pixel doesn't belong to a person. The + * size of the array is equal to `height` x `width` in row-major order. + * - `allPoses`: The 2d poses of all people. + */ + BodyPix.prototype.segmentPerson = function (input, config) { + if (config === void 0) { config = exports.PERSON_INFERENCE_CONFIG; } + return __awaiter(this, void 0, void 0, function () { + var _a, segmentation, heatmapScores, offsets, displacementFwd, displacementBwd, padding, internalResolutionHeightAndWidth, _b, height, width, result, tensorBuffers, scoresBuf, offsetsBuf, displacementsFwdBuf, displacementsBwdBuf, poses; + return __generator(this, function (_c) { + switch (_c.label) { + case 0: + config = __assign(__assign({}, exports.PERSON_INFERENCE_CONFIG), config); + validatePersonInferenceConfig(config); + _a = this.segmentPersonActivation(input, config.internalResolution, config.segmentationThreshold), segmentation = _a.segmentation, heatmapScores = _a.heatmapScores, offsets = _a.offsets, displacementFwd = _a.displacementFwd, displacementBwd = _a.displacementBwd, padding = _a.padding, internalResolutionHeightAndWidth = _a.internalResolutionHeightAndWidth; + _b = segmentation.shape, height = _b[0], width = _b[1]; + return [4 /*yield*/, segmentation.data()]; + case 1: + result = _c.sent(); + segmentation.dispose(); + return [4 /*yield*/, util_1.toTensorBuffers3D([heatmapScores, offsets, displacementFwd, displacementBwd])]; + case 2: + tensorBuffers = _c.sent(); + scoresBuf = tensorBuffers[0], offsetsBuf = tensorBuffers[1], displacementsFwdBuf = tensorBuffers[2], displacementsBwdBuf = tensorBuffers[3]; + poses = decode_multiple_poses_1.decodeMultiplePoses(scoresBuf, offsetsBuf, displacementsFwdBuf, displacementsBwdBuf, this.baseModel.outputStride, config.maxDetections, config.scoreThreshold, config.nmsRadius); + poses = util_1.scaleAndFlipPoses(poses, [height, width], internalResolutionHeightAndWidth, padding, FLIP_POSES_AFTER_SCALING); + heatmapScores.dispose(); + offsets.dispose(); + displacementFwd.dispose(); + displacementBwd.dispose(); + return [2 /*return*/, { height: height, width: width, data: result, allPoses: poses }]; + } + }); + }); + }; + /** + * Given an image with multiple people, returns an *array* of + * PersonSegmentation object. Each element in the array corresponding to one + * of the people in the input image. In other words, it predicts + * instance-level multiple person segmentation & pose for each person. + * + * The model does standard ImageNet pre-processing before inferring through + * the model. The image pixels should have values [0-255]. + * + * @param input + * ImageData|HTMLImageElement|HTMLCanvasElement|HTMLVideoElement) The input + * image to feed through the network. + * + * @param config MultiPersonInferenceConfig object that contains + * parameters for the BodyPix inference using multi-person decoding. + * + * @return An array of PersonSegmentation object, each containing a width, + * height, a binary array (1 for the pixels that are part of the + * person, and 0 otherwise) and 2D pose. The array size corresponds to the + * number of pixels in the image. The width and height correspond to the + * dimensions of the image the binary array is shaped to, which are the same + * dimensions of the input image. 
+ */ + BodyPix.prototype.segmentMultiPerson = function (input, config) { + if (config === void 0) { config = exports.MULTI_PERSON_INSTANCE_INFERENCE_CONFIG; } + return __awaiter(this, void 0, void 0, function () { + var _a, height, width, internalResolutionHeightAndWidth, _b, resized, padding, _c, segmentation, longOffsets, heatmapScoresRaw, offsetsRaw, displacementFwdRaw, displacementBwdRaw, tensorBuffers, scoresBuf, offsetsBuf, displacementsFwdBuf, displacementsBwdBuf, poses, instanceMasks; + var _this = this; + return __generator(this, function (_d) { + switch (_d.label) { + case 0: + config = __assign(__assign({}, exports.MULTI_PERSON_INSTANCE_INFERENCE_CONFIG), config); + validateMultiPersonInstanceInferenceConfig(config); + _a = util_1.getInputSize(input), height = _a[0], width = _a[1]; + internalResolutionHeightAndWidth = util_1.toInputResolutionHeightAndWidth(config.internalResolution, this.baseModel.outputStride, [height, width]); + _b = util_1.padAndResizeTo(input, internalResolutionHeightAndWidth), resized = _b.resized, padding = _b.padding; + _c = tf.tidy(function () { + var _a = _this.predictForMultiPersonInstanceSegmentationAndPart(resized), segmentLogits = _a.segmentLogits, longOffsets = _a.longOffsets, heatmapScores = _a.heatmapScores, offsets = _a.offsets, displacementFwd = _a.displacementFwd, displacementBwd = _a.displacementBwd; + var scaledSegmentScores = util_1.scaleAndCropToInputTensorShape(segmentLogits, [height, width], internalResolutionHeightAndWidth, [[padding.top, padding.bottom], [padding.left, padding.right]], APPLY_SIGMOID_ACTIVATION); + var longOffsetsResized = false; + var scaledLongOffsets; + if (longOffsetsResized) { + scaledLongOffsets = util_1.scaleAndCropToInputTensorShape(longOffsets, [height, width], internalResolutionHeightAndWidth, [[padding.top, padding.bottom], [padding.left, padding.right]], APPLY_SIGMOID_ACTIVATION); + } + else { + scaledLongOffsets = longOffsets; + } + var segmentation = decode_part_map_1.toMaskTensor(tf.squeeze(scaledSegmentScores), config.segmentationThreshold); + return { + segmentation: segmentation, + longOffsets: scaledLongOffsets, + heatmapScoresRaw: heatmapScores, + offsetsRaw: offsets, + displacementFwdRaw: displacementFwd, + displacementBwdRaw: displacementBwd, + }; + }), segmentation = _c.segmentation, longOffsets = _c.longOffsets, heatmapScoresRaw = _c.heatmapScoresRaw, offsetsRaw = _c.offsetsRaw, displacementFwdRaw = _c.displacementFwdRaw, displacementBwdRaw = _c.displacementBwdRaw; + return [4 /*yield*/, util_1.toTensorBuffers3D([heatmapScoresRaw, offsetsRaw, displacementFwdRaw, displacementBwdRaw])]; + case 1: + tensorBuffers = _d.sent(); + scoresBuf = tensorBuffers[0], offsetsBuf = tensorBuffers[1], displacementsFwdBuf = tensorBuffers[2], displacementsBwdBuf = tensorBuffers[3]; + poses = decode_multiple_poses_1.decodeMultiplePoses(scoresBuf, offsetsBuf, displacementsFwdBuf, displacementsBwdBuf, this.baseModel.outputStride, config.maxDetections, config.scoreThreshold, config.nmsRadius); + poses = util_1.scaleAndFlipPoses(poses, [height, width], internalResolutionHeightAndWidth, padding, FLIP_POSES_AFTER_SCALING); + return [4 /*yield*/, decode_instance_masks_1.decodePersonInstanceMasks(segmentation, longOffsets, poses, height, width, this.baseModel.outputStride, internalResolutionHeightAndWidth, padding, config.scoreThreshold, config.refineSteps, config.minKeypointScore, config.maxDetections)]; + case 2: + instanceMasks = _d.sent(); + resized.dispose(); + segmentation.dispose(); + longOffsets.dispose(); + 
heatmapScoresRaw.dispose(); + offsetsRaw.dispose(); + displacementFwdRaw.dispose(); + displacementBwdRaw.dispose(); + return [2 /*return*/, instanceMasks]; + } + }); + }); + }; + /** + * Given an image with many people, returns a dictionary containing: height, + * width, a tensor with a part id from 0-24 for the pixels that are + * part of a corresponding body part, and -1 otherwise. This does standard + * ImageNet pre-processing before inferring through the model. The image + * should pixels should have values [0-255]. + * + * @param input ImageData|HTMLImageElement|HTMLCanvasElement|HTMLVideoElement) + * The input image to feed through the network. + * + * @param internalResolution Defaults to 'medium'. The internal resolution + * percentage that the input is resized to before inference. The larger the + * internalResolution the more accurate the model at the cost of slower + * prediction times. Available values are 'low', 'medium', 'high', 'full', or + * a percentage value between 0 and 1. The values 'low', 'medium', 'high', and + * 'full' map to 0.25, 0.5, 0.75, and 1.0 correspondingly. + * + * @param segmentationThreshold The minimum that segmentation values must have + * to be considered part of the person. Affects the clipping of the colored + * part image. + * + * @return A dictionary containing `partSegmentation`, `heatmapScores`, + * `offsets`, and `padding`: + * - `partSegmentation`: A 2d Tensor with a part id from 0-24 for + * the pixels that are part of a corresponding body part, and -1 otherwise. + * - `heatmapScores`: A 3d Tensor of the keypoint heatmaps used by + * single-person pose estimation decoding. + * - `offsets`: A 3d Tensor of the keypoint offsets used by single-person pose + * estimation decoding. + * - `displacementFwd`: A 3d Tensor of the keypoint forward displacement + * used by pose estimation decoding. + * - `displacementBwd`: A 3d Tensor of the keypoint backward displacement used + * by pose estimation decoding. + * - `padding`: The padding (unit pixels) being applied to the input image + * before it is fed into the model. 
+ */ + BodyPix.prototype.segmentPersonPartsActivation = function (input, internalResolution, segmentationThreshold) { + var _this = this; + if (segmentationThreshold === void 0) { segmentationThreshold = 0.5; } + var _a = util_1.getInputSize(input), height = _a[0], width = _a[1]; + var internalResolutionHeightAndWidth = util_1.toInputResolutionHeightAndWidth(internalResolution, this.baseModel.outputStride, [height, width]); + var _b = util_1.padAndResizeTo(input, internalResolutionHeightAndWidth), resized = _b.resized, padding = _b.padding; + var _c = tf.tidy(function () { + var _a = _this.predictForPersonSegmentationAndPart(resized), segmentLogits = _a.segmentLogits, partHeatmapLogits = _a.partHeatmapLogits, heatmapScores = _a.heatmapScores, offsets = _a.offsets, displacementFwd = _a.displacementFwd, displacementBwd = _a.displacementBwd; + var _b = resized.shape, resizedHeight = _b[0], resizedWidth = _b[1]; + var scaledSegmentScores = util_1.scaleAndCropToInputTensorShape(segmentLogits, [height, width], [resizedHeight, resizedWidth], [[padding.top, padding.bottom], [padding.left, padding.right]], APPLY_SIGMOID_ACTIVATION); + var scaledPartHeatmapScore = util_1.scaleAndCropToInputTensorShape(partHeatmapLogits, [height, width], [resizedHeight, resizedWidth], [[padding.top, padding.bottom], [padding.left, padding.right]], APPLY_SIGMOID_ACTIVATION); + var segmentation = decode_part_map_1.toMaskTensor(tf.squeeze(scaledSegmentScores), segmentationThreshold); + return { + partSegmentation: decode_part_map_1.decodePartSegmentation(segmentation, scaledPartHeatmapScore), + heatmapScores: heatmapScores, + offsets: offsets, + displacementFwd: displacementFwd, + displacementBwd: displacementBwd, + }; + }), partSegmentation = _c.partSegmentation, heatmapScores = _c.heatmapScores, offsets = _c.offsets, displacementFwd = _c.displacementFwd, displacementBwd = _c.displacementBwd; + resized.dispose(); + return { + partSegmentation: partSegmentation, + heatmapScores: heatmapScores, + offsets: offsets, + displacementFwd: displacementFwd, + displacementBwd: displacementBwd, + padding: padding, + internalResolutionHeightAndWidth: internalResolutionHeightAndWidth + }; + }; + /** + * Given an image with many people, returns a PartSegmentation dictionary that + * contains the body part segmentation mask for all people and a single pose. + * + * Note: The body part segmentation mask returned by this method covers all + * people but the pose works well when there is one person. If you want to + * estimate instance-level multiple person body part segmentation & pose for + * each person, use `segmentMultiPersonParts` instead. + * + * @param input ImageData|HTMLImageElement|HTMLCanvasElement|HTMLVideoElement) + * The input image to feed through the network. + * + * @param config PersonInferenceConfig object that contains + * parameters for the BodyPix inference using single person decoding. + * + * @return A SemanticPartSegmentation dictionary that contains height, width, + * the flattened binary segmentation mask and the pose for the person. The + * width and height correspond to the same dimensions of the input image. + * - `height`: The height of the person part segmentation data in pixel unit. + * - `width`: The width of the person part segmentation data in pixel unit. + * - `data`: The flattened Int32Array of person part segmentation data with a + * part id from 0-24 for the pixels that are part of a corresponding body + * part, and -1 otherwise. 
The size of the array is equal to `height` x + * `width` in row-major order. + * - `allPoses`: The 2d poses of all people. + */ + BodyPix.prototype.segmentPersonParts = function (input, config) { + if (config === void 0) { config = exports.PERSON_INFERENCE_CONFIG; } + return __awaiter(this, void 0, void 0, function () { + var _a, partSegmentation, heatmapScores, offsets, displacementFwd, displacementBwd, padding, internalResolutionHeightAndWidth, _b, height, width, data, tensorBuffers, scoresBuf, offsetsBuf, displacementsFwdBuf, displacementsBwdBuf, poses; + return __generator(this, function (_c) { + switch (_c.label) { + case 0: + config = __assign(__assign({}, exports.PERSON_INFERENCE_CONFIG), config); + validatePersonInferenceConfig(config); + _a = this.segmentPersonPartsActivation(input, config.internalResolution, config.segmentationThreshold), partSegmentation = _a.partSegmentation, heatmapScores = _a.heatmapScores, offsets = _a.offsets, displacementFwd = _a.displacementFwd, displacementBwd = _a.displacementBwd, padding = _a.padding, internalResolutionHeightAndWidth = _a.internalResolutionHeightAndWidth; + _b = partSegmentation.shape, height = _b[0], width = _b[1]; + return [4 /*yield*/, partSegmentation.data()]; + case 1: + data = _c.sent(); + partSegmentation.dispose(); + return [4 /*yield*/, util_1.toTensorBuffers3D([heatmapScores, offsets, displacementFwd, displacementBwd])]; + case 2: + tensorBuffers = _c.sent(); + scoresBuf = tensorBuffers[0], offsetsBuf = tensorBuffers[1], displacementsFwdBuf = tensorBuffers[2], displacementsBwdBuf = tensorBuffers[3]; + poses = decode_multiple_poses_1.decodeMultiplePoses(scoresBuf, offsetsBuf, displacementsFwdBuf, displacementsBwdBuf, this.baseModel.outputStride, config.maxDetections, config.scoreThreshold, config.nmsRadius); + poses = util_1.scaleAndFlipPoses(poses, [height, width], internalResolutionHeightAndWidth, padding, FLIP_POSES_AFTER_SCALING); + heatmapScores.dispose(); + offsets.dispose(); + displacementFwd.dispose(); + displacementBwd.dispose(); + return [2 /*return*/, { height: height, width: width, data: data, allPoses: poses }]; + } + }); + }); + }; + /** + * Given an image with multiple people, returns an *array* of PartSegmentation + * object. Each element in the array corresponding to one + * of the people in the input image. In other words, it predicts + * instance-level multiple person body part segmentation & pose for each + * person. + * + * This does standard ImageNet pre-processing before inferring through + * the model. The image pixels should have values [0-255]. + * + * @param input + * ImageData|HTMLImageElement|HTMLCanvasElement|HTMLVideoElement) The input + * image to feed through the network. + * + * @param config MultiPersonInferenceConfig object that contains + * parameters for the BodyPix inference using multi-person decoding. + * + * @return An array of PartSegmentation object, each containing a width, + * height, a flattened array (with part id from 0-24 for the pixels that are + * part of a corresponding body part, and -1 otherwise) and 2D pose. The width + * and height correspond to the dimensions of the image. Each flattened part + * segmentation array size is equal to `height` x `width`. 
+ */ + BodyPix.prototype.segmentMultiPersonParts = function (input, config) { + if (config === void 0) { config = exports.MULTI_PERSON_INSTANCE_INFERENCE_CONFIG; } + return __awaiter(this, void 0, void 0, function () { + var _a, height, width, internalResolutionHeightAndWidth, _b, resized, padding, _c, segmentation, longOffsets, heatmapScoresRaw, offsetsRaw, displacementFwdRaw, displacementBwdRaw, partSegmentation, tensorBuffers, scoresBuf, offsetsBuf, displacementsFwdBuf, displacementsBwdBuf, poses, instanceMasks; + var _this = this; + return __generator(this, function (_d) { + switch (_d.label) { + case 0: + config = __assign(__assign({}, exports.MULTI_PERSON_INSTANCE_INFERENCE_CONFIG), config); + validateMultiPersonInstanceInferenceConfig(config); + _a = util_1.getInputSize(input), height = _a[0], width = _a[1]; + internalResolutionHeightAndWidth = util_1.toInputResolutionHeightAndWidth(config.internalResolution, this.baseModel.outputStride, [height, width]); + _b = util_1.padAndResizeTo(input, internalResolutionHeightAndWidth), resized = _b.resized, padding = _b.padding; + _c = tf.tidy(function () { + var _a = _this.predictForMultiPersonInstanceSegmentationAndPart(resized), segmentLogits = _a.segmentLogits, longOffsets = _a.longOffsets, heatmapScores = _a.heatmapScores, offsets = _a.offsets, displacementFwd = _a.displacementFwd, displacementBwd = _a.displacementBwd, partHeatmaps = _a.partHeatmaps; + // decoding with scaling. + var scaledSegmentScores = util_1.scaleAndCropToInputTensorShape(segmentLogits, [height, width], internalResolutionHeightAndWidth, [[padding.top, padding.bottom], [padding.left, padding.right]], APPLY_SIGMOID_ACTIVATION); + // decoding with scaling. + var scaledPartSegmentationScores = util_1.scaleAndCropToInputTensorShape(partHeatmaps, [height, width], internalResolutionHeightAndWidth, [[padding.top, padding.bottom], [padding.left, padding.right]], APPLY_SIGMOID_ACTIVATION); + var scaledLongOffsets = longOffsets; + var segmentation = decode_part_map_1.toMaskTensor(tf.squeeze(scaledSegmentScores), config.segmentationThreshold); + var partSegmentation = decode_part_map_1.decodeOnlyPartSegmentation(scaledPartSegmentationScores); + return { + segmentation: segmentation, + longOffsets: scaledLongOffsets, + heatmapScoresRaw: heatmapScores, + offsetsRaw: offsets, + displacementFwdRaw: displacementFwd, + displacementBwdRaw: displacementBwd, + partSegmentation: partSegmentation + }; + }), segmentation = _c.segmentation, longOffsets = _c.longOffsets, heatmapScoresRaw = _c.heatmapScoresRaw, offsetsRaw = _c.offsetsRaw, displacementFwdRaw = _c.displacementFwdRaw, displacementBwdRaw = _c.displacementBwdRaw, partSegmentation = _c.partSegmentation; + return [4 /*yield*/, util_1.toTensorBuffers3D([heatmapScoresRaw, offsetsRaw, displacementFwdRaw, displacementBwdRaw])]; + case 1: + tensorBuffers = _d.sent(); + scoresBuf = tensorBuffers[0], offsetsBuf = tensorBuffers[1], displacementsFwdBuf = tensorBuffers[2], displacementsBwdBuf = tensorBuffers[3]; + poses = decode_multiple_poses_1.decodeMultiplePoses(scoresBuf, offsetsBuf, displacementsFwdBuf, displacementsBwdBuf, this.baseModel.outputStride, config.maxDetections, config.scoreThreshold, config.nmsRadius); + poses = util_1.scaleAndFlipPoses(poses, [height, width], internalResolutionHeightAndWidth, padding, FLIP_POSES_AFTER_SCALING); + return [4 /*yield*/, decode_instance_masks_1.decodePersonInstancePartMasks(segmentation, longOffsets, partSegmentation, poses, height, width, this.baseModel.outputStride, 
internalResolutionHeightAndWidth, padding, config.scoreThreshold, config.refineSteps, config.minKeypointScore, config.maxDetections)]; + case 2: + instanceMasks = _d.sent(); + resized.dispose(); + segmentation.dispose(); + longOffsets.dispose(); + heatmapScoresRaw.dispose(); + offsetsRaw.dispose(); + displacementFwdRaw.dispose(); + displacementBwdRaw.dispose(); + partSegmentation.dispose(); + return [2 /*return*/, instanceMasks]; + } + }); + }); + }; + BodyPix.prototype.dispose = function () { + this.baseModel.dispose(); + }; + return BodyPix; +}()); +exports.BodyPix = BodyPix; +/** + * Loads the MobileNet BodyPix model. + */ +function loadMobileNet(config) { + return __awaiter(this, void 0, void 0, function () { + var outputStride, quantBytes, multiplier, url, graphModel, mobilenet; + return __generator(this, function (_a) { + switch (_a.label) { + case 0: + outputStride = config.outputStride; + quantBytes = config.quantBytes; + multiplier = config.multiplier; + if (tf == null) { + throw new Error("Cannot find TensorFlow.js. If you are using a + + +
+
+
+
+ +
+
+
+
+
+
+
+ +

Class GaussianBlurBackgroundProcessor

+
+
+
+
+
+
+
+
+
+

The GaussianBlurBackgroundProcessor, when added to a VideoTrack, + applies a Gaussian blur filter to the background in each video frame + and leaves the foreground (person(s)) untouched. Each instance of + GaussianBlurBackgroundProcessor should be added to only one VideoTrack + at a time to prevent overlapping of image data from multiple VideoTracks.

+
+
+
example
+
import { createLocalVideoTrack } from 'twilio-video';
+import { GaussianBlurBackgroundProcessor } from '@twilio/video-processors';
+
+const blurBackground = new GaussianBlurBackgroundProcessor({
+  assetsPath: 'https://my-server-path/assets'
+});
+
+blurBackground.loadModel().then(() => {
+  createLocalVideoTrack({
+    width: 640,
+    height: 480,
+    frameRate: 24
+  }).then(track => {
+    track.addProcessor(blurBackground);
+  });
+});
+
+
+
+
+
+
+

Hierarchy

+
    +
  • + BackgroundProcessor +
      +
    • + GaussianBlurBackgroundProcessor +
    • +
    +
  • +
+
+
+

Index

+
+
+
+

Constructors

+ +
+
+

Accessors

+ +
+
+

Methods

+ +
+
+
+
+
+

Constructors

+
+ +

constructor

+ + +
+
+
+

Accessors

+
+ +

blurFilterRadius

+
    +
  • get blurFilterRadius(): number
  • +
  • set blurFilterRadius(radius: number): void
  • +
+
    +
  • + +
    +
    +

    The current background blur filter radius in pixels.

    +
    +
    +

    Returns number

    +
  • +
  • + +
    +
    +

    Set a new background blur filter radius in pixels.

    +
    +
    +

    Parameters

    +
      +
    • +
      radius: number
      +
    • +
    +

    Returns void

    +
  • +
+
+
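As an illustrative sketch (assuming a blurBackground processor constructed as in the class example above), the radius can be read and adjusted at runtime:

// The documented default is 15 pixels; a larger radius produces a stronger blur.
console.log(blurBackground.blurFilterRadius); // 15 unless overridden
blurBackground.blurFilterRadius = 25;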
+ +

maskBlurRadius

+
    +
  • get maskBlurRadius(): number
  • +
  • set maskBlurRadius(radius: number): void
  • +
+
    +
  • + +
    +
    +

    The current blur radius when smoothing out the edges of the person's mask.

    +
    +
    +

    Returns number

    +
  • +
  • + +
    +
    +

    Set a new blur radius to be used when smoothing out the edges of the person's mask.

    +
    +
    +

    Parameters

    +
      +
    • +
      radius: number
      +
    • +
    +

    Returns void

    +
  • +
+
+
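Similarly, a hypothetical runtime adjustment of the mask smoothing, again assuming the blurBackground instance from the class example above:

// A slightly larger radius softens the person's outline; the documented default is 5.
blurBackground.maskBlurRadius = 8;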
+
+

Methods

+
+ +

loadModel

+
    +
  • loadModel(): Promise<void>
  • +
+
    +
  • + +
    +
    +

    Load the segmentation model. + Call this method before attaching the processor to ensure + video frames are processed correctly.

    +
    +
    +

    Returns Promise<void>

    +
  • +
+
+
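A minimal sketch of the call order described above, assuming track is the LocalVideoTrack created in the class example:

// Load the segmentation model first, then attach the processor to the track.
await blurBackground.loadModel();
track.addProcessor(blurBackground);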
+ +

processFrame

+
    +
  • processFrame(inputFrameBuffer: OffscreenCanvas, outputFrameBuffer: HTMLCanvasElement): Promise<void>
  • +
+
    +
  • + +
    +
    +

    Apply a transform to the background of an input video frame and leave + the foreground (person(s)) untouched. Any exception detected will + result in the frame being dropped. An illustrative invocation sketch follows below.

    +
    +
    +

    Parameters

    +
      +
    • +
      inputFrameBuffer: OffscreenCanvas
      +
      +

      The source of the input frame to process.

      +
      +
    • +
    • +
      outputFrameBuffer: HTMLCanvasElement
      +
      +

      The output frame buffer to use to draw the processed frame.

      +
      +
    • +
    +

    Returns Promise<void>

    +
  • +
+
+
+
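The sketch below shows a hypothetical manual invocation for illustration only; once the processor is added via track.addProcessor, the Video SDK supplies these buffers and calls processFrame for you. The videoElement source and the 640x480 buffer size are assumptions.

// Hypothetical manual call; normally the SDK drives this after track.addProcessor.
const input = new OffscreenCanvas(640, 480);
const output = document.createElement('canvas');
output.width = 640;
output.height = 480;

// Draw the current video frame into the input buffer before processing.
const videoElement = document.querySelector('video')!; // assumed source element
input.getContext('2d')!.drawImage(videoElement, 0, 0, 640, 480);

// Any exception thrown inside processFrame results in the frame being dropped.
await blurBackground.processFrame(input, output);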
+ +
+
+
+
+

+
+
+
+

Generated using TypeDoc

+
+
+ + + \ No newline at end of file diff --git a/dist/docs/classes/virtualbackgroundprocessor.html b/dist/docs/classes/virtualbackgroundprocessor.html new file mode 100644 index 0000000..2f3e718 --- /dev/null +++ b/dist/docs/classes/virtualbackgroundprocessor.html @@ -0,0 +1,434 @@ + + + + + + VirtualBackgroundProcessor | @twilio/video-processors + + + + + + +
+
+
+
+ +
+
+
+
+
+
+
+ +

Class VirtualBackgroundProcessor

+
+
+
+
+
+
+
+
+
+

The VirtualBackgroundProcessor, when added to a VideoTrack, + replaces the background in each video frame with a given image, + and leaves the foreground (person(s)) untouched. Each instance of + VirtualBackgroundProcessor should be added to only one VideoTrack + at a time to prevent overlapping of image data from multiple VideoTracks.

+
+
+
example
+
import { createLocalVideoTrack } from 'twilio-video';
+import { VirtualBackgroundProcessor } from '@twilio/video-processors';
+
+let virtualBackground;
+const img = new Image();
+
+img.onload = () => {
+  virtualBackground = new VirtualBackgroundProcessor({
+    assetsPath: 'https://my-server-path/assets',
+    backgroundImage: img,
+  });
+
+  virtualBackground.loadModel().then(() => {
+    createLocalVideoTrack({
+      width: 640,
+      height: 480,
+      frameRate: 24
+    }).then(track => {
+      track.addProcessor(virtualBackground);
+    });
+  });
+};
+img.src = '/background.jpg';
+
+
+
+
+
+
+

Hierarchy

+
    +
  • + BackgroundProcessor +
      +
    • + VirtualBackgroundProcessor +
    • +
    +
  • +
+
+
+

Index

+
+
+
+

Constructors

+ +
+
+

Accessors

+ +
+
+

Methods

+ +
+
+
+
+
+

Constructors

+
+ +

constructor

+ + +
+
+
+

Accessors

+
+ +

backgroundImage

+
    +
  • get backgroundImage(): HTMLImageElement
  • +
  • set backgroundImage(image: HTMLImageElement): void
  • +
+
    +
  • + +
    +
    +

    The HTMLImageElement representing the current background image.

    +
    +
    +

    Returns HTMLImageElement

    +
  • +
  • + +
    +
    +

    Set an HTMLImageElement as the new background image. + An error will be raised if the image hasn't been fully loaded yet. Additionally, the image must follow + security guidelines + when loading the image from a different origin. Failing to do so will result in an empty output frame. A sketch of swapping the image at runtime follows below.

    +
    +
    +

    Parameters

    +
      +
    • +
      image: HTMLImageElement
      +
    • +
    +

    Returns void

    +
  • +
+
+
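For illustration (the image path is hypothetical), a new background can be applied at runtime once the replacement image has finished loading:

const newImage = new Image();
newImage.onload = () => {
  // Assign only after the image has fully loaded; otherwise an error is raised.
  virtualBackground.backgroundImage = newImage;
};
newImage.src = '/office-background.jpg'; // hypothetical same-origin asset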
+ +

fitType

+ +
    +
  • + +
    +
    +

    The current ImageFit for positioning of the background image in the viewport.

    +
    +
    +

    Returns ImageFit

    +
  • +
  • + +
    +
    +

    Set a new ImageFit to be used for positioning the background image in the viewport.

    +
    +
    +

    Parameters

    + +

    Returns void

    +
  • +
+
+
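For example, to letterbox the image instead of stretching it (a minimal sketch, reusing the virtualBackground processor from the class example above):

import { ImageFit } from '@twilio/video-processors';

// Preserve the image's aspect ratio; empty space may appear if the ratios differ.
virtualBackground.fitType = ImageFit.Contain;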
+ +

maskBlurRadius

+
    +
  • get maskBlurRadius(): number
  • +
  • set maskBlurRadius(radius: number): void
  • +
+
    +
  • + +
    +
    +

    The current blur radius when smoothing out the edges of the person's mask.

    +
    +
    +

    Returns number

    +
  • +
  • + +
    +
    +

    Set a new blur radius to be used when smoothing out the edges of the person's mask.

    +
    +
    +

    Parameters

    +
      +
    • +
      radius: number
      +
    • +
    +

    Returns void

    +
  • +
+
+
+
+

Methods

+
+ +

loadModel

+
    +
  • loadModel(): Promise<void>
  • +
+
    +
  • + +
    +
    +

    Load the segmentation model. + Call this method before attaching the processor to ensure + video frames are processed correctly.

    +
    +
    +

    Returns Promise<void>

    +
  • +
+
+
+ +

processFrame

+
    +
  • processFrame(inputFrameBuffer: OffscreenCanvas, outputFrameBuffer: HTMLCanvasElement): Promise<void>
  • +
+
    +
  • + +
    +
    +

    Apply a transform to the background of an input video frame and leave + the foreground (person(s)) untouched. Any exception detected will + result in the frame being dropped.

    +
    +
    +

    Parameters

    +
      +
    • +
      inputFrameBuffer: OffscreenCanvas
      +
      +

      The source of the input frame to process.

      +
      +
    • +
    • +
      outputFrameBuffer: HTMLCanvasElement
      +
      +

      The output frame buffer to use to draw the processed frame.

      +
      +
    • +
    +

    Returns Promise<void>

    +
  • +
+
+
+
+ +
+
+
+
+

+
+
+
+

Generated using TypeDoc

+
+
+ + + \ No newline at end of file diff --git a/dist/docs/enums/imagefit.html b/dist/docs/enums/imagefit.html new file mode 100644 index 0000000..514c5fa --- /dev/null +++ b/dist/docs/enums/imagefit.html @@ -0,0 +1,221 @@ + + + + + + ImageFit | @twilio/video-processors + + + + + + +
+
+
+
+ +
+
+
+
+
+
+
+ +

Enumeration ImageFit

+
+
+
+
+
+
+
+
+
+

ImageFit specifies the positioning of an image inside a viewport.

+
+
+
+
+

Index

+
+
+
+

Enumeration members

+ +
+
+
+
+
+

Enumeration members

+
+ +

Contain

+
Contain: = "Contain"
+ +
+
+

Scale the image up or down to fill the viewport while preserving the aspect ratio. + The image will be fully visible but will add empty space in the viewport if + aspect ratios do not match.

+
+
+
+
+ +

Cover

+
Cover: = "Cover"
+ +
+
+

Scale the image to fill both height and width of the viewport while preserving + the aspect ratio, but will crop the image if aspect ratios do not match.

+
+
+
+
+ +

Fill

+
Fill: = "Fill"
+ +
+
+

Stretches the image to fill the viewport regardless of aspect ratio.

+
+
+
+
+ +

None

+
None: = "None"
+ +
+
+

Ignore height and width and use the original size.

+
+
+
+
+
+ +
+
+
+
+

+
+
+
+

Generated using TypeDoc

+
+
+ + + \ No newline at end of file diff --git a/dist/docs/index.html b/dist/docs/index.html new file mode 100644 index 0000000..9543583 --- /dev/null +++ b/dist/docs/index.html @@ -0,0 +1,176 @@ + + + + + + @twilio/video-processors + + + + + + +
+
+
+
+ +
+
+
+
+
+
+
+

@twilio/video-processors

+
+
+
+
+
+
+
+ +

Twilio Video Processors

+
+

Twilio Video Processors is a collection of video processing tools which can be used with Twilio Video JavaScript SDK to apply transformations and filters to a VideoTrack.

+

   See it live here!

+ +

Features

+
+

The following Video Processors are provided to apply transformations and filters to a person's background. You can also use them as a reference for creating your own Video Processors that can be used with Twilio Video JavaScript SDK.

+ + +

Prerequisites

+
+ + +

Installation

+
+ +

NPM

+
+

You can install directly from npm.

+
npm install @twilio/video-processors --save
+
+

Using this method, you can import twilio-video-processors like so:

+
import * as VideoProcessors from '@twilio/video-processors';
+
+ +

Script tag

+
+

You can also copy twilio-video-processors.js from the dist/build folder and include it directly in your web app using a <script> tag.

+
<script src="https://my-server-path/twilio-video-processors.js"></script>
+
+

Using this method, twilio-video-processors.js will set a browser global:

+
const VideoProcessors = Twilio.VideoProcessors;
+
+ +

Assets

+
+

In order to achieve the best performance, the VideoProcessors use WebAssembly to run TensorFlow Lite for person segmentation. You need to serve the tflite model and binaries so they can be loaded properly. These files can be downloaded from the dist/build folder. Check the API docs for details and the examples folder for reference.
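One possible deployment step is sketched below; the package layout and destination folder are assumptions, so adjust the paths to your own setup.

// copy-assets.ts: hypothetical build step (fs.cpSync requires Node 16.7+)
import * as fs from 'fs';

fs.cpSync(
  'node_modules/@twilio/video-processors/dist/build', // assumed package layout
  'public/assets',                                     // assumed static folder
  { recursive: true },
);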

+ +

Usage

+
+

These processors are only supported on chromium-based desktop browsers at this moment and will not work on other browsers. For best performance and accuracy, we recommend that, when calling Video.createLocalVideoTrack, the video capture constraints be set to 24 fps frame rate with 640x480 capture dimensions. Higher resolutions can still be used for increased accuracy, but may degrade performance, resulting in a lower output frame rate on low powered devices.
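The recommended capture settings correspond to the following constraints, the same values used in the processor examples in the API docs:

import { createLocalVideoTrack } from 'twilio-video';

// 640x480 at 24 fps balances segmentation accuracy and output frame rate.
const track = await createLocalVideoTrack({
  width: 640,
  height: 480,
  frameRate: 24,
});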

+

Additionally, these processors run TensorFlow Lite using the MediaPipe Selfie Segmentation Landscape Model and require Chrome's WebAssembly SIMD support in order to achieve the best performance. WebAssembly SIMD can be turned on by visiting chrome://flags on versions 84 through 90. It is enabled by default on Chrome 91+. You can also enable it on versions 84-90 for your users without turning on the flag by registering for a Chrome Origin Trial for your website.

+

Please check out the following pages for example usage. For more information, please refer to the API Docs.

+ +
+
+ +
+
+
+
+

+
+
+
+

Generated using TypeDoc

+
+
+ + + \ No newline at end of file diff --git a/dist/docs/interfaces/gaussianblurbackgroundprocessoroptions.html b/dist/docs/interfaces/gaussianblurbackgroundprocessoroptions.html new file mode 100644 index 0000000..51f65fb --- /dev/null +++ b/dist/docs/interfaces/gaussianblurbackgroundprocessoroptions.html @@ -0,0 +1,251 @@ + + + + + + GaussianBlurBackgroundProcessorOptions | @twilio/video-processors + + + + + + +
+
+
+
+ +
+
+
+
+
+
+
+ +

Interface GaussianBlurBackgroundProcessorOptions

+
+
+
+
+
+
+
+
+
+

Options passed to GaussianBlurBackgroundProcessor constructor.

+
+
+
+
+

Hierarchy

+
    +
  • + BackgroundProcessorOptions +
      +
    • + GaussianBlurBackgroundProcessorOptions +
    • +
    +
  • +
+
+
+

Index

+
+
+
+

Properties

+ +
+
+
+
+
+

Properties

+
+ +

assetsPath

+
assetsPath: string
+ +
+
+

    The VideoProcessors load assets dynamically depending on certain browser features. + You need to serve all the assets and provide the root path so they can be referenced properly. + These assets can be copied from the dist/build folder as part of your deployment process.

+
+
+
example
+

+
+ For virtual background: +
+
const virtualBackground = new VirtualBackgroundProcessor({
+  assetsPath: 'https://my-server-path/assets',
+  backgroundImage: img,
+});
+await virtualBackground.loadModel();
+
+
+ For blur background: +
+
const blurBackground = new GaussianBlurBackgroundProcessor({
+  assetsPath: 'https://my-server-path/assets'
+});
+await blurBackground.loadModel();
+
+
+
+
+
+
+ +

Optional blurFilterRadius

+
blurFilterRadius: undefined | number
+ +
+
+

The background blur filter radius to use in pixels.

+
+
+
default
+
15
+
+
+
+
+
+
+ +

Optional maskBlurRadius

+
maskBlurRadius: undefined | number
+ +
+
+

The blur radius to use when smoothing out the edges of the person's mask.

+
+
+
default
+
5
+
+
+
+
+
+
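Putting these options together, a minimal sketch with the documented defaults of 15 and 5 overridden (the assets URL is a placeholder):

import { GaussianBlurBackgroundProcessor } from '@twilio/video-processors';

const blurBackground = new GaussianBlurBackgroundProcessor({
  assetsPath: 'https://my-server-path/assets',
  blurFilterRadius: 10, // default is 15
  maskBlurRadius: 3,    // default is 5
});
await blurBackground.loadModel();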
+
+ +
+
+
+
+

+
+
+
+

Generated using TypeDoc

+
+
+ + + \ No newline at end of file diff --git a/dist/docs/interfaces/virtualbackgroundprocessoroptions.html b/dist/docs/interfaces/virtualbackgroundprocessoroptions.html new file mode 100644 index 0000000..ddf42ee --- /dev/null +++ b/dist/docs/interfaces/virtualbackgroundprocessoroptions.html @@ -0,0 +1,270 @@ + + + + + + VirtualBackgroundProcessorOptions | @twilio/video-processors + + + + + + +
+
+
+
+ +
+
+
+
+
+
+
+ +

Interface VirtualBackgroundProcessorOptions

+
+
+
+
+
+
+
+
+
+

Options passed to VirtualBackgroundProcessor constructor.

+
+
+
+
+

Hierarchy

+
    +
  • + BackgroundProcessorOptions +
      +
    • + VirtualBackgroundProcessorOptions +
    • +
    +
  • +
+
+
+

Index

+
+
+
+

Properties

+ +
+
+
+
+
+

Properties

+
+ +

assetsPath

+
assetsPath: string
+ +
+
+

    The VideoProcessors load assets dynamically depending on certain browser features. + You need to serve all the assets and provide the root path so they can be referenced properly. + These assets can be copied from the dist/build folder as part of your deployment process.

+
+
+
example
+

+
+ For virtual background: +
+
const virtualBackground = new VirtualBackgroundProcessor({
+  assetsPath: 'https://my-server-path/assets',
+  backgroundImage: img,
+});
+await virtualBackground.loadModel();
+
+
+ For blur background: +
+
const blurBackground = new GaussianBlurBackgroundProcessor({
+  assetsPath: 'https://my-server-path/assets'
+});
+await blurBackground.loadModel();
+
+
+
+
+
+
+ +

backgroundImage

+
backgroundImage: HTMLImageElement
+ +
+
+

The HTMLImageElement to use for background replacement. + An error will be raised if the image hasn't been fully loaded yet. Additionally, the image must follow + security guidelines + when loading the image from a different origin. Failing to do so will result in an empty output frame.

+
+
+
+
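+ For example, the background image could be loaded and decoded up front before constructing the processor (a minimal sketch only; the image and assets URLs are placeholders):
+
const img = new Image();
+img.crossOrigin = 'anonymous'; // needed when the image is served from a different origin
+img.src = 'https://my-server-path/background.jpg'; // placeholder URL
+await new Promise(resolve => { img.onload = resolve; });
+const virtualBackground = new VirtualBackgroundProcessor({
+  assetsPath: 'https://my-server-path/assets',
+  backgroundImage: img
+});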
+ +

Optional fitType

+
fitType: undefined | Contain | Cover | Fill | None
+ +
+
+

The ImageFit to use for positioning the background image in the viewport.

+
+
+
default
+
'Fill'
+
+
+
+
+
+
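+ For example, the fit type could be selected from the exported ImageFit enum (a minimal sketch only; img is assumed to be a fully loaded HTMLImageElement and the assets URL is a placeholder):
+
import { ImageFit, VirtualBackgroundProcessor } from '@twilio/video-processors';
+
+const virtualBackground = new VirtualBackgroundProcessor({
+  assetsPath: 'https://my-server-path/assets',
+  backgroundImage: img,
+  fitType: ImageFit.Cover // instead of the default ImageFit.Fill
+});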
+ +

Optional maskBlurRadius

+
maskBlurRadius: undefined | number
+ +
+
+

The blur radius to use when smoothing out the edges of the person's mask.

+
+
+
default
+
5
+
+
+
+
+
+
+
+ +
+
+
+
+

Legend

+
+
    +
  • Constructor
  • +
  • Method
  • +
  • Accessor
  • +
+
    +
  • Property
  • +
+
+
+
+
+

Generated using TypeDoc

+
+
+ + + \ No newline at end of file diff --git a/dist/docs/modules.html b/dist/docs/modules.html new file mode 100644 index 0000000..d53452f --- /dev/null +++ b/dist/docs/modules.html @@ -0,0 +1,190 @@ + + + + + + @twilio/video-processors + + + + + + +
+
+
+
+ +
+
+ Options +
+
+ All +
    +
  • Public
  • +
  • Public/Protected
  • +
  • All
  • +
+
+ + + + +
+
+ Menu +
+
+
+
+
+
+

@twilio/video-processors

+
+
+
+
+
+
+
+

Index

+
+
+
+

Enumerations

+ +
+
+

Classes

+ +
+
+

Interfaces

+ +
+
+

Variables

+ +
+
+
+
+
+

Variables

+
+ +

Const isSupported

+
isSupported: boolean = ...
+ +
+
+

Check if the current browser is officially supported by twilio-video-processors.js. + This is set to true for Chromium-based desktop browsers.

+
+
+
example
+
import { isSupported } from '@twilio/video-processors';
+
+if (isSupported) {
+  // Initialize the background processors
+}
+
+
+
+
+
+
+ +

Const version

+
version: string = '1.0.1'
+ +
+
+

The current version of the library.

+
+
+
+
+
+ +
+
+
+
+

Legend

+
+
    +
  • Constructor
  • +
  • Method
  • +
  • Accessor
  • +
+
    +
  • Property
  • +
+
+
+
+
+

Generated using TypeDoc

+
+
+ + + \ No newline at end of file diff --git a/es5/constants.d.ts b/es5/constants.d.ts new file mode 100644 index 0000000..84e4f34 --- /dev/null +++ b/es5/constants.d.ts @@ -0,0 +1,14 @@ +import { ModelConfig, PersonInferenceConfig } from '@tensorflow-models/body-pix/dist/body_pix_model'; +import { Dimensions } from './types'; +export declare const BLUR_FILTER_RADIUS = 15; +export declare const DEBOUNCE = 2; +export declare const MASK_BLUR_RADIUS = 5; +export declare const HISTORY_COUNT = 5; +export declare const PERSON_PROBABILITY_THRESHOLD = 0.4; +export declare const MODEL_NAME = "selfie_segmentation_landscape.tflite"; +export declare const TFLITE_LOADER_NAME = "tflite-1-0-0.js"; +export declare const TFLITE_SIMD_LOADER_NAME = "tflite-simd-1-0-0.js"; +export declare const MODEL_CONFIG: ModelConfig; +export declare const INFERENCE_CONFIG: PersonInferenceConfig; +export declare const BODYPIX_INFERENCE_DIMENSIONS: Dimensions; +export declare const WASM_INFERENCE_DIMENSIONS: Dimensions; diff --git a/es5/constants.js b/es5/constants.js new file mode 100644 index 0000000..992716d --- /dev/null +++ b/es5/constants.js @@ -0,0 +1,31 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.WASM_INFERENCE_DIMENSIONS = exports.BODYPIX_INFERENCE_DIMENSIONS = exports.INFERENCE_CONFIG = exports.MODEL_CONFIG = exports.TFLITE_SIMD_LOADER_NAME = exports.TFLITE_LOADER_NAME = exports.MODEL_NAME = exports.PERSON_PROBABILITY_THRESHOLD = exports.HISTORY_COUNT = exports.MASK_BLUR_RADIUS = exports.DEBOUNCE = exports.BLUR_FILTER_RADIUS = void 0; +exports.BLUR_FILTER_RADIUS = 15; +exports.DEBOUNCE = 2; +exports.MASK_BLUR_RADIUS = 5; +exports.HISTORY_COUNT = 5; +exports.PERSON_PROBABILITY_THRESHOLD = 0.4; +exports.MODEL_NAME = 'selfie_segmentation_landscape.tflite'; +exports.TFLITE_LOADER_NAME = 'tflite-1-0-0.js'; +exports.TFLITE_SIMD_LOADER_NAME = 'tflite-simd-1-0-0.js'; +exports.MODEL_CONFIG = { + architecture: 'MobileNetV1', + outputStride: 16, + multiplier: 0.75, + quantBytes: 4, +}; +exports.INFERENCE_CONFIG = { + internalResolution: 1, + maxDetections: 1, + segmentationThreshold: 0.75, +}; +exports.BODYPIX_INFERENCE_DIMENSIONS = { + width: 224, + height: 224, +}; +exports.WASM_INFERENCE_DIMENSIONS = { + width: 256, + height: 144, +}; +//# sourceMappingURL=constants.js.map \ No newline at end of file diff --git a/es5/constants.js.map b/es5/constants.js.map new file mode 100644 index 0000000..3881438 --- /dev/null +++ b/es5/constants.js.map @@ -0,0 +1 @@ +{"version":3,"file":"constants.js","sourceRoot":"","sources":["../lib/constants.ts"],"names":[],"mappings":";;;AAGa,QAAA,kBAAkB,GAAG,EAAE,CAAC;AACxB,QAAA,QAAQ,GAAG,CAAC,CAAC;AACb,QAAA,gBAAgB,GAAG,CAAC,CAAC;AACrB,QAAA,aAAa,GAAG,CAAC,CAAC;AAClB,QAAA,4BAA4B,GAAG,GAAG,CAAC;AACnC,QAAA,UAAU,GAAG,sCAAsC,CAAC;AACpD,QAAA,kBAAkB,GAAG,iBAAiB,CAAC;AACvC,QAAA,uBAAuB,GAAG,sBAAsB,CAAC;AAEjD,QAAA,YAAY,GAAgB;IACvC,YAAY,EAAE,aAAa;IAC3B,YAAY,EAAE,EAAE;IAChB,UAAU,EAAE,IAAI;IAChB,UAAU,EAAE,CAAC;CACd,CAAC;AAEW,QAAA,gBAAgB,GAA0B;IACrD,kBAAkB,EAAE,CAAC;IACrB,aAAa,EAAE,CAAC;IAChB,qBAAqB,EAAE,IAAI;CAC5B,CAAC;AAEW,QAAA,4BAA4B,GAAe;IACtD,KAAK,EAAE,GAAG;IACV,MAAM,EAAE,GAAG;CACZ,CAAC;AAEW,QAAA,yBAAyB,GAAe;IACnD,KAAK,EAAE,GAAG;IACV,MAAM,EAAE,GAAG;CACZ,CAAC","sourcesContent":["import { ModelConfig, PersonInferenceConfig } from '@tensorflow-models/body-pix/dist/body_pix_model';\nimport { Dimensions } from './types';\n\nexport const BLUR_FILTER_RADIUS = 15;\nexport const DEBOUNCE = 2;\nexport const MASK_BLUR_RADIUS = 5;\nexport const HISTORY_COUNT = 
5;\nexport const PERSON_PROBABILITY_THRESHOLD = 0.4;\nexport const MODEL_NAME = 'selfie_segmentation_landscape.tflite';\nexport const TFLITE_LOADER_NAME = 'tflite-1-0-0.js';\nexport const TFLITE_SIMD_LOADER_NAME = 'tflite-simd-1-0-0.js';\n\nexport const MODEL_CONFIG: ModelConfig = {\n architecture: 'MobileNetV1',\n outputStride: 16,\n multiplier: 0.75,\n quantBytes: 4,\n};\n\nexport const INFERENCE_CONFIG: PersonInferenceConfig = {\n internalResolution: 1,\n maxDetections: 1,\n segmentationThreshold: 0.75,\n};\n\nexport const BODYPIX_INFERENCE_DIMENSIONS: Dimensions = {\n width: 224,\n height: 224,\n};\n\nexport const WASM_INFERENCE_DIMENSIONS: Dimensions = {\n width: 256,\n height: 144,\n};\n"]} \ No newline at end of file diff --git a/es5/index.d.ts b/es5/index.d.ts new file mode 100644 index 0000000..bde98a9 --- /dev/null +++ b/es5/index.d.ts @@ -0,0 +1,6 @@ +import { GaussianBlurBackgroundProcessor, GaussianBlurBackgroundProcessorOptions } from './processors/background/GaussianBlurBackgroundProcessor'; +import { VirtualBackgroundProcessor, VirtualBackgroundProcessorOptions } from './processors/background/VirtualBackgroundProcessor'; +import { ImageFit } from './types'; +import { isSupported } from './utils/support'; +import { version } from './utils/version'; +export { GaussianBlurBackgroundProcessor, GaussianBlurBackgroundProcessorOptions, ImageFit, isSupported, version, VirtualBackgroundProcessor, VirtualBackgroundProcessorOptions, }; diff --git a/es5/index.js b/es5/index.js new file mode 100644 index 0000000..6c063af --- /dev/null +++ b/es5/index.js @@ -0,0 +1,31 @@ +"use strict"; +var __assign = (this && this.__assign) || function () { + __assign = Object.assign || function(t) { + for (var s, i = 1, n = arguments.length; i < n; i++) { + s = arguments[i]; + for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) + t[p] = s[p]; + } + return t; + }; + return __assign.apply(this, arguments); +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.VirtualBackgroundProcessor = exports.version = exports.isSupported = exports.ImageFit = exports.GaussianBlurBackgroundProcessor = void 0; +var GaussianBlurBackgroundProcessor_1 = require("./processors/background/GaussianBlurBackgroundProcessor"); +Object.defineProperty(exports, "GaussianBlurBackgroundProcessor", { enumerable: true, get: function () { return GaussianBlurBackgroundProcessor_1.GaussianBlurBackgroundProcessor; } }); +var VirtualBackgroundProcessor_1 = require("./processors/background/VirtualBackgroundProcessor"); +Object.defineProperty(exports, "VirtualBackgroundProcessor", { enumerable: true, get: function () { return VirtualBackgroundProcessor_1.VirtualBackgroundProcessor; } }); +var types_1 = require("./types"); +Object.defineProperty(exports, "ImageFit", { enumerable: true, get: function () { return types_1.ImageFit; } }); +var support_1 = require("./utils/support"); +Object.defineProperty(exports, "isSupported", { enumerable: true, get: function () { return support_1.isSupported; } }); +var version_1 = require("./utils/version"); +Object.defineProperty(exports, "version", { enumerable: true, get: function () { return version_1.version; } }); +window.Twilio = window.Twilio || {}; +window.Twilio.VideoProcessors = __assign(__assign({}, window.Twilio.VideoProcessors), { GaussianBlurBackgroundProcessor: GaussianBlurBackgroundProcessor_1.GaussianBlurBackgroundProcessor, + ImageFit: types_1.ImageFit, + isSupported: support_1.isSupported, + version: version_1.version, + VirtualBackgroundProcessor: 
VirtualBackgroundProcessor_1.VirtualBackgroundProcessor }); +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/es5/index.js.map b/es5/index.js.map new file mode 100644 index 0000000..507f783 --- /dev/null +++ b/es5/index.js.map @@ -0,0 +1 @@ +{"version":3,"file":"index.js","sourceRoot":"","sources":["../lib/index.ts"],"names":[],"mappings":";;;;;;;;;;;;;;AAAA,2GAAkJ;AAiBhJ,gHAjBO,iEAA+B,OAiBP;AAhBjC,iGAAmI;AAqBjI,2GArBO,uDAA0B,OAqBP;AApB5B,iCAAmC;AAiBjC,yFAjBO,gBAAQ,OAiBP;AAhBV,2CAA8C;AAiB5C,4FAjBO,qBAAW,OAiBP;AAhBb,2CAA0C;AAiBxC,wFAjBO,iBAAO,OAiBP;AAfT,MAAM,CAAC,MAAM,GAAG,MAAM,CAAC,MAAM,IAAI,EAAE,CAAC;AACpC,MAAM,CAAC,MAAM,CAAC,eAAe,yBACxB,MAAM,CAAC,MAAM,CAAC,eAAe,KAChC,+BAA+B,mEAAA;IAC/B,QAAQ,kBAAA;IACR,WAAW,uBAAA;IACX,OAAO,mBAAA;IACP,0BAA0B,yDAAA,GAC3B,CAAC","sourcesContent":["import { GaussianBlurBackgroundProcessor, GaussianBlurBackgroundProcessorOptions } from './processors/background/GaussianBlurBackgroundProcessor';\nimport { VirtualBackgroundProcessor, VirtualBackgroundProcessorOptions } from './processors/background/VirtualBackgroundProcessor';\nimport { ImageFit } from './types';\nimport { isSupported } from './utils/support';\nimport { version } from './utils/version';\n\nwindow.Twilio = window.Twilio || {};\nwindow.Twilio.VideoProcessors = {\n ...window.Twilio.VideoProcessors,\n GaussianBlurBackgroundProcessor,\n ImageFit,\n isSupported,\n version,\n VirtualBackgroundProcessor,\n};\n\nexport {\n GaussianBlurBackgroundProcessor,\n GaussianBlurBackgroundProcessorOptions,\n ImageFit,\n isSupported,\n version,\n VirtualBackgroundProcessor,\n VirtualBackgroundProcessorOptions,\n};\n"]} \ No newline at end of file diff --git a/es5/processors/Processor.d.ts b/es5/processors/Processor.d.ts new file mode 100644 index 0000000..1ec81b6 --- /dev/null +++ b/es5/processors/Processor.d.ts @@ -0,0 +1,13 @@ +/** + * @private + * The [[Processor]] is an abstract class for building your own custom processors. + */ +export declare abstract class Processor { + /** + * Applies a transform to an input frame and draw the results to an output frame buffer. + * The frame will be dropped if this method raises an exception. + * @param inputFrameBuffer - The source of the input frame to process. + * @param outputFrameBuffer - The output frame buffer to use to draw the processed frame. + */ + abstract processFrame(inputFrameBuffer: OffscreenCanvas, outputFrameBuffer: HTMLCanvasElement): Promise | void; +} diff --git a/es5/processors/Processor.js b/es5/processors/Processor.js new file mode 100644 index 0000000..3bf6156 --- /dev/null +++ b/es5/processors/Processor.js @@ -0,0 +1,14 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.Processor = void 0; +/** + * @private + * The [[Processor]] is an abstract class for building your own custom processors. 
+ */ +var Processor = /** @class */ (function () { + function Processor() { + } + return Processor; +}()); +exports.Processor = Processor; +//# sourceMappingURL=Processor.js.map \ No newline at end of file diff --git a/es5/processors/Processor.js.map b/es5/processors/Processor.js.map new file mode 100644 index 0000000..44fa62d --- /dev/null +++ b/es5/processors/Processor.js.map @@ -0,0 +1 @@ +{"version":3,"file":"Processor.js","sourceRoot":"","sources":["../../lib/processors/Processor.ts"],"names":[],"mappings":";;;AAAA;;;GAGG;AACH;IAAA;IASA,CAAC;IAAD,gBAAC;AAAD,CAAC,AATD,IASC;AATqB,8BAAS","sourcesContent":["/**\n * @private\n * The [[Processor]] is an abstract class for building your own custom processors.\n */\nexport abstract class Processor {\n\n /**\n * Applies a transform to an input frame and draw the results to an output frame buffer.\n * The frame will be dropped if this method raises an exception.\n * @param inputFrameBuffer - The source of the input frame to process.\n * @param outputFrameBuffer - The output frame buffer to use to draw the processed frame.\n */\n abstract processFrame(inputFrameBuffer: OffscreenCanvas, outputFrameBuffer: HTMLCanvasElement): Promise | void;\n}\n"]} \ No newline at end of file diff --git a/es5/processors/background/BackgroundProcessor.d.ts b/es5/processors/background/BackgroundProcessor.d.ts new file mode 100644 index 0000000..6ad2843 --- /dev/null +++ b/es5/processors/background/BackgroundProcessor.d.ts @@ -0,0 +1,135 @@ +import '@tensorflow/tfjs-backend-webgl'; +import '@tensorflow/tfjs-backend-cpu'; +import { PersonInferenceConfig } from '@tensorflow-models/body-pix/dist/body_pix_model'; +import { Processor } from '../Processor'; +import { Dimensions } from '../../types'; +/** + * @private + */ +export interface BackgroundProcessorOptions { + /** + * The VideoProcessors load assets dynamically depending on certain browser features. + * You need to serve all the assets and provide the root path so they can be referenced properly. + * These assets can be copied from the `dist/build` folder which you can add as part of your deployment process. + * @example + *
+ *
+ * For virtual background: + *
+ * + * ```ts + * const virtualBackground = new VirtualBackgroundProcessor({ + * assetsPath: 'https://my-server-path/assets', + * backgroundImage: img, + * }); + * await virtualBackground.loadModel(); + * ``` + * + *
+ * For blur background: + *
+ * + * ```ts + * const blurBackground = new GaussianBlurBackgroundProcessor({ + * assetsPath: 'https://my-server-path/assets' + * }); + * await blurBackground.loadModel(); + * ``` + */ + assetsPath: string; + /** + * @private + */ + debounce?: number; + /** + * @private + */ + historyCount?: number; + /** + * @private + */ + inferenceConfig?: PersonInferenceConfig; + /** + * @private + */ + inferenceDimensions?: Dimensions; + /** + * The blur radius to use when smoothing out the edges of the person's mask. + * @default + * ```html + * 5 + * ``` + */ + maskBlurRadius?: number; + /** + * @private + */ + personProbabilityThreshold?: number; + /** + * @private + */ + useWasm?: boolean; +} +/** + * @private + */ +export declare abstract class BackgroundProcessor extends Processor { + private static _model; + private static _loadModel; + protected _outputCanvas: HTMLCanvasElement; + protected _outputContext: CanvasRenderingContext2D; + private _assetsPath; + private _benchmark; + private _currentMask; + private _debounce; + private _dummyImageData; + private _historyCount; + private _inferenceConfig; + private _inferenceDimensions; + private _inputCanvas; + private _inputContext; + private _inputMemoryOffset; + private _isSimdEnabled; + private _maskBlurRadius; + private _maskCanvas; + private _maskContext; + private _masks; + private _maskUsageCounter; + private _outputMemoryOffset; + private _personProbabilityThreshold; + private _tflite; + private _useWasm; + private readonly _version; + constructor(options: BackgroundProcessorOptions); + /** + * The current blur radius when smoothing out the edges of the person's mask. + */ + get maskBlurRadius(): number; + /** + * Set a new blur radius to be used when smoothing out the edges of the person's mask. + */ + set maskBlurRadius(radius: number); + /** + * Load the segmentation model. + * Call this method before attaching the processor to ensure + * video frames are processed correctly. + */ + loadModel(): Promise; + /** + * Apply a transform to the background of an input video frame and leaving + * the foreground (person(s)) untouched. Any exception detected will + * result in the frame being dropped. + * @param inputFrameBuffer - The source of the input frame to process. + * @param outputFrameBuffer - The output frame buffer to use to draw the processed frame. + */ + processFrame(inputFrameBuffer: OffscreenCanvas, outputFrameBuffer: HTMLCanvasElement): Promise; + protected abstract _setBackground(inputFrame: OffscreenCanvas): void; + private _addMask; + private _applyAlpha; + private _createPersonMask; + private _getResizedInputImageData; + private _loadJs; + private _loadTwilioTfLite; + private _runBodyPixInference; + private _runTwilioTfLiteInference; +} diff --git a/es5/processors/background/BackgroundProcessor.js b/es5/processors/background/BackgroundProcessor.js new file mode 100644 index 0000000..e3e6f8c --- /dev/null +++ b/es5/processors/background/BackgroundProcessor.js @@ -0,0 +1,380 @@ +"use strict"; +var __extends = (this && this.__extends) || (function () { + var extendStatics = function (d, b) { + extendStatics = Object.setPrototypeOf || + ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || + function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; }; + return extendStatics(d, b); + }; + return function (d, b) { + extendStatics(d, b); + function __() { this.constructor = d; } + d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __()); + }; +})(); +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +var __generator = (this && this.__generator) || function (thisArg, body) { + var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g; + return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g; + function verb(n) { return function (v) { return step([n, v]); }; } + function step(op) { + if (f) throw new TypeError("Generator is already executing."); + while (_) try { + if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t; + if (y = 0, t) op = [op[0] & 2, t.value]; + switch (op[0]) { + case 0: case 1: t = op; break; + case 4: _.label++; return { value: op[1], done: false }; + case 5: _.label++; y = op[1]; op = [0]; continue; + case 7: op = _.ops.pop(); _.trys.pop(); continue; + default: + if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; } + if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; } + if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; } + if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; } + if (t[2]) _.ops.pop(); + _.trys.pop(); continue; + } + op = body.call(thisArg, _); + } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; } + if (op[0] & 5) throw op[1]; return { value: op[0] ? 
op[1] : void 0, done: true }; + } +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.BackgroundProcessor = void 0; +require("@tensorflow/tfjs-backend-webgl"); +require("@tensorflow/tfjs-backend-cpu"); +var body_pix_1 = require("@tensorflow-models/body-pix"); +var Processor_1 = require("../Processor"); +var Benchmark_1 = require("../../utils/Benchmark"); +var version_1 = require("../../utils/version"); +var constants_1 = require("../../constants"); +/** + * @private + */ +var BackgroundProcessor = /** @class */ (function (_super) { + __extends(BackgroundProcessor, _super); + function BackgroundProcessor(options) { + var _this = _super.call(this) || this; + _this._currentMask = new Uint8ClampedArray(); + _this._debounce = constants_1.DEBOUNCE; + _this._dummyImageData = new ImageData(1, 1); + _this._historyCount = constants_1.HISTORY_COUNT; + _this._inferenceConfig = constants_1.INFERENCE_CONFIG; + _this._inferenceDimensions = constants_1.WASM_INFERENCE_DIMENSIONS; + _this._inputMemoryOffset = 0; + // tslint:disable-next-line no-unused-variable + _this._isSimdEnabled = null; + _this._maskBlurRadius = constants_1.MASK_BLUR_RADIUS; + _this._maskUsageCounter = 0; + _this._outputMemoryOffset = 0; + _this._personProbabilityThreshold = constants_1.PERSON_PROBABILITY_THRESHOLD; + // tslint:disable-next-line no-unused-variable + _this._version = version_1.version; + if (typeof options.assetsPath !== 'string') { + throw new Error('assetsPath parameter is missing'); + } + var assetsPath = options.assetsPath; + if (assetsPath && assetsPath[assetsPath.length - 1] !== '/') { + assetsPath += '/'; + } + _this.maskBlurRadius = options.maskBlurRadius; + _this._assetsPath = assetsPath; + _this._debounce = options.debounce || constants_1.DEBOUNCE; + _this._historyCount = options.historyCount || constants_1.HISTORY_COUNT; + _this._inferenceConfig = options.inferenceConfig || constants_1.INFERENCE_CONFIG; + _this._personProbabilityThreshold = options.personProbabilityThreshold || constants_1.PERSON_PROBABILITY_THRESHOLD; + _this._useWasm = typeof options.useWasm === 'boolean' ? options.useWasm : true; + _this._inferenceDimensions = options.inferenceDimensions || + (_this._useWasm ? constants_1.WASM_INFERENCE_DIMENSIONS : constants_1.BODYPIX_INFERENCE_DIMENSIONS); + _this._benchmark = new Benchmark_1.Benchmark(); + _this._inputCanvas = document.createElement('canvas'); + _this._inputContext = _this._inputCanvas.getContext('2d'); + _this._maskCanvas = new OffscreenCanvas(1, 1); + _this._maskContext = _this._maskCanvas.getContext('2d'); + _this._outputCanvas = document.createElement('canvas'); + _this._outputContext = _this._outputCanvas.getContext('2d'); + _this._masks = []; + return _this; + } + BackgroundProcessor._loadModel = function (config) { + if (config === void 0) { config = constants_1.MODEL_CONFIG; } + return __awaiter(this, void 0, void 0, function () { + var _a; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: + _a = BackgroundProcessor; + return [4 /*yield*/, body_pix_1.load(config) + .catch(function (error) { return console.error('Unable to load model.', error); })]; + case 1: + _a._model = (_b.sent()) || null; + return [2 /*return*/]; + } + }); + }); + }; + Object.defineProperty(BackgroundProcessor.prototype, "maskBlurRadius", { + /** + * The current blur radius when smoothing out the edges of the person's mask. 
+ */ + get: function () { + return this._maskBlurRadius; + }, + /** + * Set a new blur radius to be used when smoothing out the edges of the person's mask. + */ + set: function (radius) { + if (typeof radius !== 'number' || radius < 0) { + console.warn("Valid mask blur radius not found. Using " + constants_1.MASK_BLUR_RADIUS + " as default."); + radius = constants_1.MASK_BLUR_RADIUS; + } + this._maskBlurRadius = radius; + }, + enumerable: false, + configurable: true + }); + /** + * Load the segmentation model. + * Call this method before attaching the processor to ensure + * video frames are processed correctly. + */ + BackgroundProcessor.prototype.loadModel = function () { + return __awaiter(this, void 0, void 0, function () { + var _a, tflite, modelResponse, model, modelBufferOffset; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: return [4 /*yield*/, Promise.all([ + BackgroundProcessor._loadModel(), + this._loadTwilioTfLite(), + fetch(this._assetsPath + constants_1.MODEL_NAME), + ])]; + case 1: + _a = _b.sent(), tflite = _a[1], modelResponse = _a[2]; + return [4 /*yield*/, modelResponse.arrayBuffer()]; + case 2: + model = _b.sent(); + modelBufferOffset = tflite._getModelBufferMemoryOffset(); + tflite.HEAPU8.set(new Uint8Array(model), modelBufferOffset); + tflite._loadModel(model.byteLength); + this._inputMemoryOffset = tflite._getInputMemoryOffset() / 4; + this._outputMemoryOffset = tflite._getOutputMemoryOffset() / 4; + this._tflite = tflite; + return [2 /*return*/]; + } + }); + }); + }; + /** + * Apply a transform to the background of an input video frame and leaving + * the foreground (person(s)) untouched. Any exception detected will + * result in the frame being dropped. + * @param inputFrameBuffer - The source of the input frame to process. + * @param outputFrameBuffer - The output frame buffer to use to draw the processed frame. 
+ */ + BackgroundProcessor.prototype.processFrame = function (inputFrameBuffer, outputFrameBuffer) { + return __awaiter(this, void 0, void 0, function () { + var inputFrame, captureWidth, captureHeight, _a, inferenceWidth, inferenceHeight, reInitDummyImage, personMask; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: + if (!BackgroundProcessor._model || !this._tflite) { + return [2 /*return*/]; + } + if (!inputFrameBuffer || !outputFrameBuffer) { + throw new Error('Missing input or output frame buffer'); + } + this._benchmark.end('captureFrameDelay'); + this._benchmark.start('processFrameDelay'); + inputFrame = inputFrameBuffer; + captureWidth = inputFrame.width, captureHeight = inputFrame.height; + _a = this._inferenceDimensions, inferenceWidth = _a.width, inferenceHeight = _a.height; + if (this._outputCanvas !== outputFrameBuffer) { + this._outputCanvas = outputFrameBuffer; + this._outputContext = outputFrameBuffer.getContext('2d'); + } + reInitDummyImage = false; + if (this._inputCanvas.width !== inferenceWidth) { + this._inputCanvas.width = inferenceWidth; + this._maskCanvas.width = inferenceWidth; + reInitDummyImage = true; + } + if (this._inputCanvas.height !== inferenceHeight) { + this._inputCanvas.height = inferenceHeight; + this._maskCanvas.height = inferenceHeight; + reInitDummyImage = true; + } + if (reInitDummyImage) { + this._dummyImageData = new ImageData(new Uint8ClampedArray(inferenceWidth * inferenceHeight * 4), inferenceWidth, inferenceHeight); + } + return [4 /*yield*/, this._createPersonMask(inputFrame)]; + case 1: + personMask = _b.sent(); + this._benchmark.start('imageCompositionDelay'); + this._maskContext.putImageData(personMask, 0, 0); + this._outputContext.save(); + this._outputContext.filter = "blur(" + this._maskBlurRadius + "px)"; + this._outputContext.globalCompositeOperation = 'copy'; + this._outputContext.drawImage(this._maskCanvas, 0, 0, captureWidth, captureHeight); + this._outputContext.filter = 'none'; + this._outputContext.globalCompositeOperation = 'source-in'; + this._outputContext.drawImage(inputFrame, 0, 0, captureWidth, captureHeight); + this._outputContext.globalCompositeOperation = 'destination-over'; + this._setBackground(inputFrame); + this._outputContext.restore(); + this._benchmark.end('imageCompositionDelay'); + this._benchmark.end('processFrameDelay'); + this._benchmark.end('totalProcessingDelay'); + // NOTE (csantos): Start the benchmark from here so we can include the delay from the Video sdk + // for a more accurate fps + this._benchmark.start('totalProcessingDelay'); + this._benchmark.start('captureFrameDelay'); + return [2 /*return*/]; + } + }); + }); + }; + BackgroundProcessor.prototype._addMask = function (mask) { + if (this._masks.length >= this._historyCount) { + this._masks.splice(0, this._masks.length - this._historyCount + 1); + } + this._masks.push(mask); + }; + BackgroundProcessor.prototype._applyAlpha = function (imageData) { + var weightedSum = this._masks.reduce(function (sum, mask, j) { return sum + (j + 1) * (j + 1); }, 0); + var pixels = imageData.height * imageData.width; + var _loop_1 = function (i) { + var w = this_1._masks.reduce(function (sum, mask, j) { return sum + mask[i] * (j + 1) * (j + 1); }, 0) / weightedSum; + imageData.data[i * 4 + 3] = Math.round(w * 255); + }; + var this_1 = this; + for (var i = 0; i < pixels; i++) { + _loop_1(i); + } + }; + BackgroundProcessor.prototype._createPersonMask = function (inputFrame) { + return __awaiter(this, void 0, void 0, function () { + var 
imageData, shouldRunInference, _a, _b; + return __generator(this, function (_c) { + switch (_c.label) { + case 0: + imageData = this._dummyImageData; + shouldRunInference = this._maskUsageCounter < 1; + this._benchmark.start('inputImageResizeDelay'); + if (shouldRunInference) { + imageData = this._getResizedInputImageData(inputFrame); + } + this._benchmark.end('inputImageResizeDelay'); + this._benchmark.start('segmentationDelay'); + if (!shouldRunInference) return [3 /*break*/, 4]; + _a = this; + if (!this._useWasm) return [3 /*break*/, 1]; + _b = this._runTwilioTfLiteInference(imageData); + return [3 /*break*/, 3]; + case 1: return [4 /*yield*/, this._runBodyPixInference(imageData)]; + case 2: + _b = _c.sent(); + _c.label = 3; + case 3: + _a._currentMask = _b; + this._maskUsageCounter = this._debounce; + _c.label = 4; + case 4: + this._addMask(this._currentMask); + this._applyAlpha(imageData); + this._maskUsageCounter--; + this._benchmark.end('segmentationDelay'); + return [2 /*return*/, imageData]; + } + }); + }); + }; + BackgroundProcessor.prototype._getResizedInputImageData = function (inputFrame) { + var _a = this._inputCanvas, width = _a.width, height = _a.height; + this._inputContext.drawImage(inputFrame, 0, 0, width, height); + var imageData = this._inputContext.getImageData(0, 0, width, height); + return imageData; + }; + BackgroundProcessor.prototype._loadJs = function (url) { + return new Promise(function (resolve, reject) { + var script = document.createElement('script'); + script.onload = function () { return resolve(); }; + script.onerror = reject; + document.head.append(script); + script.src = url; + }); + }; + BackgroundProcessor.prototype._loadTwilioTfLite = function () { + return __awaiter(this, void 0, void 0, function () { + var tflite, _a; + return __generator(this, function (_b) { + switch (_b.label) { + case 0: return [4 /*yield*/, this._loadJs(this._assetsPath + constants_1.TFLITE_SIMD_LOADER_NAME)]; + case 1: + _b.sent(); + _b.label = 2; + case 2: + _b.trys.push([2, 4, , 7]); + return [4 /*yield*/, window.createTwilioTFLiteSIMDModule()]; + case 3: + tflite = _b.sent(); + this._isSimdEnabled = true; + return [3 /*break*/, 7]; + case 4: + _a = _b.sent(); + console.warn('SIMD not supported. 
You may experience poor quality of background replacement.'); + return [4 /*yield*/, this._loadJs(this._assetsPath + constants_1.TFLITE_LOADER_NAME)]; + case 5: + _b.sent(); + return [4 /*yield*/, window.createTwilioTFLiteModule()]; + case 6: + tflite = _b.sent(); + this._isSimdEnabled = false; + return [3 /*break*/, 7]; + case 7: return [2 /*return*/, tflite]; + } + }); + }); + }; + BackgroundProcessor.prototype._runBodyPixInference = function (inputImage) { + return __awaiter(this, void 0, void 0, function () { + var segment; + return __generator(this, function (_a) { + switch (_a.label) { + case 0: return [4 /*yield*/, BackgroundProcessor._model.segmentPerson(inputImage, this._inferenceConfig)]; + case 1: + segment = _a.sent(); + return [2 /*return*/, segment.data]; + } + }); + }); + }; + BackgroundProcessor.prototype._runTwilioTfLiteInference = function (inputImage) { + var _a = this, _b = _a._inferenceDimensions, width = _b.width, height = _b.height, offset = _a._inputMemoryOffset, tflite = _a._tflite; + var pixels = width * height; + for (var i = 0; i < pixels; i++) { + tflite.HEAPF32[offset + i * 3] = inputImage.data[i * 4] / 255; + tflite.HEAPF32[offset + i * 3 + 1] = inputImage.data[i * 4 + 1] / 255; + tflite.HEAPF32[offset + i * 3 + 2] = inputImage.data[i * 4 + 2] / 255; + } + tflite._runInference(); + var inferenceData = new Uint8ClampedArray(pixels * 4); + for (var i = 0; i < pixels; i++) { + var personProbability = tflite.HEAPF32[this._outputMemoryOffset + i]; + inferenceData[i] = Number(personProbability >= this._personProbabilityThreshold) * personProbability; + } + return inferenceData; + }; + BackgroundProcessor._model = null; + return BackgroundProcessor; +}(Processor_1.Processor)); +exports.BackgroundProcessor = BackgroundProcessor; +//# sourceMappingURL=BackgroundProcessor.js.map \ No newline at end of file diff --git a/es5/processors/background/BackgroundProcessor.js.map b/es5/processors/background/BackgroundProcessor.js.map new file mode 100644 index 0000000..0a81ead --- /dev/null +++ b/es5/processors/background/BackgroundProcessor.js.map @@ -0,0 +1 @@ 
+{"version":3,"file":"BackgroundProcessor.js","sourceRoot":"","sources":["../../../lib/processors/background/BackgroundProcessor.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA,0CAAwC;AACxC,wCAAsC;AAEtC,wDAAyE;AACzE,0CAAyC;AACzC,mDAAkD;AAClD,+CAA8C;AAG9C,6CAYyB;AA6EzB;;GAEG;AACH;IAAkD,uCAAS;IAkCzD,6BAAY,OAAmC;QAA/C,YACE,iBAAO,SA4BR;QApDO,kBAAY,GAAmC,IAAI,iBAAiB,EAAE,CAAC;QACvE,eAAS,GAAW,oBAAQ,CAAC;QAC7B,qBAAe,GAAc,IAAI,SAAS,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;QACjD,mBAAa,GAAW,yBAAa,CAAC;QACtC,sBAAgB,GAA0B,4BAAgB,CAAC;QAC3D,0BAAoB,GAAe,qCAAyB,CAAC;QAG7D,wBAAkB,GAAW,CAAC,CAAC;QACvC,8CAA8C;QACtC,oBAAc,GAAmB,IAAI,CAAC;QACtC,qBAAe,GAAW,4BAAgB,CAAC;QAI3C,uBAAiB,GAAW,CAAC,CAAC;QAC9B,yBAAmB,GAAW,CAAC,CAAC;QAChC,iCAA2B,GAAW,wCAA4B,CAAC;QAG3E,8CAA8C;QAC7B,cAAQ,GAAW,iBAAO,CAAC;QAK1C,IAAI,OAAO,OAAO,CAAC,UAAU,KAAK,QAAQ,EAAE;YAC1C,MAAM,IAAI,KAAK,CAAC,iCAAiC,CAAC,CAAC;SACpD;QACD,IAAI,UAAU,GAAG,OAAO,CAAC,UAAU,CAAC;QACpC,IAAI,UAAU,IAAI,UAAU,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,CAAC,KAAK,GAAG,EAAE;YAC3D,UAAU,IAAI,GAAG,CAAC;SACnB;QAED,KAAI,CAAC,cAAc,GAAG,OAAO,CAAC,cAAe,CAAC;QAC9C,KAAI,CAAC,WAAW,GAAG,UAAU,CAAC;QAC9B,KAAI,CAAC,SAAS,GAAG,OAAO,CAAC,QAAS,IAAI,oBAAQ,CAAC;QAC/C,KAAI,CAAC,aAAa,GAAG,OAAO,CAAC,YAAa,IAAI,yBAAa,CAAC;QAC5D,KAAI,CAAC,gBAAgB,GAAG,OAAO,CAAC,eAAgB,IAAI,4BAAgB,CAAC;QACrE,KAAI,CAAC,2BAA2B,GAAG,OAAO,CAAC,0BAA2B,IAAI,wCAA4B,CAAC;QACvG,KAAI,CAAC,QAAQ,GAAG,OAAO,OAAO,CAAC,OAAO,KAAK,SAAS,CAAC,CAAC,CAAC,OAAO,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC;QAC9E,KAAI,CAAC,oBAAoB,GAAG,OAAO,CAAC,mBAAoB;YACtD,CAAC,KAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,qCAAyB,CAAC,CAAC,CAAC,wCAA4B,CAAC,CAAC;QAE7E,KAAI,CAAC,UAAU,GAAG,IAAI,qBAAS,EAAE,CAAC;QAClC,KAAI,CAAC,YAAY,GAAG,QAAQ,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAC;QACrD,KAAI,CAAC,aAAa,GAAG,KAAI,CAAC,YAAY,CAAC,UAAU,CAAC,IAAI,CAA6B,CAAC;QACpF,KAAI,CAAC,WAAW,GAAG,IAAI,eAAe,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;QAC7C,KAAI,CAAC,YAAY,GAAG,KAAI,CAAC,WAAW,CAAC,UAAU,CAAC,IAAI,CAAsC,CAAC;QAC3F,KAAI,CAAC,aAAa,GAAG,QAAQ,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAC;QACtD,KAAI,CAAC,cAAc,GAAG,KAAI,CAAC,aAAa,CAAC,UAAU,CAAC,IAAI,CAA6B,CAAC;QACtF,KAAI,CAAC,MAAM,GAAG,EAAE,CAAC;;IACnB,CAAC;IA7DoB,8BAAU,GAA/B,UAAgC,MAAkC;QAAlC,uBAAA,EAAA,SAAsB,wBAAY;;;;;;wBAChE,KAAA,mBAAmB,CAAA;wBAAU,qBAAM,eAAS,CAAC,MAAM,CAAC;iCACjD,KAAK,CAAC,UAAC,KAAU,IAAK,OAAA,OAAO,CAAC,KAAK,CAAC,uBAAuB,EAAE,KAAK,CAAC,EAA7C,CAA6C,CAAC,EAAA;;wBADvE,GAAoB,MAAM,GAAG,CAAA,SAC0C,KAAI,IAAI,CAAC;;;;;KACjF;IA+DD,sBAAI,+CAAc;QAHlB;;WAEG;aACH;YACE,OAAO,IAAI,CAAC,eAAe,CAAC;QAC9B,CAAC;QAED;;WAEG;aACH,UAAmB,MAAc;YAC/B,IAAI,OAAO,MAAM,KAAK,QAAQ,IAAI,MAAM,GAAG,CAAC,EAAE;gBAC5C,OAAO,CAAC,IAAI,CAAC,6CAA2C,4BAAgB,iBAAc,CAAC,CAAC;gBACxF,MAAM,GAAG,4BAAgB,CAAC;aAC3B;YACD,IAAI,CAAC,eAAe,GAAG,MAAM,CAAC;QAChC,CAAC;;;OAXA;IAaD;;;;OAIG;IACI,uCAAS,GAAf;;;;;4BACoC,qBAAM,OAAO,CAAC,GAAG,CAAC;4BACnD,mBAAmB,CAAC,UAAU,EAAE;4BAChC,IAAI,CAAC,iBAAiB,EAAE;4BACxB,KAAK,CAAC,IAAI,CAAC,WAAW,GAAG,sBAAU,CAAC;yBACrC,CAAC,EAAA;;wBAJI,KAA6B,SAIjC,EAJO,MAAM,QAAA,EAAE,aAAa,QAAA;wBAMhB,qBAAM,aAAa,CAAC,WAAW,EAAE,EAAA;;wBAAzC,KAAK,GAAG,SAAiC;wBACzC,iBAAiB,GAAG,MAAM,CAAC,2BAA2B,EAAE,CAAC;wBAC/D,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,IAAI,UAAU,CAAC,KAAK,CAAC,EAAE,iBAAiB,CAAC,CAAC;wBAC5D,MAAM,CAAC,UAAU,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC;wBAEpC,IAAI,CAAC,kBAAkB,GAAG,MAAM,CAAC,qBAAqB,EAAE,GAAG,CAAC,CAAC;wBAC7D,IAAI,CAAC,mBAAmB,GAAG,MAAM,CAAC,sBAAsB,EAAE,GAAG,CAAC,CAAC;wBAE/D,IAAI,CAAC,OAAO,GAAG,MAAM,CAAC;;;;;KACvB;IAED;;;;;;OAMG;IACG,0CAAY,GAAlB,UAAmB,gBAAiC,EAAE,iBAAoC;;;;;;wBACxF,IAAI,CAAC,mBAAmB,CAAC,MAAM,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE;4BAChD,sBAAO;yBACR;wBACD,IAAI,CAAC,gBAAgB,IAAI,CAAC,iBAAiB,EAAE;4BAC3C,MAAM,IA
AI,KAAK,CAAC,sCAAsC,CAAC,CAAC;yBACzD;wBACD,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,mBAAmB,CAAC,CAAC;wBACzC,IAAI,CAAC,UAAU,CAAC,KAAK,CAAC,mBAAmB,CAAC,CAAC;wBAErC,UAAU,GAAG,gBAAgB,CAAC;wBACrB,YAAY,GAA4B,UAAU,MAAtC,EAAU,aAAa,GAAK,UAAU,OAAf,CAAgB;wBAC5D,KAAqD,IAAI,CAAC,oBAAoB,EAArE,cAAc,WAAA,EAAU,eAAe,YAAA,CAA+B;wBAErF,IAAI,IAAI,CAAC,aAAa,KAAK,iBAAiB,EAAE;4BAC5C,IAAI,CAAC,aAAa,GAAG,iBAAiB,CAAC;4BACvC,IAAI,CAAC,cAAc,GAAG,iBAAiB,CAAC,UAAU,CAAC,IAAI,CAA6B,CAAC;yBACtF;wBAGG,gBAAgB,GAAG,KAAK,CAAC;wBAC7B,IAAI,IAAI,CAAC,YAAY,CAAC,KAAK,KAAK,cAAc,EAAE;4BAC9C,IAAI,CAAC,YAAY,CAAC,KAAK,GAAG,cAAc,CAAC;4BACzC,IAAI,CAAC,WAAW,CAAC,KAAK,GAAG,cAAc,CAAC;4BACxC,gBAAgB,GAAG,IAAI,CAAC;yBACzB;wBACD,IAAI,IAAI,CAAC,YAAY,CAAC,MAAM,KAAK,eAAe,EAAE;4BAChD,IAAI,CAAC,YAAY,CAAC,MAAM,GAAG,eAAe,CAAC;4BAC3C,IAAI,CAAC,WAAW,CAAC,MAAM,GAAG,eAAe,CAAC;4BAC1C,gBAAgB,GAAG,IAAI,CAAC;yBACzB;wBACD,IAAI,gBAAgB,EAAE;4BACpB,IAAI,CAAC,eAAe,GAAG,IAAI,SAAS,CAClC,IAAI,iBAAiB,CAAC,cAAc,GAAG,eAAe,GAAG,CAAC,CAAC,EAC3D,cAAc,EAAE,eAAe,CAAC,CAAC;yBACpC;wBAEkB,qBAAM,IAAI,CAAC,iBAAiB,CAAC,UAAU,CAAC,EAAA;;wBAArD,UAAU,GAAG,SAAwC;wBAE3D,IAAI,CAAC,UAAU,CAAC,KAAK,CAAC,uBAAuB,CAAC,CAAC;wBAC/C,IAAI,CAAC,YAAY,CAAC,YAAY,CAAC,UAAU,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;wBACjD,IAAI,CAAC,cAAc,CAAC,IAAI,EAAE,CAAC;wBAC3B,IAAI,CAAC,cAAc,CAAC,MAAM,GAAG,UAAQ,IAAI,CAAC,eAAe,QAAK,CAAC;wBAC/D,IAAI,CAAC,cAAc,CAAC,wBAAwB,GAAG,MAAM,CAAC;wBACtD,IAAI,CAAC,cAAc,CAAC,SAAS,CAAC,IAAI,CAAC,WAAW,EAAE,CAAC,EAAE,CAAC,EAAE,YAAY,EAAE,aAAa,CAAC,CAAC;wBACnF,IAAI,CAAC,cAAc,CAAC,MAAM,GAAG,MAAM,CAAC;wBACpC,IAAI,CAAC,cAAc,CAAC,wBAAwB,GAAG,WAAW,CAAC;wBAC3D,IAAI,CAAC,cAAc,CAAC,SAAS,CAAC,UAAU,EAAE,CAAC,EAAE,CAAC,EAAE,YAAY,EAAE,aAAa,CAAC,CAAC;wBAC7E,IAAI,CAAC,cAAc,CAAC,wBAAwB,GAAG,kBAAkB,CAAC;wBAClE,IAAI,CAAC,cAAc,CAAC,UAAU,CAAC,CAAC;wBAChC,IAAI,CAAC,cAAc,CAAC,OAAO,EAAE,CAAC;wBAE9B,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,uBAAuB,CAAC,CAAC;wBAC7C,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,mBAAmB,CAAC,CAAC;wBACzC,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,sBAAsB,CAAC,CAAC;wBAE5C,+FAA+F;wBAC/F,0BAA0B;wBAC1B,IAAI,CAAC,UAAU,CAAC,KAAK,CAAC,sBAAsB,CAAC,CAAC;wBAC9C,IAAI,CAAC,UAAU,CAAC,KAAK,CAAC,mBAAmB,CAAC,CAAC;;;;;KAC5C;IAIO,sCAAQ,GAAhB,UAAiB,IAAoC;QACnD,IAAI,IAAI,CAAC,MAAM,CAAC,MAAM,IAAI,IAAI,CAAC,aAAa,EAAE;YAC5C,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,EAAE,IAAI,CAAC,MAAM,CAAC,MAAM,GAAG,IAAI,CAAC,aAAa,GAAG,CAAC,CAAC,CAAC;SACpE;QACD,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;IACzB,CAAC;IAEO,yCAAW,GAAnB,UAAoB,SAAoB;QACtC,IAAM,WAAW,GAAG,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC,UAAC,GAAG,EAAE,IAAI,EAAE,CAAC,IAAK,OAAA,GAAG,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,EAAvB,CAAuB,EAAE,CAAC,CAAC,CAAC;QACrF,IAAM,MAAM,GAAG,SAAS,CAAC,MAAM,GAAG,SAAS,CAAC,KAAK,CAAC;gCACzC,CAAC;YACR,IAAM,CAAC,GAAG,OAAK,MAAM,CAAC,MAAM,CAAC,UAAC,GAAG,EAAE,IAAI,EAAE,CAAC,IAAK,OAAA,GAAG,GAAG,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,EAAjC,CAAiC,EAAE,CAAC,CAAC,GAAG,WAAW,CAAC;YACnG,SAAS,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,CAAC,GAAG,GAAG,CAAC,CAAC;;;QAFlD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,EAAE,CAAC,EAAE;oBAAtB,CAAC;SAGT;IACH,CAAC;IAEa,+CAAiB,GAA/B,UAAgC,UAA2B;;;;;;wBACrD,SAAS,GAAG,IAAI,CAAC,eAAe,CAAC;wBAC/B,kBAAkB,GAAG,IAAI,CAAC,iBAAiB,GAAG,CAAC,CAAC;wBAEtD,IAAI,CAAC,UAAU,CAAC,KAAK,CAAC,uBAAuB,CAAC,CAAC;wBAC/C,IAAI,kBAAkB,EAAE;4BACtB,SAAS,GAAG,IAAI,CAAC,yBAAyB,CAAC,UAAU,CAAC,CAAC;yBACxD;wBACD,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,uBAAuB,CAAC,CAAC;wBAE7C,IAAI,CAAC,UAAU,CAAC,KAAK,CAAC,mBAAmB,CAAC,CAAC;6BACvC,kBAAkB,EAAlB,wBAAkB;wBACpB,KAAA,IAAI,CAAA;6BAAgB,IAAI,CAAC,QAAQ,EAAb,wBAAa;wBAC7B,KAAA,IAAI,CAAC,yBAAyB,CAAC,SAAS,C
AAC,CAAA;;4BACzC,qBAAM,IAAI,CAAC,oBAAoB,CAAC,SAAS,CAAC,EAAA;;wBAA1C,KAAA,SAA0C,CAAA;;;wBAF9C,GAAK,YAAY,KAE6B,CAAC;wBAC/C,IAAI,CAAC,iBAAiB,GAAG,IAAI,CAAC,SAAS,CAAC;;;wBAE1C,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;wBACjC,IAAI,CAAC,WAAW,CAAC,SAAS,CAAC,CAAC;wBAC5B,IAAI,CAAC,iBAAiB,EAAE,CAAC;wBACzB,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,mBAAmB,CAAC,CAAC;wBAEzC,sBAAO,SAAS,EAAC;;;;KAClB;IAEO,uDAAyB,GAAjC,UAAkC,UAA2B;QACrD,IAAA,KAAoB,IAAI,CAAC,YAAY,EAAnC,KAAK,WAAA,EAAE,MAAM,YAAsB,CAAC;QAC5C,IAAI,CAAC,aAAa,CAAC,SAAS,CAAC,UAAU,EAAE,CAAC,EAAE,CAAC,EAAE,KAAK,EAAE,MAAM,CAAC,CAAC;QAC9D,IAAM,SAAS,GAAG,IAAI,CAAC,aAAa,CAAC,YAAY,CAAC,CAAC,EAAE,CAAC,EAAE,KAAK,EAAE,MAAM,CAAC,CAAC;QACvE,OAAO,SAAS,CAAC;IACnB,CAAC;IAEO,qCAAO,GAAf,UAAgB,GAAW;QACzB,OAAO,IAAI,OAAO,CAAC,UAAC,OAAO,EAAE,MAAM;YACjC,IAAM,MAAM,GAAG,QAAQ,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAC;YAChD,MAAM,CAAC,MAAM,GAAG,cAAM,OAAA,OAAO,EAAE,EAAT,CAAS,CAAC;YAChC,MAAM,CAAC,OAAO,GAAG,MAAM,CAAC;YACxB,QAAQ,CAAC,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;YAC7B,MAAM,CAAC,GAAG,GAAG,GAAG,CAAC;QACnB,CAAC,CAAC,CAAC;IACL,CAAC;IAEa,+CAAiB,GAA/B;;;;;4BAEE,qBAAM,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,WAAW,GAAG,mCAAuB,CAAC,EAAA;;wBAA9D,SAA8D,CAAC;;;;wBAGpD,qBAAM,MAAM,CAAC,4BAA4B,EAAE,EAAA;;wBAApD,MAAM,GAAG,SAA2C,CAAC;wBACrD,IAAI,CAAC,cAAc,GAAG,IAAI,CAAC;;;;wBAE3B,OAAO,CAAC,IAAI,CAAC,gFAAgF,CAAC,CAAC;wBAC/F,qBAAM,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,WAAW,GAAG,8BAAkB,CAAC,EAAA;;wBAAzD,SAAyD,CAAC;wBACjD,qBAAM,MAAM,CAAC,wBAAwB,EAAE,EAAA;;wBAAhD,MAAM,GAAG,SAAuC,CAAC;wBACjD,IAAI,CAAC,cAAc,GAAG,KAAK,CAAC;;4BAE9B,sBAAO,MAAM,EAAC;;;;KACf;IAEa,kDAAoB,GAAlC,UAAmC,UAAqB;;;;;4BACtC,qBAAM,mBAAmB,CAAC,MAAO,CAAC,aAAa,CAAC,UAAU,EAAE,IAAI,CAAC,gBAAgB,CAAC,EAAA;;wBAA5F,OAAO,GAAG,SAAkF;wBAClG,sBAAO,OAAO,CAAC,IAAI,EAAC;;;;KACrB;IAEO,uDAAyB,GAAjC,UAAkC,UAAqB;QAC/C,IAAA,KAA2F,IAAI,EAA7F,4BAAuC,EAAf,KAAK,WAAA,EAAE,MAAM,YAAA,EAAwB,MAAM,wBAAA,EAAW,MAAM,aAAS,CAAC;QACtG,IAAM,MAAM,GAAG,KAAK,GAAG,MAAM,CAAC;QAE9B,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,EAAE,CAAC,EAAE,EAAE;YAC/B,MAAM,CAAC,OAAO,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC,GAAG,UAAU,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,GAAG,CAAC;YAC9D,MAAM,CAAC,OAAO,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,GAAG,UAAU,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,GAAG,GAAG,CAAC;YACtE,MAAM,CAAC,OAAO,CAAC,MAAM,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,GAAG,UAAU,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,GAAG,GAAG,CAAC;SACvE;QAED,MAAM,CAAC,aAAa,EAAE,CAAC;QACvB,IAAM,aAAa,GAAG,IAAI,iBAAiB,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;QAExD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,EAAE,CAAC,EAAE,EAAE;YAC/B,IAAM,iBAAiB,GAAG,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC,mBAAmB,GAAG,CAAC,CAAC,CAAC;YACvE,aAAa,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,iBAAiB,IAAI,IAAI,CAAC,2BAA2B,CAAC,GAAG,iBAAiB,CAAC;SACtG;QAED,OAAO,aAAa,CAAC;IACvB,CAAC;IAlRc,0BAAM,GAAmB,IAAI,CAAC;IAmR/C,0BAAC;CAAA,AApRD,CAAkD,qBAAS,GAoR1D;AApRqB,kDAAmB","sourcesContent":["import '@tensorflow/tfjs-backend-webgl';\nimport '@tensorflow/tfjs-backend-cpu';\nimport { ModelConfig, PersonInferenceConfig } from '@tensorflow-models/body-pix/dist/body_pix_model';\nimport { BodyPix, load as loadModel } from '@tensorflow-models/body-pix';\nimport { Processor } from '../Processor';\nimport { Benchmark } from '../../utils/Benchmark';\nimport { version } from '../../utils/version';\nimport { Dimensions } from '../../types';\n\nimport {\n BODYPIX_INFERENCE_DIMENSIONS,\n DEBOUNCE,\n HISTORY_COUNT,\n INFERENCE_CONFIG,\n MASK_BLUR_RADIUS,\n MODEL_CONFIG,\n MODEL_NAME,\n PERSON_PROBABILITY_THRESHOLD,\n TFLITE_LOADER_NAME,\n TFLITE_SIMD_LOADER_NAME,\n WASM_INFERENCE_DIMENSIONS,\n} from 
'../../constants';\n\n/**\n * @private\n */\nexport interface BackgroundProcessorOptions {\n /**\n * The VideoProcessors load assets dynamically depending on certain browser features.\n * You need to serve all the assets and provide the root path so they can be referenced properly.\n * These assets can be copied from the `dist/build` folder which you can add as part of your deployment process.\n * @example\n *
\n *
\n * For virtual background:\n *
\n *\n * ```ts\n * const virtualBackground = new VirtualBackgroundProcessor({\n * assetsPath: 'https://my-server-path/assets',\n * backgroundImage: img,\n * });\n * await virtualBackground.loadModel();\n * ```\n *\n *
\n * For blur background:\n *
\n *\n * ```ts\n * const blurBackground = new GaussianBlurBackgroundProcessor({\n * assetsPath: 'https://my-server-path/assets'\n * });\n * await blurBackground.loadModel();\n * ```\n */\n assetsPath: string;\n\n /**\n * @private\n */\n debounce?: number;\n\n /**\n * @private\n */\n historyCount?: number;\n\n /**\n * @private\n */\n inferenceConfig?: PersonInferenceConfig;\n\n /**\n * @private\n */\n inferenceDimensions?: Dimensions;\n\n /**\n * The blur radius to use when smoothing out the edges of the person's mask.\n * @default\n * ```html\n * 5\n * ```\n */\n maskBlurRadius?: number;\n\n /**\n * @private\n */\n personProbabilityThreshold?: number;\n\n /**\n * @private\n */\n useWasm?: boolean;\n}\n\n/**\n * @private\n */\nexport abstract class BackgroundProcessor extends Processor {\n private static _model: BodyPix | null = null;\n private static async _loadModel(config: ModelConfig = MODEL_CONFIG): Promise {\n BackgroundProcessor._model = await loadModel(config)\n .catch((error: any) => console.error('Unable to load model.', error)) || null;\n }\n protected _outputCanvas: HTMLCanvasElement;\n protected _outputContext: CanvasRenderingContext2D;\n\n private _assetsPath: string;\n private _benchmark: Benchmark;\n private _currentMask: Uint8ClampedArray | Uint8Array = new Uint8ClampedArray();\n private _debounce: number = DEBOUNCE;\n private _dummyImageData: ImageData = new ImageData(1, 1);\n private _historyCount: number = HISTORY_COUNT;\n private _inferenceConfig: PersonInferenceConfig = INFERENCE_CONFIG;\n private _inferenceDimensions: Dimensions = WASM_INFERENCE_DIMENSIONS;\n private _inputCanvas: HTMLCanvasElement;\n private _inputContext: CanvasRenderingContext2D;\n private _inputMemoryOffset: number = 0;\n // tslint:disable-next-line no-unused-variable\n private _isSimdEnabled: boolean | null = null;\n private _maskBlurRadius: number = MASK_BLUR_RADIUS;\n private _maskCanvas: OffscreenCanvas;\n private _maskContext: OffscreenCanvasRenderingContext2D;\n private _masks: (Uint8ClampedArray | Uint8Array)[];\n private _maskUsageCounter: number = 0;\n private _outputMemoryOffset: number = 0;\n private _personProbabilityThreshold: number = PERSON_PROBABILITY_THRESHOLD;\n private _tflite: any;\n private _useWasm: boolean;\n // tslint:disable-next-line no-unused-variable\n private readonly _version: string = version;\n\n constructor(options: BackgroundProcessorOptions) {\n super();\n\n if (typeof options.assetsPath !== 'string') {\n throw new Error('assetsPath parameter is missing');\n }\n let assetsPath = options.assetsPath;\n if (assetsPath && assetsPath[assetsPath.length - 1] !== '/') {\n assetsPath += '/';\n }\n\n this.maskBlurRadius = options.maskBlurRadius!;\n this._assetsPath = assetsPath;\n this._debounce = options.debounce! || DEBOUNCE;\n this._historyCount = options.historyCount! || HISTORY_COUNT;\n this._inferenceConfig = options.inferenceConfig! || INFERENCE_CONFIG;\n this._personProbabilityThreshold = options.personProbabilityThreshold! || PERSON_PROBABILITY_THRESHOLD;\n this._useWasm = typeof options.useWasm === 'boolean' ? options.useWasm : true;\n this._inferenceDimensions = options.inferenceDimensions! ||\n (this._useWasm ? 
WASM_INFERENCE_DIMENSIONS : BODYPIX_INFERENCE_DIMENSIONS);\n\n this._benchmark = new Benchmark();\n this._inputCanvas = document.createElement('canvas');\n this._inputContext = this._inputCanvas.getContext('2d') as CanvasRenderingContext2D;\n this._maskCanvas = new OffscreenCanvas(1, 1);\n this._maskContext = this._maskCanvas.getContext('2d') as OffscreenCanvasRenderingContext2D;\n this._outputCanvas = document.createElement('canvas');\n this._outputContext = this._outputCanvas.getContext('2d') as CanvasRenderingContext2D;\n this._masks = [];\n }\n\n /**\n * The current blur radius when smoothing out the edges of the person's mask.\n */\n get maskBlurRadius(): number {\n return this._maskBlurRadius;\n }\n\n /**\n * Set a new blur radius to be used when smoothing out the edges of the person's mask.\n */\n set maskBlurRadius(radius: number) {\n if (typeof radius !== 'number' || radius < 0) {\n console.warn(`Valid mask blur radius not found. Using ${MASK_BLUR_RADIUS} as default.`);\n radius = MASK_BLUR_RADIUS;\n }\n this._maskBlurRadius = radius;\n }\n\n /**\n * Load the segmentation model.\n * Call this method before attaching the processor to ensure\n * video frames are processed correctly.\n */\n async loadModel() {\n const [, tflite, modelResponse ] = await Promise.all([\n BackgroundProcessor._loadModel(),\n this._loadTwilioTfLite(),\n fetch(this._assetsPath + MODEL_NAME),\n ]);\n\n const model = await modelResponse.arrayBuffer();\n const modelBufferOffset = tflite._getModelBufferMemoryOffset();\n tflite.HEAPU8.set(new Uint8Array(model), modelBufferOffset);\n tflite._loadModel(model.byteLength);\n\n this._inputMemoryOffset = tflite._getInputMemoryOffset() / 4;\n this._outputMemoryOffset = tflite._getOutputMemoryOffset() / 4;\n\n this._tflite = tflite;\n }\n\n /**\n * Apply a transform to the background of an input video frame and leaving\n * the foreground (person(s)) untouched. 
Any exception detected will\n * result in the frame being dropped.\n * @param inputFrameBuffer - The source of the input frame to process.\n * @param outputFrameBuffer - The output frame buffer to use to draw the processed frame.\n */\n async processFrame(inputFrameBuffer: OffscreenCanvas, outputFrameBuffer: HTMLCanvasElement): Promise {\n if (!BackgroundProcessor._model || !this._tflite) {\n return;\n }\n if (!inputFrameBuffer || !outputFrameBuffer) {\n throw new Error('Missing input or output frame buffer');\n }\n this._benchmark.end('captureFrameDelay');\n this._benchmark.start('processFrameDelay');\n\n const inputFrame = inputFrameBuffer;\n const { width: captureWidth, height: captureHeight } = inputFrame;\n const { width: inferenceWidth, height: inferenceHeight } = this._inferenceDimensions;\n\n if (this._outputCanvas !== outputFrameBuffer) {\n this._outputCanvas = outputFrameBuffer;\n this._outputContext = outputFrameBuffer.getContext('2d') as CanvasRenderingContext2D;\n }\n\n // Only set the canvas' dimensions if they have changed to prevent unnecessary redraw\n let reInitDummyImage = false;\n if (this._inputCanvas.width !== inferenceWidth) {\n this._inputCanvas.width = inferenceWidth;\n this._maskCanvas.width = inferenceWidth;\n reInitDummyImage = true;\n }\n if (this._inputCanvas.height !== inferenceHeight) {\n this._inputCanvas.height = inferenceHeight;\n this._maskCanvas.height = inferenceHeight;\n reInitDummyImage = true;\n }\n if (reInitDummyImage) {\n this._dummyImageData = new ImageData(\n new Uint8ClampedArray(inferenceWidth * inferenceHeight * 4),\n inferenceWidth, inferenceHeight);\n }\n\n const personMask = await this._createPersonMask(inputFrame);\n\n this._benchmark.start('imageCompositionDelay');\n this._maskContext.putImageData(personMask, 0, 0);\n this._outputContext.save();\n this._outputContext.filter = `blur(${this._maskBlurRadius}px)`;\n this._outputContext.globalCompositeOperation = 'copy';\n this._outputContext.drawImage(this._maskCanvas, 0, 0, captureWidth, captureHeight);\n this._outputContext.filter = 'none';\n this._outputContext.globalCompositeOperation = 'source-in';\n this._outputContext.drawImage(inputFrame, 0, 0, captureWidth, captureHeight);\n this._outputContext.globalCompositeOperation = 'destination-over';\n this._setBackground(inputFrame);\n this._outputContext.restore();\n\n this._benchmark.end('imageCompositionDelay');\n this._benchmark.end('processFrameDelay');\n this._benchmark.end('totalProcessingDelay');\n\n // NOTE (csantos): Start the benchmark from here so we can include the delay from the Video sdk\n // for a more accurate fps\n this._benchmark.start('totalProcessingDelay');\n this._benchmark.start('captureFrameDelay');\n }\n\n protected abstract _setBackground(inputFrame: OffscreenCanvas): void;\n\n private _addMask(mask: Uint8ClampedArray | Uint8Array) {\n if (this._masks.length >= this._historyCount) {\n this._masks.splice(0, this._masks.length - this._historyCount + 1);\n }\n this._masks.push(mask);\n }\n\n private _applyAlpha(imageData: ImageData) {\n const weightedSum = this._masks.reduce((sum, mask, j) => sum + (j + 1) * (j + 1), 0);\n const pixels = imageData.height * imageData.width;\n for (let i = 0; i < pixels; i++) {\n const w = this._masks.reduce((sum, mask, j) => sum + mask[i] * (j + 1) * (j + 1), 0) / weightedSum;\n imageData.data[i * 4 + 3] = Math.round(w * 255);\n }\n }\n\n private async _createPersonMask(inputFrame: OffscreenCanvas): Promise {\n let imageData = this._dummyImageData;\n const shouldRunInference = 
this._maskUsageCounter < 1;\n\n this._benchmark.start('inputImageResizeDelay');\n if (shouldRunInference) {\n imageData = this._getResizedInputImageData(inputFrame);\n }\n this._benchmark.end('inputImageResizeDelay');\n\n this._benchmark.start('segmentationDelay');\n if (shouldRunInference) {\n this._currentMask = this._useWasm\n ? this._runTwilioTfLiteInference(imageData)\n : await this._runBodyPixInference(imageData);\n this._maskUsageCounter = this._debounce;\n }\n this._addMask(this._currentMask);\n this._applyAlpha(imageData);\n this._maskUsageCounter--;\n this._benchmark.end('segmentationDelay');\n\n return imageData;\n }\n\n private _getResizedInputImageData(inputFrame: OffscreenCanvas): ImageData {\n const { width, height } = this._inputCanvas;\n this._inputContext.drawImage(inputFrame, 0, 0, width, height);\n const imageData = this._inputContext.getImageData(0, 0, width, height);\n return imageData;\n }\n\n private _loadJs(url: string): Promise {\n return new Promise((resolve, reject) => {\n const script = document.createElement('script');\n script.onload = () => resolve();\n script.onerror = reject;\n document.head.append(script);\n script.src = url;\n });\n }\n\n private async _loadTwilioTfLite(): Promise {\n let tflite: any;\n await this._loadJs(this._assetsPath + TFLITE_SIMD_LOADER_NAME);\n\n try {\n tflite = await window.createTwilioTFLiteSIMDModule();\n this._isSimdEnabled = true;\n } catch {\n console.warn('SIMD not supported. You may experience poor quality of background replacement.');\n await this._loadJs(this._assetsPath + TFLITE_LOADER_NAME);\n tflite = await window.createTwilioTFLiteModule();\n this._isSimdEnabled = false;\n }\n return tflite;\n }\n\n private async _runBodyPixInference(inputImage: ImageData): Promise {\n const segment = await BackgroundProcessor._model!.segmentPerson(inputImage, this._inferenceConfig);\n return segment.data;\n }\n\n private _runTwilioTfLiteInference(inputImage: ImageData): Uint8ClampedArray {\n const { _inferenceDimensions: { width, height }, _inputMemoryOffset: offset, _tflite: tflite } = this;\n const pixels = width * height;\n\n for (let i = 0; i < pixels; i++) {\n tflite.HEAPF32[offset + i * 3] = inputImage.data[i * 4] / 255;\n tflite.HEAPF32[offset + i * 3 + 1] = inputImage.data[i * 4 + 1] / 255;\n tflite.HEAPF32[offset + i * 3 + 2] = inputImage.data[i * 4 + 2] / 255;\n }\n\n tflite._runInference();\n const inferenceData = new Uint8ClampedArray(pixels * 4);\n\n for (let i = 0; i < pixels; i++) {\n const personProbability = tflite.HEAPF32[this._outputMemoryOffset + i];\n inferenceData[i] = Number(personProbability >= this._personProbabilityThreshold) * personProbability;\n }\n\n return inferenceData;\n }\n}\n"]} \ No newline at end of file diff --git a/es5/processors/background/GaussianBlurBackgroundProcessor.d.ts b/es5/processors/background/GaussianBlurBackgroundProcessor.d.ts new file mode 100644 index 0000000..dca2a23 --- /dev/null +++ b/es5/processors/background/GaussianBlurBackgroundProcessor.d.ts @@ -0,0 +1,61 @@ +import { BackgroundProcessor, BackgroundProcessorOptions } from './BackgroundProcessor'; +/** + * Options passed to [[GaussianBlurBackgroundProcessor]] constructor. + */ +export interface GaussianBlurBackgroundProcessorOptions extends BackgroundProcessorOptions { + /** + * The background blur filter radius to use in pixels. 
+ * @default + * ```html + * 15 + * ``` + */ + blurFilterRadius?: number; +} +/** + * The GaussianBlurBackgroundProcessor, when added to a VideoTrack, + * applies a gaussian blur filter on the background in each video frame + * and leaves the foreground (person(s)) untouched. Each instance of + * GaussianBlurBackgroundProcessor should be added to only one VideoTrack + * at a time to prevent overlapping of image data from multiple VideoTracks. + * + * @example + * + * ```ts + * import { createLocalVideoTrack } from 'twilio-video'; + * import { GaussianBlurBackgroundProcessor } from '@twilio/video-processors'; + * + * const blurBackground = new GaussianBlurBackgroundProcessor({ + * assetsPath: 'https://my-server-path/assets' + * }); + * + * blurBackground.loadModel().then(() => { + * createLocalVideoTrack({ + * width: 640, + * height: 480, + * frameRate: 24 + * }).then(track => { + * track.addProcessor(blurBackground); + * }); + * }); + * ``` + */ +export declare class GaussianBlurBackgroundProcessor extends BackgroundProcessor { + private _blurFilterRadius; + private readonly _name; + /** + * Construct a GaussianBlurBackgroundProcessor. Default values will be used for + * any missing properties in [[GaussianBlurBackgroundProcessorOptions]], and + * invalid properties will be ignored. + */ + constructor(options: GaussianBlurBackgroundProcessorOptions); + /** + * The current background blur filter radius in pixels. + */ + get blurFilterRadius(): number; + /** + * Set a new background blur filter radius in pixels. + */ + set blurFilterRadius(radius: number); + protected _setBackground(inputFrame: OffscreenCanvas): void; +} diff --git a/es5/processors/background/GaussianBlurBackgroundProcessor.js b/es5/processors/background/GaussianBlurBackgroundProcessor.js new file mode 100644 index 0000000..0664ecf --- /dev/null +++ b/es5/processors/background/GaussianBlurBackgroundProcessor.js @@ -0,0 +1,89 @@ +"use strict"; +var __extends = (this && this.__extends) || (function () { + var extendStatics = function (d, b) { + extendStatics = Object.setPrototypeOf || + ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || + function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; }; + return extendStatics(d, b); + }; + return function (d, b) { + extendStatics(d, b); + function __() { this.constructor = d; } + d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); + }; +})(); +Object.defineProperty(exports, "__esModule", { value: true }); +exports.GaussianBlurBackgroundProcessor = void 0; +var BackgroundProcessor_1 = require("./BackgroundProcessor"); +var constants_1 = require("../../constants"); +/** + * The GaussianBlurBackgroundProcessor, when added to a VideoTrack, + * applies a gaussian blur filter on the background in each video frame + * and leaves the foreground (person(s)) untouched. Each instance of + * GaussianBlurBackgroundProcessor should be added to only one VideoTrack + * at a time to prevent overlapping of image data from multiple VideoTracks. 
+ * + * @example + * + * ```ts + * import { createLocalVideoTrack } from 'twilio-video'; + * import { GaussianBlurBackgroundProcessor } from '@twilio/video-processors'; + * + * const blurBackground = new GaussianBlurBackgroundProcessor({ + * assetsPath: 'https://my-server-path/assets' + * }); + * + * blurBackground.loadModel().then(() => { + * createLocalVideoTrack({ + * width: 640, + * height: 480, + * frameRate: 24 + * }).then(track => { + * track.addProcessor(blurBackground); + * }); + * }); + * ``` + */ +var GaussianBlurBackgroundProcessor = /** @class */ (function (_super) { + __extends(GaussianBlurBackgroundProcessor, _super); + /** + * Construct a GaussianBlurBackgroundProcessor. Default values will be used for + * any missing properties in [[GaussianBlurBackgroundProcessorOptions]], and + * invalid properties will be ignored. + */ + function GaussianBlurBackgroundProcessor(options) { + var _this = _super.call(this, options) || this; + _this._blurFilterRadius = constants_1.BLUR_FILTER_RADIUS; + // tslint:disable-next-line no-unused-variable + _this._name = 'GaussianBlurBackgroundProcessor'; + _this.blurFilterRadius = options.blurFilterRadius; + return _this; + } + Object.defineProperty(GaussianBlurBackgroundProcessor.prototype, "blurFilterRadius", { + /** + * The current background blur filter radius in pixels. + */ + get: function () { + return this._blurFilterRadius; + }, + /** + * Set a new background blur filter radius in pixels. + */ + set: function (radius) { + if (!radius) { + console.warn("Valid blur filter radius not found. Using " + constants_1.BLUR_FILTER_RADIUS + " as default."); + radius = constants_1.BLUR_FILTER_RADIUS; + } + this._blurFilterRadius = radius; + }, + enumerable: false, + configurable: true + }); + GaussianBlurBackgroundProcessor.prototype._setBackground = function (inputFrame) { + this._outputContext.filter = "blur(" + this._blurFilterRadius + "px)"; + this._outputContext.drawImage(inputFrame, 0, 0); + }; + return GaussianBlurBackgroundProcessor; +}(BackgroundProcessor_1.BackgroundProcessor)); +exports.GaussianBlurBackgroundProcessor = GaussianBlurBackgroundProcessor; +//# sourceMappingURL=GaussianBlurBackgroundProcessor.js.map \ No newline at end of file diff --git a/es5/processors/background/GaussianBlurBackgroundProcessor.js.map b/es5/processors/background/GaussianBlurBackgroundProcessor.js.map new file mode 100644 index 0000000..ed37d95 --- /dev/null +++ b/es5/processors/background/GaussianBlurBackgroundProcessor.js.map @@ -0,0 +1 @@ +{"version":3,"file":"GaussianBlurBackgroundProcessor.js","sourceRoot":"","sources":["../../../lib/processors/background/GaussianBlurBackgroundProcessor.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;;AAAA,6DAAwF;AACxF,6CAAqD;AAgBrD;;;;;;;;;;;;;;;;;;;;;;;;;;;GA2BG;AACH;IAAqD,mDAAmB;IAMtE;;;;OAIG;IACH,yCAAY,OAA+C;QAA3D,YACE,kBAAM,OAAO,CAAC,SAEf;QAZO,uBAAiB,GAAW,8BAAkB,CAAC;QACvD,8CAA8C;QAC7B,WAAK,GAAW,iCAAiC,CAAC;QASjE,KAAI,CAAC,gBAAgB,GAAG,OAAO,CAAC,gBAAiB,CAAC;;IACpD,CAAC;IAKD,sBAAI,6DAAgB;QAHpB;;WAEG;aACH;YACE,OAAO,IAAI,CAAC,iBAAiB,CAAC;QAChC,CAAC;QAED;;WAEG;aACH,UAAqB,MAAc;YACjC,IAAI,CAAC,MAAM,EAAE;gBACX,OAAO,CAAC,IAAI,CAAC,+CAA6C,8BAAkB,iBAAc,CAAC,CAAC;gBAC5F,MAAM,GAAG,8BAAkB,CAAC;aAC7B;YACD,IAAI,CAAC,iBAAiB,GAAG,MAAM,CAAC;QAClC,CAAC;;;OAXA;IAaS,wDAAc,GAAxB,UAAyB,UAA2B;QAClD,IAAI,CAAC,cAAc,CAAC,MAAM,GAAG,UAAQ,IAAI,CAAC,iBAAiB,QAAK,CAAC;QACjE,IAAI,CAAC,cAAc,CAAC,SAAS,CAAC,UAAU,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;IAClD,CAAC;IACH,sCAAC;AAAD,CAAC,AAtCD,CAAqD,yCAAmB,GAsCvE;AAtCY,0EAA+B","sourcesContent":["import { 
BackgroundProcessor, BackgroundProcessorOptions } from './BackgroundProcessor';\nimport { BLUR_FILTER_RADIUS } from '../../constants';\n\n/**\n * Options passed to [[GaussianBlurBackgroundProcessor]] constructor.\n */\nexport interface GaussianBlurBackgroundProcessorOptions extends BackgroundProcessorOptions {\n /**\n * The background blur filter radius to use in pixels.\n * @default\n * ```html\n * 15\n * ```\n */\n blurFilterRadius?: number;\n}\n\n/**\n * The GaussianBlurBackgroundProcessor, when added to a VideoTrack,\n * applies a gaussian blur filter on the background in each video frame\n * and leaves the foreground (person(s)) untouched. Each instance of\n * GaussianBlurBackgroundProcessor should be added to only one VideoTrack\n * at a time to prevent overlapping of image data from multiple VideoTracks.\n *\n * @example\n *\n * ```ts\n * import { createLocalVideoTrack } from 'twilio-video';\n * import { GaussianBlurBackgroundProcessor } from '@twilio/video-processors';\n *\n * const blurBackground = new GaussianBlurBackgroundProcessor({\n * assetsPath: 'https://my-server-path/assets'\n * });\n *\n * blurBackground.loadModel().then(() => {\n * createLocalVideoTrack({\n * width: 640,\n * height: 480,\n * frameRate: 24\n * }).then(track => {\n * track.addProcessor(blurBackground);\n * });\n * });\n * ```\n */\nexport class GaussianBlurBackgroundProcessor extends BackgroundProcessor {\n\n private _blurFilterRadius: number = BLUR_FILTER_RADIUS;\n // tslint:disable-next-line no-unused-variable\n private readonly _name: string = 'GaussianBlurBackgroundProcessor';\n\n /**\n * Construct a GaussianBlurBackgroundProcessor. Default values will be used for\n * any missing properties in [[GaussianBlurBackgroundProcessorOptions]], and\n * invalid properties will be ignored.\n */\n constructor(options: GaussianBlurBackgroundProcessorOptions) {\n super(options);\n this.blurFilterRadius = options.blurFilterRadius!;\n }\n\n /**\n * The current background blur filter radius in pixels.\n */\n get blurFilterRadius(): number {\n return this._blurFilterRadius;\n }\n\n /**\n * Set a new background blur filter radius in pixels.\n */\n set blurFilterRadius(radius: number) {\n if (!radius) {\n console.warn(`Valid blur filter radius not found. Using ${BLUR_FILTER_RADIUS} as default.`);\n radius = BLUR_FILTER_RADIUS;\n }\n this._blurFilterRadius = radius;\n }\n\n protected _setBackground(inputFrame: OffscreenCanvas): void {\n this._outputContext.filter = `blur(${this._blurFilterRadius}px)`;\n this._outputContext.drawImage(inputFrame, 0, 0);\n }\n}\n"]} \ No newline at end of file diff --git a/es5/processors/background/VirtualBackgroundProcessor.d.ts b/es5/processors/background/VirtualBackgroundProcessor.d.ts new file mode 100644 index 0000000..8052939 --- /dev/null +++ b/es5/processors/background/VirtualBackgroundProcessor.d.ts @@ -0,0 +1,89 @@ +import { BackgroundProcessor, BackgroundProcessorOptions } from './BackgroundProcessor'; +import { ImageFit } from '../../types'; +/** + * Options passed to [[VirtualBackgroundProcessor]] constructor. + */ +export interface VirtualBackgroundProcessorOptions extends BackgroundProcessorOptions { + /** + * The HTMLImageElement to use for background replacement. + * An error will be raised if the image hasn't been fully loaded yet. Additionally, the image must follow + * [security guidelines](https://developer.mozilla.org/en-US/docs/Web/HTML/CORS_enabled_image) + * when loading the image from a different origin. Failing to do so will result to an empty output frame. 
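Because the `backgroundImage` setter (declared further below) throws unless the image is fully decoded, and a cross-origin image must be CORS-enabled to avoid an empty output frame, the image is typically prepared along these lines before constructing the processor — a sketch with placeholder URLs:

```ts
import { VirtualBackgroundProcessor } from '@twilio/video-processors';

const img = new Image();
img.crossOrigin = 'anonymous'; // request a CORS-enabled image when it lives on another origin
img.onload = () => {
  // img.complete and img.naturalHeight are now set, so the setter accepts the image.
  const processor = new VirtualBackgroundProcessor({
    assetsPath: 'https://my-server-path/assets',        // placeholder
    backgroundImage: img,
  });
  // processor.loadModel() and track.addProcessor(processor) follow as in the example below.
};
img.src = 'https://my-cdn.example.com/background.jpg';  // placeholder
```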
+ */ + backgroundImage: HTMLImageElement; + /** + * The [[ImageFit]] to use for positioning of the background image in the viewport. + * @default + * ```html + * 'Fill' + * ``` + */ + fitType?: ImageFit; +} +/** + * The VirtualBackgroundProcessor, when added to a VideoTrack, + * replaces the background in each video frame with a given image, + * and leaves the foreground (person(s)) untouched. Each instance of + * VirtualBackgroundProcessor should be added to only one VideoTrack + * at a time to prevent overlapping of image data from multiple VideoTracks. + * + * @example + * + * ```ts + * import { createLocalVideoTrack } from 'twilio-video'; + * import { VirtualBackgroundProcessor } from '@twilio/video-processors'; + * + * let virtualBackground; + * const img = new Image(); + * + * img.onload = () => { + * virtualBackground = new VirtualBackgroundProcessor({ + * assetsPath: 'https://my-server-path/assets', + * backgroundImage: img, + * }); + * + * virtualBackground.loadModel().then(() => { + * createLocalVideoTrack({ + * width: 640, + * height: 480, + * frameRate: 24 + * }).then(track => { + * track.addProcessor(virtualBackground); + * }); + * }); + * }; + * img.src = '/background.jpg'; + * ``` + */ +export declare class VirtualBackgroundProcessor extends BackgroundProcessor { + private _backgroundImage; + private _fitType; + private readonly _name; + /** + * Construct a VirtualBackgroundProcessor. Default values will be used for + * any missing optional properties in [[VirtualBackgroundProcessorOptions]], + * and invalid properties will be ignored. + */ + constructor(options: VirtualBackgroundProcessorOptions); + /** + * The HTMLImageElement representing the current background image. + */ + get backgroundImage(): HTMLImageElement; + /** + * Set an HTMLImageElement as the new background image. + * An error will be raised if the image hasn't been fully loaded yet. Additionally, the image must follow + * [security guidelines](https://developer.mozilla.org/en-US/docs/Web/HTML/CORS_enabled_image) + * when loading the image from a different origin. Failing to do so will result to an empty output frame. + */ + set backgroundImage(image: HTMLImageElement); + /** + * The current [[ImageFit]] for positioning of the background image in the viewport. + */ + get fitType(): ImageFit; + /** + * Set a new [[ImageFit]] to be used for positioning the background image in the viewport. + */ + set fitType(fitType: ImageFit); + protected _setBackground(): void; + private _getFitPosition; +} diff --git a/es5/processors/background/VirtualBackgroundProcessor.js b/es5/processors/background/VirtualBackgroundProcessor.js new file mode 100644 index 0000000..22df096 --- /dev/null +++ b/es5/processors/background/VirtualBackgroundProcessor.js @@ -0,0 +1,158 @@ +"use strict"; +var __extends = (this && this.__extends) || (function () { + var extendStatics = function (d, b) { + extendStatics = Object.setPrototypeOf || + ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || + function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; }; + return extendStatics(d, b); + }; + return function (d, b) { + extendStatics(d, b); + function __() { this.constructor = d; } + d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __()); + }; +})(); +Object.defineProperty(exports, "__esModule", { value: true }); +exports.VirtualBackgroundProcessor = void 0; +var BackgroundProcessor_1 = require("./BackgroundProcessor"); +var types_1 = require("../../types"); +/** + * The VirtualBackgroundProcessor, when added to a VideoTrack, + * replaces the background in each video frame with a given image, + * and leaves the foreground (person(s)) untouched. Each instance of + * VirtualBackgroundProcessor should be added to only one VideoTrack + * at a time to prevent overlapping of image data from multiple VideoTracks. + * + * @example + * + * ```ts + * import { createLocalVideoTrack } from 'twilio-video'; + * import { VirtualBackgroundProcessor } from '@twilio/video-processors'; + * + * let virtualBackground; + * const img = new Image(); + * + * img.onload = () => { + * virtualBackground = new VirtualBackgroundProcessor({ + * assetsPath: 'https://my-server-path/assets', + * backgroundImage: img, + * }); + * + * virtualBackground.loadModel().then(() => { + * createLocalVideoTrack({ + * width: 640, + * height: 480, + * frameRate: 24 + * }).then(track => { + * track.addProcessor(virtualBackground); + * }); + * }); + * }; + * img.src = '/background.jpg'; + * ``` + */ +var VirtualBackgroundProcessor = /** @class */ (function (_super) { + __extends(VirtualBackgroundProcessor, _super); + /** + * Construct a VirtualBackgroundProcessor. Default values will be used for + * any missing optional properties in [[VirtualBackgroundProcessorOptions]], + * and invalid properties will be ignored. + */ + function VirtualBackgroundProcessor(options) { + var _this = _super.call(this, options) || this; + // tslint:disable-next-line no-unused-variable + _this._name = 'VirtualBackgroundProcessor'; + _this.backgroundImage = options.backgroundImage; + _this.fitType = options.fitType; + return _this; + } + Object.defineProperty(VirtualBackgroundProcessor.prototype, "backgroundImage", { + /** + * The HTMLImageElement representing the current background image. + */ + get: function () { + return this._backgroundImage; + }, + /** + * Set an HTMLImageElement as the new background image. + * An error will be raised if the image hasn't been fully loaded yet. Additionally, the image must follow + * [security guidelines](https://developer.mozilla.org/en-US/docs/Web/HTML/CORS_enabled_image) + * when loading the image from a different origin. Failing to do so will result to an empty output frame. + */ + set: function (image) { + if (!image || !image.complete || !image.naturalHeight) { + throw new Error('Invalid image. Make sure that the image is an HTMLImageElement and has been successfully loaded'); + } + this._backgroundImage = image; + }, + enumerable: false, + configurable: true + }); + Object.defineProperty(VirtualBackgroundProcessor.prototype, "fitType", { + /** + * The current [[ImageFit]] for positioning of the background image in the viewport. + */ + get: function () { + return this._fitType; + }, + /** + * Set a new [[ImageFit]] to be used for positioning the background image in the viewport. + */ + set: function (fitType) { + var validTypes = Object.keys(types_1.ImageFit); + if (!validTypes.includes(fitType)) { + console.warn("Valid fitType not found. 
Using '" + types_1.ImageFit.Fill + "' as default."); + fitType = types_1.ImageFit.Fill; + } + this._fitType = fitType; + }, + enumerable: false, + configurable: true + }); + VirtualBackgroundProcessor.prototype._setBackground = function () { + var img = this._backgroundImage; + var imageWidth = img.naturalWidth; + var imageHeight = img.naturalHeight; + var canvasWidth = this._outputCanvas.width; + var canvasHeight = this._outputCanvas.height; + if (this._fitType === types_1.ImageFit.Fill) { + this._outputContext.drawImage(img, 0, 0, imageWidth, imageHeight, 0, 0, canvasWidth, canvasHeight); + } + else if (this._fitType === types_1.ImageFit.None) { + this._outputContext.drawImage(img, 0, 0, imageWidth, imageHeight); + } + else if (this._fitType === types_1.ImageFit.Contain) { + var _a = this._getFitPosition(imageWidth, imageHeight, canvasWidth, canvasHeight, types_1.ImageFit.Contain), x = _a.x, y = _a.y, w = _a.w, h = _a.h; + this._outputContext.drawImage(img, 0, 0, imageWidth, imageHeight, x, y, w, h); + } + else if (this._fitType === types_1.ImageFit.Cover) { + var _b = this._getFitPosition(imageWidth, imageHeight, canvasWidth, canvasHeight, types_1.ImageFit.Cover), x = _b.x, y = _b.y, w = _b.w, h = _b.h; + this._outputContext.drawImage(img, 0, 0, imageWidth, imageHeight, x, y, w, h); + } + }; + VirtualBackgroundProcessor.prototype._getFitPosition = function (contentWidth, contentHeight, viewportWidth, viewportHeight, type) { + // Calculate new content width to fit viewport width + var factor = viewportWidth / contentWidth; + var newContentWidth = viewportWidth; + var newContentHeight = factor * contentHeight; + // Scale down the resulting height and width more + // to fit viewport height if the content still exceeds it + if ((type === types_1.ImageFit.Contain && newContentHeight > viewportHeight) + || (type === types_1.ImageFit.Cover && viewportHeight > newContentHeight)) { + factor = viewportHeight / newContentHeight; + newContentWidth = factor * newContentWidth; + newContentHeight = viewportHeight; + } + // Calculate the destination top left corner to center the content + var x = (viewportWidth - newContentWidth) / 2; + var y = (viewportHeight - newContentHeight) / 2; + return { + x: x, y: y, + w: newContentWidth, + h: newContentHeight, + }; + }; + return VirtualBackgroundProcessor; +}(BackgroundProcessor_1.BackgroundProcessor)); +exports.VirtualBackgroundProcessor = VirtualBackgroundProcessor; +//# sourceMappingURL=VirtualBackgroundProcessor.js.map \ No newline at end of file diff --git a/es5/processors/background/VirtualBackgroundProcessor.js.map b/es5/processors/background/VirtualBackgroundProcessor.js.map new file mode 100644 index 0000000..f9b9955 --- /dev/null +++ b/es5/processors/background/VirtualBackgroundProcessor.js.map @@ -0,0 +1 @@ 
+{"version":3,"file":"VirtualBackgroundProcessor.js","sourceRoot":"","sources":["../../../lib/processors/background/VirtualBackgroundProcessor.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;;AAAA,6DAAwF;AACxF,qCAAuC;AAwBvC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAkCG;AACH;IAAgD,8CAAmB;IAOjE;;;;OAIG;IACH,oCAAY,OAA0C;QAAtD,YACE,kBAAM,OAAO,CAAC,SAGf;QAZD,8CAA8C;QAC7B,WAAK,GAAW,4BAA4B,CAAC;QAS5D,KAAI,CAAC,eAAe,GAAG,OAAO,CAAC,eAAe,CAAC;QAC/C,KAAI,CAAC,OAAO,GAAG,OAAO,CAAC,OAAQ,CAAC;;IAClC,CAAC;IAKD,sBAAI,uDAAe;QAHnB;;WAEG;aACH;YACE,OAAO,IAAI,CAAC,gBAAgB,CAAC;QAC/B,CAAC;QAED;;;;;WAKG;aACH,UAAoB,KAAuB;YACzC,IAAI,CAAC,KAAK,IAAI,CAAC,KAAK,CAAC,QAAQ,IAAI,CAAC,KAAK,CAAC,aAAa,EAAE;gBACrD,MAAM,IAAI,KAAK,CAAC,iGAAiG,CAAC,CAAC;aACpH;YACD,IAAI,CAAC,gBAAgB,GAAG,KAAK,CAAC;QAChC,CAAC;;;OAbA;IAkBD,sBAAI,+CAAO;QAHX;;WAEG;aACH;YACE,OAAO,IAAI,CAAC,QAAQ,CAAC;QACvB,CAAC;QAED;;WAEG;aACH,UAAY,OAAiB;YAC3B,IAAM,UAAU,GAAG,MAAM,CAAC,IAAI,CAAC,gBAAQ,CAAC,CAAC;YACzC,IAAI,CAAC,UAAU,CAAC,QAAQ,CAAC,OAAc,CAAC,EAAE;gBACxC,OAAO,CAAC,IAAI,CAAC,qCAAmC,gBAAQ,CAAC,IAAI,kBAAe,CAAC,CAAC;gBAC9E,OAAO,GAAG,gBAAQ,CAAC,IAAI,CAAC;aACzB;YACD,IAAI,CAAC,QAAQ,GAAG,OAAO,CAAC;QAC1B,CAAC;;;OAZA;IAcS,mDAAc,GAAxB;QACE,IAAM,GAAG,GAAG,IAAI,CAAC,gBAAgB,CAAC;QAClC,IAAM,UAAU,GAAG,GAAG,CAAC,YAAY,CAAC;QACpC,IAAM,WAAW,GAAG,GAAG,CAAC,aAAa,CAAC;QACtC,IAAM,WAAW,GAAG,IAAI,CAAC,aAAa,CAAC,KAAK,CAAC;QAC7C,IAAM,YAAY,GAAG,IAAI,CAAC,aAAa,CAAC,MAAM,CAAC;QAE/C,IAAI,IAAI,CAAC,QAAQ,KAAK,gBAAQ,CAAC,IAAI,EAAE;YACnC,IAAI,CAAC,cAAc,CAAC,SAAS,CAAC,GAAG,EAAE,CAAC,EAAE,CAAC,EAAE,UAAU,EAAE,WAAW,EAAE,CAAC,EAAE,CAAC,EAAE,WAAW,EAAE,YAAY,CAAC,CAAC;SACpG;aAAM,IAAI,IAAI,CAAC,QAAQ,KAAK,gBAAQ,CAAC,IAAI,EAAE;YAC1C,IAAI,CAAC,cAAc,CAAC,SAAS,CAAC,GAAG,EAAE,CAAC,EAAE,CAAC,EAAE,UAAU,EAAE,WAAW,CAAC,CAAC;SACnE;aAAM,IAAI,IAAI,CAAC,QAAQ,KAAK,gBAAQ,CAAC,OAAO,EAAE;YACvC,IAAA,KAAiB,IAAI,CAAC,eAAe,CAAC,UAAU,EAAE,WAAW,EAAE,WAAW,EAAE,YAAY,EAAE,gBAAQ,CAAC,OAAO,CAAC,EAAzG,CAAC,OAAA,EAAE,CAAC,OAAA,EAAE,CAAC,OAAA,EAAE,CAAC,OAA+F,CAAC;YAClH,IAAI,CAAC,cAAc,CAAC,SAAS,CAAC,GAAG,EAAE,CAAC,EAAE,CAAC,EAAE,UAAU,EAAE,WAAW,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;SAC/E;aAAM,IAAI,IAAI,CAAC,QAAQ,KAAK,gBAAQ,CAAC,KAAK,EAAE;YACrC,IAAA,KAAiB,IAAI,CAAC,eAAe,CAAC,UAAU,EAAE,WAAW,EAAE,WAAW,EAAE,YAAY,EAAE,gBAAQ,CAAC,KAAK,CAAC,EAAvG,CAAC,OAAA,EAAE,CAAC,OAAA,EAAE,CAAC,OAAA,EAAE,CAAC,OAA6F,CAAC;YAChH,IAAI,CAAC,cAAc,CAAC,SAAS,CAAC,GAAG,EAAE,CAAC,EAAE,CAAC,EAAE,UAAU,EAAE,WAAW,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;SAC/E;IACH,CAAC;IAEO,oDAAe,GAAvB,UAAwB,YAAoB,EAAE,aAAqB,EACjE,aAAqB,EAAE,cAAsB,EAAE,IAAc;QAG7D,oDAAoD;QACpD,IAAI,MAAM,GAAG,aAAa,GAAG,YAAY,CAAC;QAC1C,IAAI,eAAe,GAAG,aAAa,CAAC;QACpC,IAAI,gBAAgB,GAAG,MAAM,GAAG,aAAa,CAAC;QAE9C,iDAAiD;QACjD,yDAAyD;QACzD,IAAI,CAAC,IAAI,KAAK,gBAAQ,CAAC,OAAO,IAAI,gBAAgB,GAAG,cAAc,CAAC;eAC/D,CAAC,IAAI,KAAK,gBAAQ,CAAC,KAAK,IAAI,cAAc,GAAG,gBAAgB,CAAC,EAAE;YACnE,MAAM,GAAG,cAAc,GAAG,gBAAgB,CAAC;YAC3C,eAAe,GAAG,MAAM,GAAG,eAAe,CAAC;YAC3C,gBAAgB,GAAG,cAAc,CAAC;SACnC;QAED,kEAAkE;QAClE,IAAM,CAAC,GAAG,CAAC,aAAa,GAAG,eAAe,CAAC,GAAG,CAAC,CAAC;QAChD,IAAM,CAAC,GAAG,CAAC,cAAc,GAAG,gBAAgB,CAAC,GAAG,CAAC,CAAC;QAElD,OAAO;YACL,CAAC,GAAA,EAAE,CAAC,GAAA;YACJ,CAAC,EAAE,eAAe;YAClB,CAAC,EAAE,gBAAgB;SACpB,CAAC;IACJ,CAAC;IACH,iCAAC;AAAD,CAAC,AAzGD,CAAgD,yCAAmB,GAyGlE;AAzGY,gEAA0B","sourcesContent":["import { BackgroundProcessor, BackgroundProcessorOptions } from './BackgroundProcessor';\nimport { ImageFit } from '../../types';\n\n/**\n * Options passed to [[VirtualBackgroundProcessor]] constructor.\n */\nexport interface VirtualBackgroundProcessorOptions extends BackgroundProcessorOptions {\n /**\n * The 
HTMLImageElement to use for background replacement.\n * An error will be raised if the image hasn't been fully loaded yet. Additionally, the image must follow\n * [security guidelines](https://developer.mozilla.org/en-US/docs/Web/HTML/CORS_enabled_image)\n * when loading the image from a different origin. Failing to do so will result to an empty output frame.\n */\n backgroundImage: HTMLImageElement;\n\n /**\n * The [[ImageFit]] to use for positioning of the background image in the viewport.\n * @default\n * ```html\n * 'Fill'\n * ```\n */\n fitType?: ImageFit;\n}\n\n/**\n * The VirtualBackgroundProcessor, when added to a VideoTrack,\n * replaces the background in each video frame with a given image,\n * and leaves the foreground (person(s)) untouched. Each instance of\n * VirtualBackgroundProcessor should be added to only one VideoTrack\n * at a time to prevent overlapping of image data from multiple VideoTracks.\n *\n * @example\n *\n * ```ts\n * import { createLocalVideoTrack } from 'twilio-video';\n * import { VirtualBackgroundProcessor } from '@twilio/video-processors';\n *\n * let virtualBackground;\n * const img = new Image();\n *\n * img.onload = () => {\n * virtualBackground = new VirtualBackgroundProcessor({\n * assetsPath: 'https://my-server-path/assets',\n * backgroundImage: img,\n * });\n *\n * virtualBackground.loadModel().then(() => {\n * createLocalVideoTrack({\n * width: 640,\n * height: 480,\n * frameRate: 24\n * }).then(track => {\n * track.addProcessor(virtualBackground);\n * });\n * });\n * };\n * img.src = '/background.jpg';\n * ```\n */\nexport class VirtualBackgroundProcessor extends BackgroundProcessor {\n\n private _backgroundImage!: HTMLImageElement;\n private _fitType!: ImageFit;\n // tslint:disable-next-line no-unused-variable\n private readonly _name: string = 'VirtualBackgroundProcessor';\n\n /**\n * Construct a VirtualBackgroundProcessor. Default values will be used for\n * any missing optional properties in [[VirtualBackgroundProcessorOptions]],\n * and invalid properties will be ignored.\n */\n constructor(options: VirtualBackgroundProcessorOptions) {\n super(options);\n this.backgroundImage = options.backgroundImage;\n this.fitType = options.fitType!;\n }\n\n /**\n * The HTMLImageElement representing the current background image.\n */\n get backgroundImage(): HTMLImageElement {\n return this._backgroundImage;\n }\n\n /**\n * Set an HTMLImageElement as the new background image.\n * An error will be raised if the image hasn't been fully loaded yet. Additionally, the image must follow\n * [security guidelines](https://developer.mozilla.org/en-US/docs/Web/HTML/CORS_enabled_image)\n * when loading the image from a different origin. Failing to do so will result to an empty output frame.\n */\n set backgroundImage(image: HTMLImageElement) {\n if (!image || !image.complete || !image.naturalHeight) {\n throw new Error('Invalid image. Make sure that the image is an HTMLImageElement and has been successfully loaded');\n }\n this._backgroundImage = image;\n }\n\n /**\n * The current [[ImageFit]] for positioning of the background image in the viewport.\n */\n get fitType(): ImageFit {\n return this._fitType;\n }\n\n /**\n * Set a new [[ImageFit]] to be used for positioning the background image in the viewport.\n */\n set fitType(fitType: ImageFit) {\n const validTypes = Object.keys(ImageFit);\n if (!validTypes.includes(fitType as any)) {\n console.warn(`Valid fitType not found. 
Using '${ImageFit.Fill}' as default.`);\n fitType = ImageFit.Fill;\n }\n this._fitType = fitType;\n }\n\n protected _setBackground(): void {\n const img = this._backgroundImage;\n const imageWidth = img.naturalWidth;\n const imageHeight = img.naturalHeight;\n const canvasWidth = this._outputCanvas.width;\n const canvasHeight = this._outputCanvas.height;\n\n if (this._fitType === ImageFit.Fill) {\n this._outputContext.drawImage(img, 0, 0, imageWidth, imageHeight, 0, 0, canvasWidth, canvasHeight);\n } else if (this._fitType === ImageFit.None) {\n this._outputContext.drawImage(img, 0, 0, imageWidth, imageHeight);\n } else if (this._fitType === ImageFit.Contain) {\n const { x, y, w, h } = this._getFitPosition(imageWidth, imageHeight, canvasWidth, canvasHeight, ImageFit.Contain);\n this._outputContext.drawImage(img, 0, 0, imageWidth, imageHeight, x, y, w, h);\n } else if (this._fitType === ImageFit.Cover) {\n const { x, y, w, h } = this._getFitPosition(imageWidth, imageHeight, canvasWidth, canvasHeight, ImageFit.Cover);\n this._outputContext.drawImage(img, 0, 0, imageWidth, imageHeight, x, y, w, h);\n }\n }\n\n private _getFitPosition(contentWidth: number, contentHeight: number,\n viewportWidth: number, viewportHeight: number, type: ImageFit)\n : { h: number, w: number, x: number, y: number } {\n\n // Calculate new content width to fit viewport width\n let factor = viewportWidth / contentWidth;\n let newContentWidth = viewportWidth;\n let newContentHeight = factor * contentHeight;\n\n // Scale down the resulting height and width more\n // to fit viewport height if the content still exceeds it\n if ((type === ImageFit.Contain && newContentHeight > viewportHeight)\n || (type === ImageFit.Cover && viewportHeight > newContentHeight)) {\n factor = viewportHeight / newContentHeight;\n newContentWidth = factor * newContentWidth;\n newContentHeight = viewportHeight;\n }\n\n // Calculate the destination top left corner to center the content\n const x = (viewportWidth - newContentWidth) / 2;\n const y = (viewportHeight - newContentHeight) / 2;\n\n return {\n x, y,\n w: newContentWidth,\n h: newContentHeight,\n };\n }\n}\n"]} \ No newline at end of file diff --git a/es5/types.d.ts b/es5/types.d.ts new file mode 100644 index 0000000..68d0775 --- /dev/null +++ b/es5/types.d.ts @@ -0,0 +1,52 @@ +/** + * @private + */ +declare global { + interface Window { + chrome: any; + createTwilioTFLiteModule: () => Promise; + createTwilioTFLiteSIMDModule: () => Promise; + Twilio: Object & { + VideoProcessors?: any; + }; + } +} +/** + * @private + */ +export interface Timing { + delay?: number; + end?: number; + start?: number; +} +/** + * @private + */ +export interface Dimensions { + height: number; + width: number; +} +/** + * ImageFit specifies the positioning of an image inside a viewport. + */ +export declare enum ImageFit { + /** + * Scale the image up or down to fill the viewport while preserving the aspect ratio. + * The image will be fully visible but will add empty space in the viewport if + * aspect ratios do not match. + */ + Contain = "Contain", + /** + * Scale the image to fill both height and width of the viewport while preserving + * the aspect ratio, but will crop the image if aspect ratios do not match. + */ + Cover = "Cover", + /** + * Stretches the image to fill the viewport regardless of aspect ratio. + */ + Fill = "Fill", + /** + * Ignore height and width and use the original size. 
+ */ + None = "None" +} diff --git a/es5/types.js b/es5/types.js new file mode 100644 index 0000000..9c25d01 --- /dev/null +++ b/es5/types.js @@ -0,0 +1,29 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.ImageFit = void 0; +/** + * ImageFit specifies the positioning of an image inside a viewport. + */ +var ImageFit; +(function (ImageFit) { + /** + * Scale the image up or down to fill the viewport while preserving the aspect ratio. + * The image will be fully visible but will add empty space in the viewport if + * aspect ratios do not match. + */ + ImageFit["Contain"] = "Contain"; + /** + * Scale the image to fill both height and width of the viewport while preserving + * the aspect ratio, but will crop the image if aspect ratios do not match. + */ + ImageFit["Cover"] = "Cover"; + /** + * Stretches the image to fill the viewport regardless of aspect ratio. + */ + ImageFit["Fill"] = "Fill"; + /** + * Ignore height and width and use the original size. + */ + ImageFit["None"] = "None"; +})(ImageFit = exports.ImageFit || (exports.ImageFit = {})); +//# sourceMappingURL=types.js.map \ No newline at end of file diff --git a/es5/types.js.map b/es5/types.js.map new file mode 100644 index 0000000..8e891c8 --- /dev/null +++ b/es5/types.js.map @@ -0,0 +1 @@ +{"version":3,"file":"types.js","sourceRoot":"","sources":["../lib/types.ts"],"names":[],"mappings":";;;AA6BA;;GAEG;AACH,IAAY,QAuBX;AAvBD,WAAY,QAAQ;IAClB;;;;OAIG;IACH,+BAAmB,CAAA;IAEnB;;;OAGG;IACH,2BAAe,CAAA;IAEf;;OAEG;IACH,yBAAa,CAAA;IAEb;;OAEG;IACH,yBAAa,CAAA;AACf,CAAC,EAvBW,QAAQ,GAAR,gBAAQ,KAAR,gBAAQ,QAuBnB","sourcesContent":["/**\n * @private\n */\n declare global {\n interface Window {\n chrome: any;\n createTwilioTFLiteModule: () => Promise;\n createTwilioTFLiteSIMDModule: () => Promise;\n Twilio: Object & { VideoProcessors?: any };\n }\n}\n\n/**\n * @private\n */\nexport interface Timing {\n delay?: number;\n end?: number;\n start?: number;\n}\n\n/**\n * @private\n */\nexport interface Dimensions {\n height: number;\n width: number;\n}\n\n/**\n * ImageFit specifies the positioning of an image inside a viewport.\n */\nexport enum ImageFit {\n /**\n * Scale the image up or down to fill the viewport while preserving the aspect ratio.\n * The image will be fully visible but will add empty space in the viewport if\n * aspect ratios do not match.\n */\n Contain = 'Contain',\n\n /**\n * Scale the image to fill both height and width of the viewport while preserving\n * the aspect ratio, but will crop the image if aspect ratios do not match.\n */\n Cover = 'Cover',\n\n /**\n * Stretches the image to fill the viewport regardless of aspect ratio.\n */\n Fill = 'Fill',\n\n /**\n * Ignore height and width and use the original size.\n */\n None = 'None'\n}\n"]} \ No newline at end of file diff --git a/es5/utils/Benchmark.d.ts b/es5/utils/Benchmark.d.ts new file mode 100644 index 0000000..7de7039 --- /dev/null +++ b/es5/utils/Benchmark.d.ts @@ -0,0 +1,15 @@ +/** + * @private + */ +export declare class Benchmark { + static readonly cacheSize = 41; + private _timingCache; + private _timings; + constructor(); + end(name: string): void; + getAverageDelay(name: string): number | undefined; + getNames(): string[]; + getRate(name: string): number | undefined; + start(name: string): void; + private _save; +} diff --git a/es5/utils/Benchmark.js b/es5/utils/Benchmark.js new file mode 100644 index 0000000..72660e5 --- /dev/null +++ b/es5/utils/Benchmark.js @@ -0,0 +1,79 @@ +"use strict"; +var __assign = (this && 
this.__assign) || function () { + __assign = Object.assign || function(t) { + for (var s, i = 1, n = arguments.length; i < n; i++) { + s = arguments[i]; + for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) + t[p] = s[p]; + } + return t; + }; + return __assign.apply(this, arguments); +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.Benchmark = void 0; +/** + * @private + */ +var Benchmark = /** @class */ (function () { + function Benchmark() { + this._timingCache = new Map(); + this._timings = new Map(); + } + Benchmark.prototype.end = function (name) { + var timing = this._timings.get(name); + if (!timing) { + return; + } + timing.end = Date.now(); + timing.delay = timing.end - timing.start; + this._save(name, __assign({}, timing)); + }; + Benchmark.prototype.getAverageDelay = function (name) { + var timingCache = this._timingCache.get(name); + if (!timingCache || !timingCache.length) { + return; + } + return timingCache.map(function (timing) { return timing.delay; }) + .reduce(function (total, value) { return total += value; }, 0) / timingCache.length; + }; + Benchmark.prototype.getNames = function () { + return Array.from(this._timingCache.keys()); + }; + Benchmark.prototype.getRate = function (name) { + var timingCache = this._timingCache.get(name); + if (!timingCache || timingCache.length < 2) { + return; + } + var totalDelay = timingCache[timingCache.length - 1].end - timingCache[0].start; + return (timingCache.length / totalDelay) * 1000; + }; + Benchmark.prototype.start = function (name) { + var timing = this._timings.get(name); + if (!timing) { + timing = {}; + this._timings.set(name, timing); + } + timing.start = Date.now(); + delete timing.end; + delete timing.delay; + }; + Benchmark.prototype._save = function (name, timing) { + var timingCache = this._timingCache.get(name); + if (!timingCache) { + timingCache = []; + this._timingCache.set(name, timingCache); + } + timingCache.push(timing); + if (timingCache.length > Benchmark.cacheSize) { + timingCache.splice(0, timingCache.length - Benchmark.cacheSize); + } + }; + // NOTE (csantos): How many timing information to save per benchmark. + // This is about the amount of timing info generated on a 24fps input. 
+ // Enough samples to calculate fps + Benchmark.cacheSize = 41; + return Benchmark; +}()); +exports.Benchmark = Benchmark; +//# sourceMappingURL=Benchmark.js.map \ No newline at end of file diff --git a/es5/utils/Benchmark.js.map b/es5/utils/Benchmark.js.map new file mode 100644 index 0000000..fb5a05d --- /dev/null +++ b/es5/utils/Benchmark.js.map @@ -0,0 +1 @@ +{"version":3,"file":"Benchmark.js","sourceRoot":"","sources":["../../lib/utils/Benchmark.ts"],"names":[],"mappings":";;;;;;;;;;;;;;AAEA;;GAEG;AACH;IAUE;QACE,IAAI,CAAC,YAAY,GAAG,IAAI,GAAG,EAAE,CAAC;QAC9B,IAAI,CAAC,QAAQ,GAAG,IAAI,GAAG,EAAE,CAAC;IAC5B,CAAC;IAED,uBAAG,GAAH,UAAI,IAAY;QACd,IAAM,MAAM,GAAG,IAAI,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;QACvC,IAAI,CAAC,MAAM,EAAE;YACX,OAAO;SACR;QACD,MAAM,CAAC,GAAG,GAAG,IAAI,CAAC,GAAG,EAAE,CAAC;QACxB,MAAM,CAAC,KAAK,GAAG,MAAM,CAAC,GAAG,GAAG,MAAM,CAAC,KAAM,CAAC;QAC1C,IAAI,CAAC,KAAK,CAAC,IAAI,eAAM,MAAM,EAAE,CAAC;IAChC,CAAC;IAED,mCAAe,GAAf,UAAgB,IAAY;QAC1B,IAAM,WAAW,GAAG,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;QAChD,IAAI,CAAC,WAAW,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE;YACvC,OAAO;SACR;QACD,OAAO,WAAW,CAAC,GAAG,CAAC,UAAA,MAAM,IAAI,OAAA,MAAM,CAAC,KAAM,EAAb,CAAa,CAAC;aAC5C,MAAM,CAAC,UAAC,KAAa,EAAE,KAAa,IAAK,OAAA,KAAK,IAAI,KAAK,EAAd,CAAc,EAAE,CAAC,CAAC,GAAG,WAAW,CAAC,MAAM,CAAC;IACtF,CAAC;IAED,4BAAQ,GAAR;QACE,OAAO,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,YAAY,CAAC,IAAI,EAAE,CAAC,CAAC;IAC9C,CAAC;IAED,2BAAO,GAAP,UAAQ,IAAY;QAClB,IAAM,WAAW,GAAG,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;QAChD,IAAI,CAAC,WAAW,IAAI,WAAW,CAAC,MAAM,GAAG,CAAC,EAAE;YAC1C,OAAO;SACR;QACD,IAAM,UAAU,GAAG,WAAW,CAAC,WAAW,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,GAAI,GAAG,WAAW,CAAC,CAAC,CAAC,CAAC,KAAM,CAAC;QACpF,OAAO,CAAC,WAAW,CAAC,MAAM,GAAG,UAAU,CAAC,GAAG,IAAI,CAAC;IAClD,CAAC;IAED,yBAAK,GAAL,UAAM,IAAY;QAChB,IAAI,MAAM,GAAG,IAAI,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;QACrC,IAAI,CAAC,MAAM,EAAE;YACX,MAAM,GAAG,EAAE,CAAC;YACZ,IAAI,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,EAAE,MAAM,CAAC,CAAC;SACjC;QACD,MAAM,CAAC,KAAK,GAAG,IAAI,CAAC,GAAG,EAAE,CAAC;QAC1B,OAAO,MAAM,CAAC,GAAG,CAAC;QAClB,OAAO,MAAM,CAAC,KAAK,CAAC;IACtB,CAAC;IAEO,yBAAK,GAAb,UAAc,IAAY,EAAE,MAAc;QACxC,IAAI,WAAW,GAAG,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;QAC9C,IAAI,CAAC,WAAW,EAAE;YAChB,WAAW,GAAG,EAAE,CAAC;YACjB,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,IAAI,EAAE,WAAW,CAAC,CAAC;SAC1C;QAED,WAAW,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;QAEzB,IAAI,WAAW,CAAC,MAAM,GAAG,SAAS,CAAC,SAAS,EAAE;YAC5C,WAAW,CAAC,MAAM,CAAC,CAAC,EAAE,WAAW,CAAC,MAAM,GAAG,SAAS,CAAC,SAAS,CAAC,CAAC;SACjE;IACH,CAAC;IApED,qEAAqE;IACrE,sEAAsE;IACtE,kCAAkC;IAClB,mBAAS,GAAG,EAAE,CAAC;IAkEjC,gBAAC;CAAA,AAvED,IAuEC;AAvEY,8BAAS","sourcesContent":["import { Timing } from '../types';\n\n/**\n * @private\n */\nexport class Benchmark {\n\n // NOTE (csantos): How many timing information to save per benchmark.\n // This is about the amount of timing info generated on a 24fps input.\n // Enough samples to calculate fps\n static readonly cacheSize = 41;\n\n private _timingCache: Map;\n private _timings: Map;\n\n constructor() {\n this._timingCache = new Map();\n this._timings = new Map();\n }\n\n end(name: string) {\n const timing = this._timings.get(name);\n if (!timing) {\n return;\n }\n timing.end = Date.now();\n timing.delay = timing.end - timing.start!;\n this._save(name, {...timing});\n }\n\n getAverageDelay(name: string): number | undefined {\n const timingCache = this._timingCache.get(name);\n if (!timingCache || !timingCache.length) {\n return;\n }\n return timingCache.map(timing => timing.delay!)\n .reduce((total: number, value: number) => total += value, 0) / 
timingCache.length;\n }\n\n getNames(): string[] {\n return Array.from(this._timingCache.keys());\n }\n\n getRate(name: string): number | undefined {\n const timingCache = this._timingCache.get(name);\n if (!timingCache || timingCache.length < 2) {\n return;\n }\n const totalDelay = timingCache[timingCache.length - 1].end! - timingCache[0].start!;\n return (timingCache.length / totalDelay) * 1000;\n }\n\n start(name: string) {\n let timing = this._timings.get(name);\n if (!timing) {\n timing = {};\n this._timings.set(name, timing);\n }\n timing.start = Date.now();\n delete timing.end;\n delete timing.delay;\n }\n\n private _save(name: string, timing: Timing) {\n let timingCache = this._timingCache.get(name);\n if (!timingCache) {\n timingCache = [];\n this._timingCache.set(name, timingCache);\n }\n\n timingCache.push(timing);\n\n if (timingCache.length > Benchmark.cacheSize) {\n timingCache.splice(0, timingCache.length - Benchmark.cacheSize);\n }\n }\n}\n"]} \ No newline at end of file diff --git a/es5/utils/support.d.ts b/es5/utils/support.d.ts new file mode 100644 index 0000000..3176ebd --- /dev/null +++ b/es5/utils/support.d.ts @@ -0,0 +1,17 @@ +/** + * @private + */ +export declare function isBrowserSupported(): boolean; +/** + * Check if the current browser is officially supported by twilio-video-procesors.js. + * This is set to `true` for chromium-based desktop browsers. + * @example + * ```ts + * import { isSupported } from '@twilio/video-processors'; + * + * if (isSupported) { + * // Initialize the background processors + * } + * ``` + */ +export declare const isSupported: boolean; diff --git a/es5/utils/support.js b/es5/utils/support.js new file mode 100644 index 0000000..9e5f4f7 --- /dev/null +++ b/es5/utils/support.js @@ -0,0 +1,24 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.isSupported = exports.isBrowserSupported = void 0; +/** + * @private + */ +function isBrowserSupported() { + return !!window.OffscreenCanvas && !(/Mobi/i.test(window.navigator.userAgent)) && !!window.chrome; +} +exports.isBrowserSupported = isBrowserSupported; +/** + * Check if the current browser is officially supported by twilio-video-procesors.js. + * This is set to `true` for chromium-based desktop browsers. 
+ * @example + * ```ts + * import { isSupported } from '@twilio/video-processors'; + * + * if (isSupported) { + * // Initialize the background processors + * } + * ``` + */ +exports.isSupported = isBrowserSupported(); +//# sourceMappingURL=support.js.map \ No newline at end of file diff --git a/es5/utils/support.js.map b/es5/utils/support.js.map new file mode 100644 index 0000000..487cad3 --- /dev/null +++ b/es5/utils/support.js.map @@ -0,0 +1 @@ +{"version":3,"file":"support.js","sourceRoot":"","sources":["../../lib/utils/support.ts"],"names":[],"mappings":";;;AAAA;;GAEG;AACF,SAAgB,kBAAkB;IACjC,OAAO,CAAC,CAAC,MAAM,CAAC,eAAe,IAAI,CAAC,CAAC,OAAO,CAAC,IAAI,CAAC,MAAM,CAAC,SAAS,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC;AACpG,CAAC;AAFA,gDAEA;AAED;;;;;;;;;;;GAWG;AACW,QAAA,WAAW,GAAG,kBAAkB,EAAE,CAAC","sourcesContent":["/**\n * @private\n */\n export function isBrowserSupported() {\n return !!window.OffscreenCanvas && !(/Mobi/i.test(window.navigator.userAgent)) && !!window.chrome;\n}\n\n/**\n * Check if the current browser is officially supported by twilio-video-procesors.js.\n * This is set to `true` for chromium-based desktop browsers.\n * @example\n * ```ts\n * import { isSupported } from '@twilio/video-processors';\n *\n * if (isSupported) {\n * // Initialize the background processors\n * }\n * ```\n */\n export const isSupported = isBrowserSupported();\n"]} \ No newline at end of file diff --git a/es5/utils/version.d.ts b/es5/utils/version.d.ts new file mode 100644 index 0000000..f640437 --- /dev/null +++ b/es5/utils/version.d.ts @@ -0,0 +1,4 @@ +/** + * The current version of the library. + */ +export declare const version: string; diff --git a/es5/utils/version.js b/es5/utils/version.js new file mode 100644 index 0000000..17197a8 --- /dev/null +++ b/es5/utils/version.js @@ -0,0 +1,9 @@ +"use strict"; +// This file is generated on build. To make changes, see scripts/version.js +Object.defineProperty(exports, "__esModule", { value: true }); +exports.version = void 0; +/** + * The current version of the library. + */ +exports.version = '1.0.1'; +//# sourceMappingURL=version.js.map \ No newline at end of file diff --git a/es5/utils/version.js.map b/es5/utils/version.js.map new file mode 100644 index 0000000..b65c820 --- /dev/null +++ b/es5/utils/version.js.map @@ -0,0 +1 @@ +{"version":3,"file":"version.js","sourceRoot":"","sources":["../../lib/utils/version.ts"],"names":[],"mappings":";AAAA,2EAA2E;;;AAE3E;;GAEG;AACU,QAAA,OAAO,GAAW,OAAO,CAAC","sourcesContent":["// This file is generated on build. To make changes, see scripts/version.js\n\n/**\n * The current version of the library.\n */\nexport const version: string = '1.0.1';\n"]} \ No newline at end of file diff --git a/lib/utils/version.ts b/lib/utils/version.ts new file mode 100644 index 0000000..136d5fe --- /dev/null +++ b/lib/utils/version.ts @@ -0,0 +1,6 @@ +// This file is generated on build. To make changes, see scripts/version.js + +/** + * The current version of the library. + */ +export const version: string = '1.0.1'; diff --git a/package.json b/package.json index 055ba5c..1a30362 100644 --- a/package.json +++ b/package.json @@ -2,7 +2,7 @@ "name": "@twilio/video-processors", "title": "Twilio Video Processors", "description": "Twilio Video Processors JavaScript Library", - "version": "1.0.1-dev", + "version": "1.0.1", "homepage": "https://github.com/twilio/twilio-video-processors.js#readme", "author": "Charlie Santos ", "contributors": [