js在浏览器中face-api实时捕获摄像头人脸表情控制驱动vrm三维人物表情代码
代码语言:html
所属分类:三维
代码描述:js在浏览器中face-api实时捕获摄像头人脸表情控制驱动vrm三维人物表情代码,注意VRM模型中的表情通常使用blendshapes(也称为morph targets)来实现。VRM 规范定义了一套标准的表情blendshapes,但并不是所有的VRM模型都会包含所有这些表情。以下是VRM标准中定义的表情blendshapes及其对应的face-api.js表情: neutral - 对应 face-api.js 的 "neutral";a - 张嘴(可用于 "surprised" 或 "happy" 表情)
代码标签: js 浏览器 face-api 捕获 摄像头 人脸 表情 控制 vrm 三维 人物 表情
下面为部分代码预览,完整代码请点击下载或在bfwstudio webide中打开
<!DOCTYPE html> <html> <head> <meta charset="utf-8" />
<!-- Lock viewport scaling for mobile so the WebGL canvas fills the screen 1:1 -->
<meta name="viewport" content="width=device-width, initial-scale=1.0, minimum-scale=1.0, maximum-scale=1.0, user-scalable=no" />
<style>
body { margin: 0; }
canvas { display: block; }
/* Webcam preview pinned to the top-right corner, above the WebGL canvas */
#video { position: absolute; top: 10px; right: 10px; width: 320px; height: 240px; }
</style> </head> <body>
<!-- Live webcam feed; `muted` + `autoplay` lets it start without user gesture -->
<video id="video" width="320" height="240" autoplay muted></video>
<!-- face-api.js is a classic (non-module) script; it defines the global `faceapi` -->
<script type="text/javascript" src="//repo.bfw.wiki/bfwrepo/js/face-api.1.7.13.js"></script>
<!-- Import map resolving the bare specifiers used by the module script below
     (JSON — comments are not allowed inside it) -->
<script type="importmap"> { "imports": { "three": "//repo.bfw.wiki/bfwrepo/js/module/three/build/164/three.module.js", "three/addons/": "//repo.bfw.wiki/bfwrepo/js/module/three/examples/164/jsm/", "@pixiv/three-vrm": "//repo.bfw.wiki/bfwrepo/js/three-vrm.module.3.0.0.js" } } </script>
<script type="module">
import * as THREE from 'three';
import { GLTFLoader } from 'three/addons/loaders/GLTFLoader.js';
import { OrbitControls } from 'three/addons/controls/OrbitControls.js';
import { VRMLoaderPlugin, VRMUtils } from '@pixiv/three-vrm';
// Shared mutable state: the loaded VRM instance and the latest detection result.
let currentVrm, faceDetection;
const modelPath = '../models/'; // path to model folder that will be loaded using http
const minScore = 0.2; // minimum score
const maxResults = 5; // maximum number of results to return
// Populated by setupFaceAPI(); read by detectVideo() on every frame.
let optionsSSDMobileNet;
// Try to select a specific webcam (if one exists).
// NOTE(review): the original (Chinese) comment said "third camera", but the
// code below always takes the FIRST video-input device (index 0) — confirm intent.
const devices = await navigator.mediaDevices.enumerateDevices();
const videoDevices = devices.filter(device => device.kind === 'videoinput');
const deviceId = videoDevices.length >= 1 ? videoDevices[0].deviceId : undefined;
// Fall back to `video: true` (any camera) when no device id could be resolved.
const stream = await navigator.mediaDevices.getUserMedia({ video: deviceId ? 
{ deviceId: { exact: deviceId } } : true });
video.srcObject = stream;
// One detection step per display frame: detect faces + landmarks + expressions
// + age/gender on the <video> element, feed the first face to the VRM rig,
// then reschedule itself via requestAnimationFrame from BOTH the success and
// error paths so the loop never stalls. Returns false synchronously (the
// Promise chain is deliberately not awaited).
async function detectVideo() {
const t0 = performance.now();
faceapi .detectAllFaces(video, optionsSSDMobileNet) .withFaceLandmarks() .withFaceExpressions() // .withFaceDescriptors()
.withAgeAndGender() .then((result) => {
// fps is derived from wall-clock detection time; currently only computed,
// not displayed.
const fps = 1000 / (performance.now() - t0);
console.log(result[0]);
// NOTE(review): result[0] is undefined when no face is detected — the
// handler (updateVRMExpressions, defined outside this preview) must cope.
updateVRMExpressions(result[0]);
requestAnimationFrame(() => detectVideo());
return true;
}) .catch((err) => {
console.log(err);
requestAnimationFrame(() => detectVideo());
return true;
});
return false;
}
// Load all face-api model weights from modelPath and build the detector
// options. Must complete before detectVideo() is first called, since that
// reads optionsSSDMobileNet.
async function setupFaceAPI() { // load face-api models
// log('Models loading');
// await faceapi.nets.tinyFaceDetector.load(modelPath); // using ssdMobilenetv1
await faceapi.nets.ssdMobilenetv1.load(modelPath);
await faceapi.nets.ageGenderNet.load(modelPath);
await faceapi.nets.faceLandmark68Net.load(modelPath);
await faceapi.nets.faceRecognitionNet.load(modelPath);
await faceapi.nets.faceExpressionNet.load(modelPath);
optionsSSDMobileNet = new faceapi.SsdMobilenetv1Options({ minConfidence: minScore, maxResults });
// check tf engine state
}
// Translated from Chinese: "initialize face-api".
// NOTE(review): the label does not match — the function below sets up the
// three.js/VRM scene, not face-api.
// Build the three.js renderer, camera, orbit controls, scene and lighting
// for the VRM avatar.
async function initVrm() { // renderer
const renderer = new THREE.WebGLRenderer();
renderer.setSize( window.innerWidth, window.innerHeight );
renderer.setPixelRatio( window.devicePixelRatio );
document.body.appendChild( renderer.domElement );
// camera
const camera = new THREE.PerspectiveCamera( 30.0, window.innerWidth / window.innerHeight, 0.1, 20.0 );
camera.position.set( 0.0, 1.0, 5.0 );
// camera controls
const controls = new OrbitControls( camera, renderer.domElement );
controls.screenSpacePanning = true;
controls.target.set( 0.0, 1.0, 0.0 );
controls.update();
// scene
const scene = new THREE.Scene();
// light
const light = new THREE.DirectionalLight( 0xffffff, Math.PI );
// NOTE(review): the source preview is truncated here — the remainder of
// initVrm() (VRM loading, render loop) is not visible in this file.
.........完整代码请登录后点击上方下载按钮下载查看
网友评论0