Smart Anti-Occlusion Danmaku with TensorFlow.js
Last year, Bilibili's smart anti-occlusion danmaku went viral on WeChat Moments: the bullet comments flow behind the people on screen, so you no longer have to switch comments off to watch a video.
It turns out that with WebRTC and a pre-trained model published by the TensorFlow.js team, we can build the same anti-occlusion effect ourselves.
TensorFlow.js
The JavaScript version of the famous TensorFlow, which now even ships a plugin for WeChat Mini Programs; that alone is enough to make plenty of developers who were heading off to Summoner's Rift (League of Legends) after work put down the mouse.
BodyPix
A pre-trained model published by the TensorFlow team that can detect person boundaries in an image and segment the person out.
WebRTC
With WebRTC we can stream live video from the camera and get at the per-frame data for processing.
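Before diving into the demo, here is a minimal sketch of the WebRTC part: grabbing the camera with getUserMedia and feeding the stream into a video element. The element id `video` matches the full demo below; note that getUserMedia only works on secure origins (HTTPS or localhost).

// Minimal camera-capture sketch: request video only, attach the
// resulting MediaStream to a <video> element, and start playback.
const video = document.getElementById("video");
navigator.mediaDevices
  .getUserMedia({ video: true })
  .then((stream) => {
    video.srcObject = stream;
    return video.play();
  })
  .catch((error) => console.log("getUserMedia error: ", error));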
Basic Idea
After grabbing each video frame, we run it through BodyPix to get the person segmentation, then render the processed frame data onto a canvas with putImageData. The canvas sits above the scrolling danmaku, and since it only contains opaque pixels where a person was detected, the comments show through everywhere else but disappear behind the person.
Key Code
// Load the pre-trained BodyPix model (defined in the full code below)
const net = await loadBodyPix();
// Classify every pixel of the current frame: person or background
const segmentation = await net.segmentPerson(localVideo);
// Build a mask: person pixels opaque black (a: 255),
// background pixels fully transparent (a: 0)
const coloredPartImage = bodyPix.toMask(
  segmentation,
  { r: 0, g: 0, b: 0, a: 255 },
  { r: 0, g: 0, b: 0, a: 0 }
);
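toMask walks segmentation.data, a Uint8Array with one entry per pixel (1 for person, 0 for background), painting the first color onto person pixels and the second onto everything else; with the arguments above, the person ends up opaque and the background fully transparent. If you want to verify the segmentation before compositing, a quick sanity check might look like this (countPersonPixels is our own hypothetical helper, not part of the BodyPix API):

// Hypothetical debugging helper: count how many pixels BodyPix
// classified as "person" in the current frame.
function countPersonPixels(segmentation) {
  let count = 0;
  for (let i = 0; i < segmentation.data.length; i++) {
    if (segmentation.data[i] === 1) count += 1;
  }
  return count;
}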
Full Code
<html>
  <head>
    <!-- Load TensorFlow.js -->
    <!-- Get latest version at https://github.com/tensorflow/tfjs -->
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.2"></script>
    <!-- Load BodyPix -->
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/body-pix@2.0"></script>
    <style>
      body {
        margin: 0;
        padding: 0;
      }
      .canvas {
        position: absolute;
        top: 0;
        left: 0;
        width: 800px;
        height: 600px;
      }
      .tm {
        position: fixed;
        font-size: 30px;
        color: #fff;
        font-weight: bolder;
        right: -20%;
        animation: 5s linear infinite toleft;
        white-space: nowrap;
      }
      .tm-1 {
        top: 20px;
        animation-delay: 1s;
      }
      .tm-2 {
        top: 60px;
        animation-delay: 2s;
      }
      .tm-3 {
        top: 100px;
        animation-delay: 3s;
      }
      @keyframes toleft {
        from {
          right: -20%;
        }
        to {
          right: 120%;
        }
      }
    </style>
  </head>
  <body>
    <video id="video" width="800" height="600"></video>
<div class="tm tm-1">我的天啦 ! 这也太厉害了吧!</div>
<div class="tm tm-2">老铁,双击666</div>
<div class="tm tm-3">感谢金克丝送的一枚火箭</div>
    <canvas class="canvas" id="canvas"></canvas>
  </body>
  <script>
    const localVideo = document.getElementById("video");
    const canvas = document.getElementById("canvas");
    const ctx = canvas.getContext("2d");
    canvas.width = 800;
    canvas.height = 600;
    // Load the BodyPix model (cached after the first call)
    let net;
    async function loadBodyPix() {
      if (net) return net;
      net = await bodyPix.load();
      return net;
    }
    // Segment the current frame and draw the person onto the canvas
    async function drawCanvas() {
      if (!net) return;
      const segmentation = await net.segmentPerson(localVideo);
      const coloredPartImage = bodyPix.toMask(
        segmentation,
        { r: 0, g: 0, b: 0, a: 255 },
        { r: 0, g: 0, b: 0, a: 0 }
      );
      ctx.drawImage(localVideo, 0, 0, 800, 600);
      const imageData = ctx.getImageData(0, 0, 800, 600);
      const size = imageData.width * imageData.height * 4;
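      // (i + 1) % 4 is truthy for the R, G and B channels and falsy for
      // alpha, so the loop below copies the video's colors into the mask
      // while keeping the mask's alpha: opaque person, transparent background.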
      for (let i = 0; i < size; i++) {
        if ((i + 1) % 4) {
          coloredPartImage.data[i] = imageData.data[i];
        }
      }
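      // putImageData writes pixels verbatim, alpha included, so background
      // pixels stay transparent and the danmaku underneath shows through.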
      ctx.putImageData(coloredPartImage, 0, 0, 0, 0, 800, 600);
    }
    function startCamera() {
      const mediaStreamConstraints = {
        video: true
      };
      // Success handler: attach the MediaStream to the video element
      // and start the per-frame drawing loop once playback begins.
      function gotLocalMediaStream(mediaStream) {
        console.log("getUserMedia() got stream: ", mediaStream);
        if ("srcObject" in localVideo) {
          localVideo.srcObject = mediaStream;
        } else {
          // Fallback for old browsers only; createObjectURL(stream) is
          // deprecated and no longer supported in modern ones.
          localVideo.src = window.URL.createObjectURL(mediaStream);
        }
        localVideo.onloadedmetadata = function() {
          localVideo.play();
        };
        localVideo.addEventListener(
          "play",
          function() {
            setInterval(() => {
              drawCanvas();
            }, 0);
          },
          false
        );
      }
      // Error handler: log the error to the console.
      function handleLocalMediaStreamError(error) {
        console.log("navigator.getUserMedia error: ", error);
      }
      navigator.mediaDevices
        .getUserMedia(mediaStreamConstraints)
        .then(gotLocalMediaStream)
        .catch(handleLocalMediaStreamError);
      loadBodyPix();
    }
    startCamera();
  </script>
</html>
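A note on the frame loop: setInterval(..., 0) fires as often as the browser allows and can saturate the main thread. An alternative sketch (not part of the original demo, reusing localVideo and drawCanvas from the code above) drives drawing with requestAnimationFrame, which syncs with the display refresh rate and pauses in background tabs:

// Alternative frame loop: await one segmentation before scheduling
// the next frame, so calls never overlap.
async function renderLoop() {
  await drawCanvas();
  requestAnimationFrame(renderLoop);
}
localVideo.addEventListener("play", () => renderLoop(), false);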