
How to extract the detected faces from face-api.js

I am using a JavaScript library called face-api.js.

I need to extract the face from the video frame when face-api detects a face. Could anyone help me to do that part?

const video = document.getElementById('video');

Promise.all([
    faceapi.nets.tinyFaceDetector.loadFromUri('/models')
]).then(startVideo)

function startVideo() {
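    // Note: navigator.getUserMedia is deprecated; navigator.mediaDevices.getUserMedia is the modern, Promise-based replacement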
    navigator.getUserMedia(
        {video: {}},
        stream => video.srcObject = stream,
        err => console.error(err)
    )
}

video.addEventListener('play', () => {
    const canvas = faceapi.createCanvasFromMedia(video);
    document.body.append(canvas);
    const displaySize = {width: video.width, height: video.height};
    faceapi.matchDimensions(canvas, displaySize);
    setInterval(async () => {
        const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())

        console.log('Box: ', detections[0].detection._box);

        const resizedDetections = faceapi.resizeResults(detections, displaySize)

        canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
        faceapi.draw.drawDetections(canvas, resizedDetections)
    }, 5000)
})

Add the extractFaceFromBox function below to your code. It extracts a face from the video frame for a given bounding box and displays the result in the outputImage element (an img element with id outputImage). Try this code and enjoy.

// This is your code
video.addEventListener('play', () => {
    const canvas = faceapi.createCanvasFromMedia(video);
    document.body.append(canvas);
    const displaySize = {width: video.width, height: video.height};
    faceapi.matchDimensions(canvas, displaySize);
    setInterval(async () => {
        const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
        
        // Call this function to extract and display the detected face
        extractFaceFromBox(video, detections[0].detection.box)
        
        const resizedDetections = faceapi.resizeResults(detections, displaySize)

        canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
        faceapi.draw.drawDetections(canvas, resizedDetections)
    }, 5000)
})

let outputImage = document.getElementById('outputImage')

// This function extracts a face from the video frame for a given bounding box and displays the result in outputImage
async function extractFaceFromBox(inputImage, box){ 
    const regionsToExtract = [
        new faceapi.Rect( box.x, box.y , box.width , box.height)
    ]
                        
    let faceImages = await faceapi.extractFaces(inputImage, regionsToExtract)
    
    if(faceImages.length == 0){
        console.log('Face not found')
    }
    else
    {
        faceImages.forEach(cnv =>{      
            outputImage.src = cnv.toDataURL();      
        })
    }   
}

This is not specific to face-api.js, but you can use a canvas to extract an image from a video. Here is a little function I wrote for my case.

const extractFace = async (video,x,y,width, height) => {
    const canvas = document.createElement("canvas");
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    const context = canvas.getContext("2d");

    // Get a screenshot from the video 
    context?.drawImage(video, 0, 0, canvas.width, canvas.height);
    const dataUrl = canvas.toDataURL("image/jpeg");

    const image = new Image();
    image.src = dataUrl;

    const canvasImg = document.createElement("canvas");
    canvasImg.width = width;
    canvasImg.height = height;
    const ctx = canvasImg.getContext("2d");
    image.onload = () => {
      // Crop the image 
      ctx?.drawImage(image, x, y, width, height, 0, 0, width, height);
      canvasImg.toBlob((blob) => {
        // Do something with the blob. Alternatively, you can convert it to a data URL like the video screenshot
        // I was using React, so I just called my handler
        handSavePhoto(blob);
      }, "image/jpeg");
    };
};

You don't have to take the screenshot first; you can crop straight from the video, but I found after testing that cropping from an image gives more consistent results. Here is how you would do it if you skip the screenshot.


const extractFace = async (video, x, y, width, height) => {
  const canvas = document.createElement("canvas");
  canvas.width = width;
  canvas.height = height;
  const context = canvas.getContext("2d");

  // Crop the face region straight from the video (no intermediate screenshot)
  context?.drawImage(video, x, y, width, height, 0, 0, width, height);
  canvas.toBlob((blob) => {
    handSavePhoto(blob);
  }, "image/jpeg");
};

With that out of the way, you can now use face-api data to get the face you want.

// assuming your video element is stored in a variable named video
const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions());
const {x, y, width, height} = detections[0].detection.box;
extractFace(video, x, y, width, height);

You can read more about drawImage here.
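For quick reference, both snippets rely on the nine-argument form of drawImage, which copies a source rectangle into a destination rectangle. Here is a minimal sketch, assuming video is your playing video element and x, y, width and height come from a detection box:

// drawImage(source, sx, sy, sWidth, sHeight, dx, dy, dWidth, dHeight)
// copies the sWidth x sHeight region at (sx, sy) of the source
// into the dWidth x dHeight region at (dx, dy) of the canvas
const faceCanvas = document.createElement("canvas");
faceCanvas.width = width;
faceCanvas.height = height;
faceCanvas.getContext("2d").drawImage(video, x, y, width, height, 0, 0, width, height);

// Read the crop out as a data URL (or use faceCanvas.toBlob for a Blob)
const faceDataUrl = faceCanvas.toDataURL("image/jpeg");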

Check that detections.length is greater than 0 before accessing detections[0]. A non-empty array means a face was detected in front of the camera; an empty one means the frame had no detectable face.
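For example, a small guard around the call from the first answer (assuming the same extractFaceFromBox helper) could look like this:

const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions());

if (detections.length > 0) {
    extractFaceFromBox(video, detections[0].detection.box);
} else {
    console.log('No face detected in this frame');
}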
