Add age and gender using face-api.js
How can I add age and gender detection to the code below using face-api.js? I'm having difficulty with it: which net should I add in faceapi.nets._______________.loadFromUri('/models'), and what should I put in faceapi.draw._____________ to draw the result?
const video = document.getElementById('video')

Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
  faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
  faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
  faceapi.nets.faceExpressionNet.loadFromUri('/models')
]).then(startVideo)

startVideo()

function startVideo() {
  navigator.getUserMedia(
    { video: {} },
    stream => video.srcObject = stream,
    err => console.error(err)
  )
}

video.addEventListener('play', () => {
  const canvas = faceapi.createCanvasFromMedia(video)
  document.body.append(canvas)
  const displaySize = { width: video.width, height: video.height }
  faceapi.matchDimensions(canvas, displaySize)
  setInterval(async () => {
    const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceExpressions()
    const resizedDetections = faceapi.resizeResults(detections, displaySize)
    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
    faceapi.draw.drawDetections(canvas, resizedDetections)
    faceapi.draw.drawFaceLandmarks(canvas, resizedDetections)
    faceapi.draw.drawFaceExpressions(canvas, resizedDetections)
    faceapi.draw.drawAge(canvas, agage_gender_model)
  }, 100)
})
Solution 1:[1]
To load the age and gender model, include the code snippet below:
faceapi.nets.ageGenderNet.loadFromUri('/models')
You'll also need to append withAgeAndGender() to the detection chain, as shown below:
const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceExpressions().withAgeAndGender()
In some browsers, the way you are querying the media is deprecated. Instead of

navigator.getUserMedia(
  { video: {} },
  stream => video.srcObject = stream,
  err => console.error(err)
)
use

navigator.mediaDevices.getUserMedia({
  video: true,
  audio: false
}).then(
  (cameraStream) => {
    video.srcObject = cameraStream;
  }
)
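The callback version above also logged errors; if you want the same behaviour with the promise-based API, you can chain a catch handler (a minimal sketch; the catch itself is an addition, not part of the original answer):

navigator.mediaDevices.getUserMedia({ video: true, audio: false })
  .then(cameraStream => {
    video.srcObject = cameraStream;
  })
  .catch(err => {
    // Surface camera errors (permission denied, no device, etc.)
    console.error(err);
  })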
To actually display the age and gender, you need the snippet below, which draws a box around each detected face and labels it:
resizedDetections.forEach(detection => {
  const box = detection.detection.box
  const drawBox = new faceapi.draw.DrawBox(box, { label: Math.round(detection.age) + " year old " + detection.gender })
  drawBox.draw(canvas)
})
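Results produced by withAgeAndGender() also carry a genderProbability field, so you can show how confident the model is about the gender; a small optional sketch:

resizedDetections.forEach(detection => {
  const { age, gender, genderProbability } = detection
  // e.g. "34 year old male (97%)"
  const label = Math.round(age) + " year old " + gender + " (" + Math.round(genderProbability * 100) + "%)"
  new faceapi.draw.DrawBox(detection.detection.box, { label }).draw(canvas)
})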
These changes will make your final script look like this:
const video = document.getElementById('video')

Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
  faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
  faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
  faceapi.nets.faceExpressionNet.loadFromUri('/models'),
  faceapi.nets.ageGenderNet.loadFromUri('/models')
]).then(startVideo)

function startVideo() {
  /*
  navigator.getUserMedia(
    { video: {} },
    stream => video.srcObject = stream,
    err => console.error(err)
  )
  */
  navigator.mediaDevices.getUserMedia({
    video: true,
    audio: false
  }).then(
    (cameraStream) => {
      video.srcObject = cameraStream;
    }
  )
}

video.addEventListener('play', () => {
  const canvas = faceapi.createCanvasFromMedia(video)
  document.body.append(canvas)
  const displaySize = { width: video.width, height: video.height }
  faceapi.matchDimensions(canvas, displaySize)
  setInterval(async () => {
    const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceExpressions().withAgeAndGender()
    const resizedDetections = faceapi.resizeResults(detections, displaySize)
    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
    faceapi.draw.drawDetections(canvas, resizedDetections)
    faceapi.draw.drawFaceLandmarks(canvas, resizedDetections)
    faceapi.draw.drawFaceExpressions(canvas, resizedDetections)
    resizedDetections.forEach(detection => {
      const box = detection.detection.box
      const drawBox = new faceapi.draw.DrawBox(box, { label: Math.round(detection.age) + " year old " + detection.gender })
      drawBox.draw(canvas)
    })
  }, 100)
})
Solution 2:[2]
Change this line
const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceExpressions()
to
const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceExpressions().withAgeAndGender()
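This only works if the age/gender model was also loaded (faceapi.nets.ageGenderNet.loadFromUri(...), as in Solution 1). Each result then carries age and gender fields that you can read directly; a minimal sketch:

detections.forEach(detection => {
  // age is a number, gender is "male" or "female"
  console.log(Math.round(detection.age) + " year old " + detection.gender)
})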
Hope this helps.
Solution 3:[3]
Change your code as shown below. (If you are working on Ubuntu, you may have to use ./models for the model path instead of /models.)
const video = document.getElementById('video')

Promise.all([
  // I used ./models because I'm working on Ubuntu
  faceapi.nets.tinyFaceDetector.loadFromUri('./models'),
  faceapi.nets.faceLandmark68Net.loadFromUri('./models'),
  faceapi.nets.faceRecognitionNet.loadFromUri('./models'),
  faceapi.nets.faceExpressionNet.loadFromUri('./models'),
  faceapi.nets.ageGenderNet.loadFromUri('./models')
]).then(startVideo)

function startVideo() {
  navigator.getUserMedia(
    { video: {} },
    stream => video.srcObject = stream,
    err => console.error(err)
  )
}

video.addEventListener('play', () => {
  const canvas = faceapi.createCanvasFromMedia(video)
  document.body.append(canvas)
  const displaySize = { width: video.width, height: video.height }
  faceapi.matchDimensions(canvas, displaySize)
  setInterval(async () => {
    const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceExpressions().withAgeAndGender()
    const resizedDetections = faceapi.resizeResults(detections, displaySize)
    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
    faceapi.draw.drawDetections(canvas, resizedDetections)
    faceapi.draw.drawFaceLandmarks(canvas, resizedDetections)
    faceapi.draw.drawFaceExpressions(canvas, resizedDetections)
    resizedDetections.forEach(detection => {
      const box = detection.detection.box
      const drawBox = new faceapi.draw.DrawBox(box, { label: Math.round(detection.age) + " year old " + detection.gender })
      drawBox.draw(canvas)
    })
  }, 500)
})
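Whichever variant you use, the path you pass to loadFromUri() has to match the URL the model files are actually served from, so check how your page exposes the models directory. As a minimal sketch (assuming a Node/Express static server, which is not part of the original answers), you could serve them like this:

const express = require('express')
const path = require('path')

const app = express()

// Expose the downloaded face-api.js model files at /models,
// so faceapi.nets.*.loadFromUri('/models') can fetch them
app.use('/models', express.static(path.join(__dirname, 'models')))

// Serve the page and script from the project root
app.use(express.static(__dirname))

app.listen(3000, () => console.log('Listening on http://localhost:3000'))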
Sources
This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.
Source: Stack Overflow
| Solution | Source |
|---|---|
| Solution 1 | ArthurKay |
| Solution 2 | Ike Anya |
| Solution 3 | |