TensorCamera on React Native does not work
So I'm trying to make an app that gets a model from Google Teachable Machine and runs detection with TensorCamera, but TensorCamera doesn't work. Every time I start the app I get a black screen and these warnings:
[Unhandled promise rejection: TypeError: gl.createVertexArray is not a function. (In 'gl.createVertexArray()', 'gl.createVertexArray' is undefined)]
[Unhandled promise rejection: TypeError: null is not an object (evaluating '_this3.camera.props')]
Below is my App.js code:
import { StatusBar } from 'expo-status-bar';
import React, { useState, useEffect, useRef } from 'react';
import { StyleSheet, Text, View, ActivityIndicator, Platform } from 'react-native';
import * as tf from '@tensorflow/tfjs';
import { cameraWithTensors } from '@tensorflow/tfjs-react-native';
import { Camera } from 'expo-camera';
import { getModel, startPrediction } from './helpers/tensor-helper';
export default function App() {
const TensorCamera = cameraWithTensors(Camera);
const cameraRef = useRef();
const [isTfReady, setIsTfReady] = useState(false);
const [isProcessing, setIsProcessing] = useState(false);
const [model, setModel] = useState();
function handleCameraStream(images, updatePreview, gl) {
const loop = async () => {
const nextImageTensor = images.next.value();
if (!model || !nextImageTensor) throw new Error('No model or image tensor');
const output = startPrediction();
console.log(output);
updatePreview();
gl.endFrameEXP();
requestAnimationFrame(loop);
}
loop();
}
useEffect(() => {
const yukle = async () => {
console.log('girdi');
const { status } = await Camera.requestCameraPermissionsAsync();
await tf.ready();
setModel(await getModel())
}
yukle();
}, [])
let textureDims;
if (Platform.OS === 'ios') {
textureDims = {
height: 1920,
width: 1080,
};
} else {
textureDims = {
height: 1200,
width: 1600,
};
}
return (
<View>
<TensorCamera
// Standard Camera props
style={styles.camera}
type={Camera.Constants.Type.front}
// Tensor related props
cameraTextureHeight={textureDims.height}
cameraTextureWidth={textureDims.width}
resizeHeight={200}
resizeWidth={152}
resizeDepth={3}
onReady={(images, updatePreview, gl) => handleCameraStream(images, updatePreview, gl)}
autorender={true}
/>
</View>
);
}
// Shared styles: a centered fullscreen container and a camera surface that
// fills it completely.
const styles = StyleSheet.create({
  container: {
    alignItems: 'center',
    backgroundColor: '#fff',
    flex: 1,
    justifyContent: 'center',
  },
  camera: {
    height: '100%',
    width: '100%',
  },
});
And these are the helper functions:
import * as tf from '@tensorflow/tfjs';
import '@tensorflow/tfjs-react-native';
import { bundleResourceIO, decodeJpeg } from '@tensorflow/tfjs-react-native';
// NOTE(review): presumably the model's expected input edge length in pixels;
// unused in the code shown here — confirm against the image pre-processing
// before removing.
const BITMAP_DIMENSION = 224;
// Model architecture and weights bundled into the app binary, consumed by
// bundleResourceIO in getModel below.
const modelJson = require('../model/model.json');
const modelWeights = require('../model/weights.bin');
// Channel count for decoded images:
// 0: channel from JPEG-encoded image
// 1: gray scale
// 3: RGB image
// NOTE(review): also unused in the visible code — likely meant for decodeJpeg.
const TENSORFLOW_CHANNEL = 3;
/**
 * Loads the bundled layers model once the TF runtime is initialised.
 * Best-effort: on failure the error is logged and the promise resolves to
 * `undefined` rather than rejecting.
 */
export const getModel = async () => {
  try {
    // The TF runtime must be ready before any model can be loaded.
    await tf.ready();
    const loadedModel = await tf.loadLayersModel(
      bundleResourceIO(modelJson, modelWeights),
    );
    return loadedModel;
  } catch (error) {
    console.log('Could not load model', error);
  }
};
/**
 * Runs one prediction and returns the result as a typed array.
 * Best-effort: on failure the error is logged and `undefined` is returned.
 *
 * @param {tf.LayersModel} model - the loaded model to predict with
 * @param {tf.Tensor} tensor - input image tensor (caller retains ownership)
 * @returns {Promise<Float32Array|undefined>} flattened prediction values
 */
export const startPrediction = async (model, tensor) => {
  try {
    // predict against the model
    const output = model.predict(tensor);
    // Copy the prediction into a typed array, then dispose the output
    // tensor — without this every call leaks tensor memory.
    const data = output.dataSync();
    tf.dispose(output);
    return data;
  } catch (error) {
    // Fixed typo in the original log message ("tesor").
    console.log('Error predicting from tensor image', error);
  }
};
I would be glad if anyone can help. Thank you.
Solution 1:[1]
Try changing your onReady function to this:
onReady={handleCameraStream}
Sources
This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.
Source: Stack Overflow
| Solution | Source |
|---|---|
| Solution 1 | Salman Hanif |
