Why is my view not updating when the value is updated using @Published and @ObservedObject?
I am creating an object-recognition app that takes frames from the camera and outputs a description of the image into a text view on the screen. I can print the output to the console, but I cannot get the view to update with the text using the ObservableObject protocol. I have looked for solutions, but none have worked.
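As far as I understand it, the basic wiring is supposed to look roughly like this minimal sketch (the class and property names here are just placeholders for illustration, not my real code):

import SwiftUI

// Hypothetical minimal example: a plain ObservableObject whose
// @Published property drives a Text view.
final class CounterModel: ObservableObject {
    @Published var count = 0   // changing this should trigger a view update
}

struct CounterView: View {
    @ObservedObject var model = CounterModel()

    var body: some View {
        VStack {
            Text("Count: \(model.count)")
            Button("Increment") {
                model.count += 1   // published on the main thread by the button action
            }
        }
    }
}

So I would expect assigning to my @Published analysis property to refresh ContentView the same way.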
Camera View:
struct Analysis {
    var description: String = "No description"
    var confidence: Double = 0.0
}

final class CameraView: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, ObservableObject {
    var captureSession: AVCaptureSession!
    var previewLayer: AVCaptureVideoPreviewLayer!
    let dataOutput = AVCaptureVideoDataOutput()
    var frameCounter = 0

    // Published so ContentView can display the latest classification.
    @Published var analysis = Analysis()

    override func viewDidLoad() {
        super.viewDidLoad()
        NotificationCenter.default.addObserver(self, selector: #selector(CameraView.rotated), name: UIDevice.orientationDidChangeNotification, object: nil)

        captureSession = AVCaptureSession()
        guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
        let videoInput: AVCaptureDeviceInput

        do {
            videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
        } catch {
            return
        }

        if captureSession.canAddInput(videoInput) {
            captureSession.addInput(videoInput)
        } else {
            failed()
            return
        }

        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.frame = view.layer.bounds
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.addSublayer(previewLayer)
        captureSession.startRunning()

        dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        captureSession.addOutput(dataOutput)
    }

    // Called on the "videoQueue" background queue; classifies every 15th frame.
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        frameCounter += 1
        if frameCounter == 15 {
            frameCounter = 0
            guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
            guard let model = try? VNCoreMLModel(for: Resnet50().model) else { return }
            let request = VNCoreMLRequest(model: model) { finishedReq, err in
                guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
                guard let firstObservation = results.first else { return }
                self.analysis = Analysis(description: firstObservation.identifier, confidence: Double(firstObservation.confidence))
            }
            try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
            // This prints the updated analysis to the console as expected.
            print(self.analysis)
        }
    }
}
extension CameraView: UIViewControllerRepresentable {
    public typealias UIViewControllerType = CameraView

    func makeUIViewController(context: UIViewControllerRepresentableContext<CameraView>) -> CameraView {
        return CameraView()
    }

    func updateUIViewController(_ uiViewController: CameraView, context: UIViewControllerRepresentableContext<CameraView>) {
    }
}
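One thing I am not sure about: captureOutput is called on my "videoQueue" background queue, so the assignment to the @Published property happens off the main thread. If that matters, I assume the completion handler would have to hop to the main queue, along these lines (just a sketch of what I mean, not something I have confirmed fixes it):

let request = VNCoreMLRequest(model: model) { finishedReq, err in
    guard let results = finishedReq.results as? [VNClassificationObservation],
          let firstObservation = results.first else { return }
    // Publish on the main queue, since SwiftUI observes @Published changes there.
    DispatchQueue.main.async {
        self.analysis = Analysis(description: firstObservation.identifier,
                                 confidence: Double(firstObservation.confidence))
    }
}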
Content View:
struct ContentView: View {
    @ObservedObject var camera = CameraView()

    var body: some View {
        ZStack {
            camera
                .ignoresSafeArea()
            VStack {
                Spacer()
                ZStack {
                    Rectangle()
                        .frame(height: 75)
                        .background(.regularMaterial)
                        .cornerRadius(20)
                        .padding()
                    Text(camera.analysis.description)
                        .font(.largeTitle)
                }
            }
        }
    }
}
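One detail I am suspicious of: makeUIViewController returns CameraView(), a brand-new instance, rather than the instance that ContentView observes, so I wonder whether my published updates are going to an object that nothing is watching. I am not sure whether simply returning the existing instance is the right approach, but this is the kind of change I mean:

func makeUIViewController(context: UIViewControllerRepresentableContext<CameraView>) -> CameraView {
    // Reuse the instance that ContentView already observes instead of creating a new one.
    return self
}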
Please let me know if you need more information.
Sources
This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.
Source: Stack Overflow