How to reduce the OpenCV delay with RTSP on a live feed

I am working on AI-based emotion recognition code using the OpenCV and DeepFace libraries.

I am connecting to the IP camera via RTSP, and there is a delay of 7-8 seconds between the live scene and the displayed feed. Does anybody know how to reduce the delay?
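The capture itself is created elsewhere in the class; roughly, the RTSP connection is opened like the sketch below (the URL, credentials and stream path are placeholders, not my real ones):

    import cv2

    # Placeholder address; the real camera URL, credentials and path differ
    RTSP_URL = "rtsp://user:password@192.168.1.10:554/stream1"

    # cv2.CAP_FFMPEG is the backend OpenCV commonly uses for RTSP streams
    capture = cv2.VideoCapture(RTSP_URL, cv2.CAP_FFMPEG)
    if not capture.isOpened():
        raise RuntimeError("Could not open the RTSP stream")

The per-frame processing loop where the delay shows up is the following: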

    while self.Capture.isOpened():
        ret, self.frame = self.Capture.read()
        if ret:
            key = cv2.waitKey(1)

            # Run DeepFace emotion analysis on the current frame
            result = DeepFace.analyze(self.frame, actions=['emotion'], enforce_detection=False)
            emotions = result['emotion']

            # Weighted "worry" score built from the individual emotion scores
            worry = (1.4*emotions['angry'] + 0.9*emotions['disgust'] + 1.2*emotions['fear'] + 1.43*emotions['sad']) - (0.83*emotions['happy'])
            if worry < 0:
                worry = 0

            # Face detection with the Haar cascade (the result is not used yet)
            gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
            faces = Root.faceCascade.detectMultiScale(gray, 1.05, 8)

            font = cv2.FONT_HERSHEY_SIMPLEX
            current_time = time()
            if current_time - Root.start <= 5:
                # For the first 5 seconds, accumulate the worry score and
                # overlay the dominant emotion on the frame
                self.worry_counter += 1
                self.worry_index += worry
                cv2.putText(self.frame, result['dominant_emotion'], (50, 50), font, 3, (0, 0, 255), 2, cv2.LINE_4)
            else:
                # Afterwards, overlay the averaged worry index, capped at 100%
                res_emotion = (self.top_par*self.worry_index)/(self.worry_counter*self.bot_par)
                res_emotion = 100 if res_emotion > 100 else res_emotion
                cv2.putText(self.frame, "Worry:{}%".format(res_emotion), (50, 50), font, 3, (0, 0, 255), 2, cv2.LINE_4)

            # Convert to RGB, wrap in a QImage and hand the frame to the GUI
            Image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
            ConvertToQtFormat = QImage(Image.data, Image.shape[1], Image.shape[0], QImage.Format_RGB888)
            Pic = ConvertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
            self.ImageUpdate.emit(Pic)
               

    def stop(self):
        pass
        #self.ThreadActive = False
        #self.quit()
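DeepFace.analyze runs on every frame and is much slower than the camera's frame rate, so my understanding is that decoded frames queue up in the capture's buffer, which would explain a delay that grows to several seconds. One approach I have seen suggested is to read frames on a separate thread and keep only the most recent one, so the analysis always works on a fresh frame. A minimal sketch of that idea (the FreshFrameReader class and its method names are my own, not part of OpenCV or DeepFace):

    import threading

    class FreshFrameReader:
        """Reads frames on a background thread and keeps only the newest one,
        so the consumer never processes stale frames from the RTSP buffer."""

        def __init__(self, capture):
            self.capture = capture          # an opened cv2.VideoCapture
            self.lock = threading.Lock()
            self.latest = None
            self.running = True
            self.thread = threading.Thread(target=self._update, daemon=True)
            self.thread.start()

        def _update(self):
            # Continuously read; each successful read overwrites the previous frame
            while self.running:
                ret, frame = self.capture.read()
                if not ret:
                    continue
                with self.lock:
                    self.latest = frame

        def read(self):
            # Return a copy of the most recent frame, or None if nothing was read yet
            with self.lock:
                return None if self.latest is None else self.latest.copy()

        def stop(self):
            self.running = False
            self.thread.join()

The loop above would then call reader.read() instead of self.Capture.read(), so DeepFace only ever sees the newest frame. Setting cv2.CAP_PROP_BUFFERSIZE to 1 on the capture is another option I have seen mentioned, although not every backend honours that property.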

