Why is my Flask app slow on iOS on Heroku?

My Flask app has "acceptable" response time in localhost (680ms), worse on Heroku from desktop (1200-1300 ms), and awful on Heroku when accessed by mobile (up to 5-6 seconds).

The idea is to capture an image from the mobile's camera via some HTML, do some OCR with tesseract (makeup_artist.py), and draw boxes where relevant text is (objDetect.js).

The problem is not about Heroku's dynos (I tried to switch to a paid dyno and nothing changed). The code seems OK (at least on localhost). Performance indicators in objDetect.js show that these tasks take milliseconds. When debugging iOS from desktop Safari, it seems the waiting times are simply long.

Some code:

App.py

# Flask application setup: log to stdout, eventlet monkey patching, and a
# Socket.IO wrapper with verbose engine.io logging.
app = Flask(__name__)
app.logger.addHandler(logging.StreamHandler(stdout))
# NOTE(review): eventlet.monkey_patch() is normally called before other
# modules are imported; calling it after the Flask app is created may leave
# already-imported modules unpatched -- confirm against the eventlet docs.
eventlet.monkey_patch(os=False, thread=False)
app.config['SECRET_KEY'] = 'secret!'
app.config['DEBUG'] = True #for the purpose of SSLify
socketio = SocketIO(app, applogger=True, engineio_logger=True)

def take_action(file, word_collection, manifestation_list):
    """Run OCR markup on *file*, mutating the two list arguments in place.

    The JSON string returned by apply_makeup is discarded here.
    NOTE(review): this helper is not referenced anywhere in the visible
    code -- possibly dead, or called from code outside this view.
    """
    makeup_artist.apply_makeup(file, word_collection, manifestation_list)

@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every outgoing response."""
    cors_headers = (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Headers', 'Content-Type,Authorization'),
        ('Access-Control-Allow-Methods', 'GET,POST'),  # add further methods here if needed
    )
    for header_name, header_value in cors_headers:
        response.headers.add(header_name, header_value)
    return response


@app.route('/')
def index():
    """Serve the video-streaming home page."""
    page = render_template('index.html')
    return page

@app.route('/image', methods=['POST'])
def gen():
    """Handle one POSTed camera frame: decode it, run OCR, return JSON.

    Returns a Response whose body is the JSON array produced by
    apply_makeup; on any processing failure an empty JSON array ('[]')
    is returned so the client's JSON.parse still succeeds.
    """
    word_collection = []
    manifestation_list = []
    try:
        # Decode the uploaded JPEG bytes into a BGR image for OpenCV.
        npimg = numpy.fromfile(request.files['image'], numpy.uint8)
        file = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
        # apply_makeup returns a JSON string describing detected word boxes.
        word_collection = makeup_artist.apply_makeup(file, word_collection, manifestation_list)
    except Exception:
        # The original used a bare `except: pass` inside a `while True` loop
        # that always returned on the first pass: errors were silently
        # swallowed and the raw Python list (invalid JSON) was sent back.
        # Log the failure and fall back to a valid empty JSON payload.
        app.logger.exception('frame processing failed')
        word_collection = '[]'
    return Response(word_collection, mimetype='application/json')

makeup_artist.py


# Load the manifestation dictionary (a JSON file shipped next to this module)
# into a pandas DataFrame at import time.
_dictionary_path = os.path.dirname(__file__) + '/dictionary'
with open(_dictionary_path) as json_file:
    dictionary = pd.read_json(json_file)

def apply_makeup(image, word_collection, manifestation_list):
    """OCR *image* and collect bounding boxes for known manifestations.

    Appends one dict per newly-seen manifestation to *word_collection*
    (and records the name in *manifestation_list* to avoid duplicates),
    then returns *word_collection* serialized as a JSON string.
    """
    pytesseract.pytesseract.tesseract_cmd = '/app/vendor/tesseract-ocr/bin/tesseract' ### Uncomment when deploying
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # NOTE(review): tesseract 4 expects `--psm 6` (double dash) -- confirm.
    data = pytesseract.image_to_data(gray, output_type=pytesseract.Output.DICT, lang="eng",
                                     config='tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz --dpi 300 -psm 6')
    frame_edges = (0, 720, 1280)
    for idx, text in enumerate(data['text']):
        box = (data['left'][idx], data['top'][idx], data['width'][idx], data['height'][idx])
        # Skip boxes touching the frame border (any coordinate at 0/720/1280).
        if any(edge in box for edge in frame_edges):
            continue
        for manifestation in dictionary['Manifestation']:
            # Only lowercase OCR text can match, since the dictionary entry
            # is lowercased before comparison; skip already-seen entries.
            if manifestation.lower() != text or manifestation in manifestation_list:
                continue
            manifestation_list.append(manifestation)
            x, y, w, h = box
            row_index = dictionary[dictionary['Manifestation'] == manifestation].index
            class_name = dictionary.iloc[row_index[0]]['Class']
            word_collection.append({'class_name': class_name, 'manifestation_name': manifestation,
                                    'x': x, 'y': y, 'w': w, 'h': h})

    return json.dumps(word_collection)

objDetect.js

//Parameters
const s = document.getElementById('objDetect');
const sourceVideo = s.getAttribute("data-source");  //the source video to use
const uploadWidth = s.getAttribute("data-uploadWidth") || 1280; //the width of the upload file
const mirror = s.getAttribute("data-mirror") || true; //mirror the boundary boxes
const updateInterval = s.getAttribute("data-updateInterval") || 100; //the max rate to upload images
const cameraOutput = document.querySelector("#camera--output")

//Video element selector
//Video element selector (note: `v` is an implicit global -- no let/const)
v = document.getElementById(sourceVideo);
//for starting events: detection starts only once BOTH flags are true
let isPlaying = false,
    gotMetadata = false;

// NOTE(review): lastFrameData is never assigned in the visible code;
// lastFrameTime is written in sendImageFromCanvas but never read.
let lastFrameData = null,
    lastFrameTime = null;

//Canvas setup

//create a canvas to grab an image for upload (off-DOM, never appended)
let imageCanvas = document.createElement('canvas');
let imageCtx = imageCanvas.getContext("2d");

//create a canvas for drawing object boundaries (appended so boxes are visible)
let drawCanvas = document.createElement('canvas');
document.body.appendChild(drawCanvas);
let drawCtx = drawCanvas.getContext("2d");

// Uniform random integer in [min, max], with both endpoints included.
function getRandomIntInclusive(min, max) {
  const lo = Math.ceil(min);
  const hi = Math.floor(max);
  const span = hi - lo + 1; // number of possible values, endpoints inclusive
  return lo + Math.floor(Math.random() * span);
}

//draw boxes and labels on each detected object
function drawBoxes(objects) {
    var startTime_drawBoxes = performance.now()
    //clear the previous drawings
    drawCtx.clearRect(0, 0, drawCanvas.width, drawCanvas.height);

    //filter out objects that contain a class_name and then draw boxes and labels on each
    objects.filter(object => object.class_name).forEach(object => {

        let x = object.x;
        let y = object.y;
        let width = object.w;
        let height = object.h;

        drawCtx.fillText(object.class_name, x + 5, y + 20);
        drawCtx.strokeRect(x, y, width, height);

    });
    var endtime_drawBoxes = performance.now()
    console.log(`Call to drawBoxes Function took ${endtime_drawBoxes - startTime_drawBoxes} milliseconds`)

}

//Add file blob to a form and post
function postFile(file) {
    var startTime_postFile = performance.now()
    //Set options as form data
    let formdata = new FormData();
    formdata.append("image", file);
    let xhr = new XMLHttpRequest();
    xhr.open('POST', window.location.origin + '/image', true);
    xhr.setRequestHeader("X-Requested-With", "XMLHttpRequest");
    xhr.setRequestHeader("HTTP_X_REQUESTED_WITH", "XMLHttpRequest");
    xhr.setRequestHeader("pragma", "no-cache");
    xhr.onreadystatechange = function () {
        if (xhr.readyState == 4) {
            let response = this.response;
            let error = '<iframe src="//www.herokucdn.com/error-pages/application-error.html"></iframe>';
            if (! response.includes(error)) {
                let objects = JSON.parse(this.response);

                //console.log(objects);

                //draw the boxes
                drawBoxes(objects);

                let event = new CustomEvent('objectDetection', {detail: objects});
                document.dispatchEvent(event);

                //start over
                sendImageFromCanvas();
                };
        };
    };
    xhr.send(formdata);
    var endTime_postFile = performance.now()
    console.log(`Call to postFile Function took ${endTime_postFile - startTime_postFile} milliseconds`)
}

//Check if the image has changed & enough time has passed sending it to the API
function sendImageFromCanvas() {
    var startTime_sendImageFromCanvas = performance.now()
    imageCtx.drawImage(v, 0, 0, v.videoWidth, v.videoHeight, 0, 0, uploadWidth, uploadWidth * (v.videoHeight / v.videoWidth));

    lastFrameTime = new Date();
    imageCanvas.toBlob(postFile, 'image/jpeg');
    //cameraOutput.src = imageCanvas.toDataURL("image/jpeg")

    var sendImageFromCanvas_endTime = performance.now()
    console.log(`Call to imageChange Function took ${sendImageFromCanvas_endTime - startTime_sendImageFromCanvas} milliseconds`)
}

//Start object detection
function startObjectDetection() {
    var startTime_startObjectDetection = performance.now()
    console.log("starting object detection");

    //Set canvas sizes base don input video
    drawCanvas.width = v.videoWidth;
    drawCanvas.height = v.videoHeight;

    imageCanvas.width = uploadWidth;
    imageCanvas.height = uploadWidth * (v.videoHeight / v.videoWidth);

    //Some styles for the drawcanvas
    drawCtx.lineWidth = "1";
    drawCtx.strokeStyle = "cyan";
    drawCtx.font = "20px Verdana";
    drawCtx.fillStyle = "cyan";
    //Save and send the first image
    imageCtx.drawImage(v, 0, 0, v.videoWidth, v.videoHeight, 0, 0, uploadWidth, uploadWidth * (v.videoHeight / v.videoWidth));
    imageCanvas.toBlob(postFile, 'image/jpeg');
    var endTime_startObjectDetection = performance.now()
    console.log(`Call to startObjectDetection Function took ${endTime_startObjectDetection - startTime_startObjectDetection} milliseconds`)

}

//check if metadata is ready - we need the video size
v.onloadedmetadata = () => {
    console.log("video metadata ready");
    gotMetadata = true;
    if (isPlaying)
        startObjectDetection();
};

//see if the video has started playing
v.onplaying = () => {
    console.log("video playing");
    isPlaying = true;
    if (gotMetadata) {
        startObjectDetection();
    }
};

main.js

//Get camera video
const constraints = {
    audio: false,
    video: {
        width: {ideal: 1280},
        height: {ideal: 720},
        facingMode: {exact: 'environment'}
    }
};

navigator.mediaDevices.getUserMedia(constraints)
    .then(stream => {
        document.getElementById("myVideo").srcObject = stream;
        //console.log("Got local user video");

    })
    .catch(err => {
        console.log('navigator.getUserMedia error: ', err)
    });


Sources

This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.

Source: Stack Overflow

Solution Source