TensorFlow Lite and representative datasets

Does anyone see what is wrong with my code? I really don't understand the reason for the exception.

def repr_data_gen():
  for e, _ in train_gen_base.take(8):
    for i in range(e.shape[0]):
      img = e[i, :]
      yield [img.numpy()]


pt_model.trainable = False
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = repr_data_gen,
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8  # or tf.uint8
converter.inference_output_type = tf.int8  # or tf.uint8
tflite_quant_model = converter.convert()

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
~/Dev/sandbox/intception_v4/convert.py in 
     153 converter.inference_input_type = tf.int8  # or tf.uint8
     154 converter.inference_output_type = tf.int8  # or tf.uint8
---> 155 tflite_quant_model = converter.convert()
     156 

~/.conda/envs/tflitemicro_v2/lib/python3.8/site-packages/tensorflow/lite/python/lite.py in convert(self)
   1055           graph=frozen_func.graph)
   1056 
-> 1057     result = super(TFLiteKerasModelConverterV2,
   1058                    self).convert(graph_def, input_tensors, output_tensors)
   1059     self._increase_conversion_success_metric(result)

~/.conda/envs/tflitemicro_v2/lib/python3.8/site-packages/tensorflow/lite/python/lite.py in convert(self, graph_def, input_tensors, output_tensors)
    793 
    794     if calibrate_and_quantize:
--> 795       result = self._calibrate_quantize_model(result, **flags)
    796 
    797     flags_modify_model_io_type = quant_mode.flags_modify_model_io_type(

~/.conda/envs/tflitemicro_v2/lib/python3.8/site-packages/tensorflow/lite/python/lite.py in _calibrate_quantize_model(self, result, inference_input_type, inference_output_type, activations_type, allow_float)
    519                                                 custom_op_registerers_by_func)
    520     if self._experimental_calibrate_only or self.experimental_new_quantizer:
--> 521       calibrated = calibrate_quantize.calibrate(
    522           self.representative_dataset.input_gen)
    523 

~/.conda/envs/tflitemicro_v2/lib/python3.8/site-packages/tensorflow/lite/python/optimize/calibrator.py in calibrate(self, dataset_gen)
    167     """
    168     initialized = False
--> 169     for sample in dataset_gen():
    170       if not initialized:
    171         initialized = True

TypeError: 'tuple' object is not callable

I started debugging the TensorFlow Lite package but I still have no clue what the problem is. The train_gen_base is a tf.data.Dataset yielding tensors of shape (batch_size, img_dim1, img_dim2, 3).



Solution 1:[1]

The issue is caused by the trailing comma you put after repr_data_gen. The comma turns the right-hand side of the assignment into a one-element tuple, so converter.representative_dataset is set to a tuple instead of the generator function — which is exactly why TensorFlow raises TypeError: 'tuple' object is not callable when it later tries to call the dataset generator.

def repr_data_gen():
  for e, _ in train_gen_base.take(8):
    for i in range(e.shape[0]):
      img = e[i, :]
      yield [img.numpy()]  # I suggest you remove the numpy() call here


pt_model.trainable = False
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = repr_data_gen  # fix: the comma mistakenly placed here in the original code has been removed
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8  # or tf.uint8
converter.inference_output_type = tf.int8  # or tf.uint8
tflite_quant_model = converter.convert()

Sources

This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.

Source: Stack Overflow

Solution Source
Solution 1 Tensorflow Support