Running https://github.com/NVIDIA-AI-IOT/tf_trt_models/blob/master/examples/detection/detection.ipynb in the nvidia/tensorflow:19.01-py3 container on a Tesla V100 16GB:
The provided detection.py allows downloading and using the following models:
MODELS = {
    'ssd_mobilenet_v1_coco': DetectionModel(
        'ssd_mobilenet_v1_coco',
        'http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tar.gz',
        'ssd_mobilenet_v1_coco_2018_01_28',
    ),
    'ssd_mobilenet_v2_coco': DetectionModel(
        'ssd_mobilenet_v2_coco',
        'http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_coco_2018_03_29.tar.gz',
        'ssd_mobilenet_v2_coco_2018_03_29',
    ),
    'ssd_inception_v2_coco': DetectionModel(
        'ssd_inception_v2_coco',
        'http://download.tensorflow.org/models/object_detection/ssd_inception_v2_coco_2018_01_28.tar.gz',
        'ssd_inception_v2_coco_2018_01_28',
    ),
    'ssd_resnet_50_fpn_coco': DetectionModel(
        'ssd_resnet_50_fpn_coco',
        'http://download.tensorflow.org/models/object_detection/ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03.tar.gz',
        'ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03',
    ),
    'faster_rcnn_resnet50_coco': DetectionModel(
        'faster_rcnn_resnet50_coco',
        'http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet50_coco_2018_01_28.tar.gz',
        'faster_rcnn_resnet50_coco_2018_01_28',
    ),
    'faster_rcnn_nas': DetectionModel(
        'faster_rcnn_nas',
        'http://download.tensorflow.org/models/object_detection/faster_rcnn_nas_coco_2018_01_28.tar.gz',
        'faster_rcnn_nas_coco_2018_01_28',
    ),
    'mask_rcnn_resnet50_atrous_coco': DetectionModel(
        'mask_rcnn_resnet50_atrous_coco',
        'http://download.tensorflow.org/models/object_detection/mask_rcnn_resnet50_atrous_coco_2018_01_28.tar.gz',
        'mask_rcnn_resnet50_atrous_coco_2018_01_28',
    )
}
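For reference, I select and download a model as in the notebook, roughly like below (download_detection_model comes from the same detection.py if I recall correctly; the 'data' output directory is just the location I chose):

from tf_trt_models.detection import download_detection_model, build_detection_graph

# pick one of the keys from MODELS above
MODEL = 'ssd_mobilenet_v1_coco'

# downloads and extracts the pretrained model, returning the paths that are
# later passed to build_detection_graph ('data' is just my output directory)
config_path, checkpoint_path = download_detection_model(MODEL, 'data')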
Optimizing SSD MobileNet runs without problems. However, setting
MODEL = 'ssd_resnet_50_fpn_coco'
and optimizing with
frozen_graph, input_names, output_names = build_detection_graph(
    config=config_path,
    checkpoint=checkpoint_path,
    batch_size=1
)
gives the following error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-dcab7238b8e0> in <module>
2 config=config_path,
3 checkpoint=checkpoint_path,
----> 4 batch_size=1
5 )
/workspace/tf_trt_models/tf_trt_models/detection.py in build_detection_graph(config, checkpoint, batch_size, score_threshold, force_nms_cpu, replace_relu6, remove_assert, input_shape, output_dir)
148 checkpoint_path,
149 output_dir,
--> 150 input_shape=[batch_size, None, None, 3]
151 )
152
~/.local/lib/python3.5/site-packages/object_detection-0.1-py3.5.egg/object_detection/exporter.py in export_inference_graph(input_type, pipeline_config, trained_checkpoint_prefix, output_directory, input_shape, output_collection_name, additional_output_tensor_names, write_inference_graph)
472 output_collection_name,
473 graph_hook_fn=None,
--> 474 write_inference_graph=write_inference_graph)
475 pipeline_config.eval_config.use_moving_averages = False
476 config_util.save_pipeline_config(pipeline_config, output_directory)
~/.local/lib/python3.5/site-packages/object_detection-0.1-py3.5.egg/object_detection/exporter.py in _export_inference_graph(input_type, detection_model, use_moving_averages, trained_checkpoint_prefix, output_directory, additional_output_tensor_names, input_shape, output_collection_name, graph_hook_fn, write_inference_graph)
383 input_shape=input_shape,
384 output_collection_name=output_collection_name,
--> 385 graph_hook_fn=graph_hook_fn)
386
387 saver_kwargs = {}
~/.local/lib/python3.5/site-packages/object_detection-0.1-py3.5.egg/object_detection/exporter.py in _build_detection_graph(input_type, detection_model, input_shape, output_collection_name, graph_hook_fn)
351 input_tensors=input_tensors,
352 detection_model=detection_model,
--> 353 output_collection_name=output_collection_name)
354
355 # Add global step to the graph.
~/.local/lib/python3.5/site-packages/object_detection-0.1-py3.5.egg/object_detection/exporter.py in _get_outputs_from_inputs(input_tensors, detection_model, output_collection_name)
328 preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs)
329 output_tensors = detection_model.predict(
--> 330 preprocessed_inputs, true_image_shapes)
331 postprocessed_tensors = detection_model.postprocess(
332 output_tensors, true_image_shapes)
~/.local/lib/python3.5/site-packages/object_detection-0.1-py3.5.egg/object_detection/meta_architectures/ssd_meta_arch.py in predict(self, preprocessed_inputs, true_image_shapes)
391 [preprocessed_inputs]):
392 feature_maps = self._feature_extractor.extract_features(
--> 393 preprocessed_inputs)
394 feature_map_spatial_dims = self._get_feature_map_spatial_dims(
395 feature_maps)
~/.local/lib/python3.5/site-packages/object_detection-0.1-py3.5.egg/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.py in extract_features(self, preprocessed_inputs)
155 [(key, image_features[key])
156 for key in ['block2', 'block3', 'block4']],
--> 157 depth=256)
158 last_feature_map = fpn_features['top_down_block4']
159 coarse_features = {}
~/.local/lib/python3.5/site-packages/object_detection-0.1-py3.5.egg/object_detection/models/feature_map_generators.py in fpn_top_down_feature_maps(image_features, depth, scope)
223 output_feature_map_keys.append('top_down_%s' % image_features[level][0])
224 return collections.OrderedDict(
--> 225 reversed(zip(output_feature_map_keys, output_feature_maps_list)))
226
227
TypeError: argument to reversed() must be a sequence
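The failing line in feature_map_generators.py looks like Python 2 style code: under Python 3 (the container uses python3.5), zip() returns an iterator rather than a sequence, so reversed() rejects it. Assuming the line shown in the traceback is the only offender, a minimal local workaround would be to materialize the zip into a list first, for example:

# object_detection/models/feature_map_generators.py, fpn_top_down_feature_maps
# original (Python 2 style) line from the traceback:
#     return collections.OrderedDict(
#         reversed(zip(output_feature_map_keys, output_feature_maps_list)))
# possible Python 3 workaround: wrap the zip iterator in a list so that
# reversed() receives a sequence
return collections.OrderedDict(
    reversed(list(zip(output_feature_map_keys, output_feature_maps_list))))

I have not verified whether this patch gets the export all the way through, or whether similar Python 2/3 issues show up further down.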
All models other than the default SSD MobileNet throw errors during optimization.
Is there something else that needs to be considered when optimizing these models?
I want to be able to optimize the TensorFlow object_detection models from the TensorFlow model zoo (https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) to run performance comparisons.
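For completeness, the TF-TRT conversion step I run after build_detection_graph is the one from the notebook; the workspace size, precision mode, and segment size below are just the values I am testing with:

import tensorflow.contrib.tensorrt as trt

# convert the frozen TensorFlow graph into a TF-TRT optimized graph
trt_graph = trt.create_inference_graph(
    input_graph_def=frozen_graph,
    outputs=output_names,
    max_batch_size=1,
    max_workspace_size_bytes=1 << 25,
    precision_mode='FP16',
    minimum_segment_size=50
)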