import tensorflow as tf
import numpy as np
import cv2
import time
import pdb
import os


def fun(net, img_batch, orig_h, orig_w):
    """Run one batched forward pass of the detector and print raw outputs.

    Args:
        net: dict produced by init_tf() holding the session and tensors.
        img_batch: list of arrays, each with a leading batch dim of 1;
            concatenated along axis 0 to form the input batch.
        orig_h, orig_w: original frame size; currently unused, kept for
            interface compatibility with existing callers.
    """
    sess = net['sess']
    feed_dict = {net['inp']: np.concatenate(img_batch, 0)}
    # Fetch all four detector outputs in a SINGLE run() call; the original
    # code called sess.run() four times, paying for four forward passes.
    boxes, classes, scores, num = sess.run(
        [net['out'], net['out1'], net['out2'], net['out3']], feed_dict)
    print(boxes.shape)
    print(classes.shape)
    print(scores.shape)
    print(num.shape)
    # Report detections for the first image of the batch, one per line.
    for box, cls, score in zip(boxes[0], classes[0], scores[0]):
        print(box, cls, score)


def init_tf(GPU_ID, modelfile, batch_size, frm_r, frm_c):  # used in test functions
    """Load a frozen TF1 detection graph on a GPU and warm it up.

    Args:
        GPU_ID: integer index of the GPU to place the graph on.
        modelfile: path to a frozen_inference_graph.pb.
        batch_size: number of images per inference call (warm-up size).
        frm_r, frm_c: rows/cols of the dummy warm-up frame.

    Returns:
        dict with the session, input/output tensors, model input shape
        (h, w, c), default thresholds and batch size, consumed by fun().
    """
    device_name = "GPU:" + str(GPU_ID)
    with tf.device(device_name):
        # Session config: soft placement, cap GPU memory use at 90%.
        cfg = {
            'allow_soft_placement': True,
            'log_device_placement': False,
            'device_count': {'GPU': 1},
            'use_per_session_threads': True,
            'gpu_options': tf.GPUOptions(
                per_process_gpu_memory_fraction=0.9),
        }
        sess = tf.Session(config=tf.ConfigProto(**cfg))

        # Deserialize the frozen graph into the default graph.
        with tf.gfile.FastGFile(modelfile, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name="")

        graph = tf.get_default_graph()
        inp = graph.get_tensor_by_name('image_tensor:0')
        out = graph.get_tensor_by_name('detection_boxes:0')
        out1 = graph.get_tensor_by_name('detection_classes:0')
        out2 = graph.get_tensor_by_name('detection_scores:0')
        out3 = graph.get_tensor_by_name('num_detections:0')

        # Model input geometry; dims may be None for variable-size inputs.
        c = inp.shape[3].value
        h = inp.shape[1].value
        w = inp.shape[2].value

        # A frozen graph has no trainable variables; harmless no-op kept
        # from the original for safety.
        sess.run(tf.global_variables_initializer())

        # Dummy warm-up pass so the first timed call does not pay for
        # CUDA/cuDNN initialization.
        tempimg = np.expand_dims(
            np.zeros((frm_r, frm_c, 3), dtype="uint8"), 0)
        feed_dict = {inp: np.concatenate([tempimg] * batch_size, 0)}
        sess.run(out, feed_dict)

    net = {'sess': sess,
           'inp': inp,
           'out': out,
           'c': c,
           'h': h,
           'w': w,
           'min_prob': 0.3,
           'min_iou': 0.8,
           'batsiz': batch_size,
           'out1': out1,
           'out2': out2,
           'out3': out3}
    return net


def _main():
    """Benchmark driver: load the model and time inference on one image."""
    model_path = ("../models/faster_rcnn_inception_v2_coco_2018_01_28/"
                  "frozen_inference_graph.pb")
    frm_r = 300
    frm_c = 300
    net = init_tf(0, model_path, 1, frm_r, frm_c)
    bs = [1]
    # bs = [6, 7, 8, 16, 32]  # batch-size sweep
    num_itr = 1
    for bs_i in bs:
        net['batsiz'] = bs_i
        tempi = cv2.imread("/home/neil/use/streams/sample_720p.jpg")
        # NOTE(review): imread returns None on a missing/unreadable file,
        # which would raise AttributeError below — confirm the path exists.
        h1, w1, _ = tempi.shape
        # maintain_aspect_ratio: 1 — pad to square then resize (dead path
        # kept from the original for reference):
        #   maxdim = max(h1, w1)
        #   tempimg = np.zeros((maxdim, maxdim, 3), dtype="uint8")
        #   tempimg[0:h1, 0:w1] = tempi
        #   imsz1 = cv2.resize(tempimg, (frm_c, frm_r))
        # maintain_aspect_ratio: 0 — direct resize, then BGR -> RGB.
        imsz1 = cv2.resize(tempi, (frm_c, frm_r))
        imsz1 = imsz1[:, :, (2, 1, 0)]
        this_inp = np.expand_dims(imsz1, 0)
        inp_feed = [this_inp for _ in range(net['batsiz'])]
        for itr in range(num_itr):
            t0 = time.time()
            fun(net, inp_feed, 0, 0)
            t1 = time.time()
            print(t1 - t0)
    print('done')


if __name__ == '__main__':
    _main()