Prediction using InceptionV3 in Tensorflow

Asked anonymously (unverified), submitted 2019-12-03 08:59:04

Question:

I have trained an InceptionV3 model in TensorFlow on my own dataset. I have the checkpoint file and the graph (.meta) from training, and I want to use these files to classify a new image. So far I have the following:

inception_v3 code from TFslim

def inception_v3(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope=''):
  """Latest Inception from http://arxiv.org/abs/1512.00567.

    "Rethinking the Inception Architecture for Computer Vision"
    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for name_scope.

  Returns:
    a list containing 'logits', 'aux_logits' Tensors.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  with tf.name_scope(scope, 'inception_v3', [inputs]):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='VALID'):
        # 299 x 299 x 3
        end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
                                         scope='conv0')
        # 149 x 149 x 32
        end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
                                         scope='conv1')
        # 147 x 147 x 32
        end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
                                         padding='SAME', scope='conv2')
        # 147 x 147 x 64
        end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                           stride=2, scope='pool1')
        # 73 x 73 x 64
        end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
                                         scope='conv3')
        # 73 x 73 x 80.
        end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
                                         scope='conv4')
        # 71 x 71 x 192.
        end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                           stride=2, scope='pool2')
        # 35 x 35 x 192.
        net = end_points['pool2']
      # Inception blocks
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='SAME'):
        # mixed: 35 x 35 x 256.
        with tf.variable_scope('mixed_35x35x256a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
          net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
          end_points['mixed_35x35x256a'] = net
        # mixed_1: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
          end_points['mixed_35x35x288a'] = net
        # mixed_2: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
          end_points['mixed_35x35x288b'] = net
        # mixed_3: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
                                      stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat([branch3x3, branch3x3dbl, branch_pool], 3)
          end_points['mixed_17x17x768a'] = net
        # mixed4: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 128, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 128, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768b'] = net
        # mixed_5: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768c'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768c'] = net
        # mixed_6: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768d'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768d'] = net
        # mixed_7: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768e'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 192, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 192, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768e'] = net
        # Auxiliary Head logits
        aux_logits = tf.identity(end_points['mixed_17x17x768e'])
        with tf.variable_scope('aux_logits'):
          aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
                                    padding='VALID')
          aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
          # Shape of feature map before the final layer.
          shape = aux_logits.get_shape()
          aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
                                  padding='VALID')
          aux_logits = ops.flatten(aux_logits)
          aux_logits = ops.fc(aux_logits, num_classes, activation=None,
                              stddev=0.001, restore=restore_logits)
          end_points['aux_logits'] = aux_logits
        # mixed_8: 8 x 8 x 1280.
        # Note that the scope below is not changed to not void previous
        # checkpoints.
        # (TODO) Fix the scope when appropriate.
        with tf.variable_scope('mixed_17x17x1280a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 192, [1, 1])
            branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
                                   padding='VALID')
          with tf.variable_scope('branch7x7x3'):
            branch7x7x3 = ops.conv2d(net, 192, [1, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
                                     stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat([branch3x3, branch7x7x3, branch_pool], 3)
          end_points['mixed_17x17x1280a'] = net
        # mixed_9: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat([ops.conv2d(branch3x3, 384, [1, 3]),
                                   ops.conv2d(branch3x3, 384, [3, 1])], 3)
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat([ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                      ops.conv2d(branch3x3dbl, 384, [3, 1])], 3)
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
          end_points['mixed_8x8x2048a'] = net
        # mixed_10: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat([ops.conv2d(branch3x3, 384, [1, 3]),
                                   ops.conv2d(branch3x3, 384, [3, 1])], 3)
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat([ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                      ops.conv2d(branch3x3dbl, 384, [3, 1])], 3)
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
          end_points['mixed_8x8x2048b'] = net
        # Final pooling and prediction
        with tf.variable_scope('logits'):
          shape = net.get_shape()
          net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
          # 1 x 1 x 2048
          net = ops.dropout(net, dropout_keep_prob, scope='dropout')
          net = ops.flatten(net, scope='flatten')
          # 2048
          logits = ops.fc(net, num_classes, activation=None, scope='logits',
                          restore=restore_logits)
          # 1000
          end_points['logits'] = logits
          end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
      return logits, end_points

Prediction code

config = tf.ConfigProto(allow_soft_placement=True)
saver = tf.train.import_meta_graph('path/to/meta/graph')
graph = tf.get_default_graph()
with tf.Session(config=config, graph=graph) as sess:
    print graph
    saver.restore(sess, '/path/to/chpk/')
    init_op = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
    sess.run(init_op)
    print ('Restored checkpoint file and graph')

    tens = image_preprocessing(tf.read_file('/serving/13_left.jpeg'))
    with slim.arg_scope(inception_arg_scope()):
        logits = inception_v3(tf.expand_dims(tens, 0),
                              num_classes=6,
                              is_training=False)
    prob = tf.nn.softmax(logits)
    sess.run(prob)

This gives the following error:

FailedPreconditionError (see above for traceback): Attempting to use uninitialized value mixed_17x17x768d/branch7x7dbl/Conv_3/weights_2
     [[Node: mixed_17x17x768d/branch7x7dbl/Conv_3/weights_2/read = Identity[T=DT_FLOAT, _class=["loc:@mixed_17x17x768d/branch7x7dbl/Conv_3/weights_2"], _device="/job:localhost/replica:0/task:0/cpu:0"](mixed_17x17x768d/branch7x7dbl/Conv_3/weights_2)]]

I am fairly new to TensorFlow, so any help here is much appreciated. I am doing something wrong but just cannot figure out what. Thanks in advance :)
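
One likely cause of this error is that import_meta_graph already rebuilds the trained network in the default graph, and the later call to inception_v3(...) builds a second copy whose variables (note the weights_2 suffix in the error) were never saved or initialized; running initialize_all_variables() after restore would also overwrite any restored weights. Below is a minimal sketch of the other route, rebuilding the model from code first and only then restoring, assuming the same image_preprocessing, inception_arg_scope and inception_v3 helpers as above and a placeholder checkpoint directory. The variable names created this way still have to match the names stored in the checkpoint; the tower_0/ prefix visible in the node listing further down suggests a var_list or name remapping may be needed.

import tensorflow as tf

with tf.Graph().as_default() as graph:
    # Build the network from code in a fresh graph (no import_meta_graph here).
    tens = image_preprocessing(tf.read_file('/serving/13_left.jpeg'))
    with slim.arg_scope(inception_arg_scope()):
        logits, end_points = inception_v3(tf.expand_dims(tens, 0),
                                          num_classes=6,
                                          is_training=False)
    prob = tf.nn.softmax(logits)

    # Create the Saver only after the variables exist, then restore instead of
    # running a global initializer (which would overwrite the trained weights).
    saver = tf.train.Saver()
    with tf.Session(graph=graph) as sess:
        saver.restore(sess, tf.train.latest_checkpoint('/path/to/checkpoint_dir'))
        print(sess.run(prob))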

EDIT 1

I started over by reloading the graph and weights into a new session. I then froze the graph and saved it as frozen_model.pb. Following is my code:

import argparse

import tensorflow as tf
from tensorflow.python.framework import graph_util


def freeze_graph(model_folder):
    # We retrieve our checkpoint fullpath
    checkpoint = tf.train.get_checkpoint_state(model_folder)
    input_checkpoint = checkpoint.model_checkpoint_path

    # We specify the full filename of our frozen graph
    absolute_model_folder = "/".join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_folder + "/frozen_model.pb"

    # Before exporting our graph, we need to specify our output node.
    # This is how TF decides what part of the graph it has to keep and what part it can dump
    output_node_names = "tower_0/logits/predictions"

    # We clear devices to allow TensorFlow to control on which device it will load operations
    clear_devices = True

    # We import the meta graph and retrieve a Saver
    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)

    # We retrieve the protobuf graph definition
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()

    # We start a session and restore the graph weights
    with tf.Session() as sess:
        saver.restore(sess, input_checkpoint)
        for op in sess.graph.get_operations():
            print(op.name)

        # We use a built-in TF helper to export variables to constants
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,                          # The session is used to retrieve the weights
            input_graph_def,               # The graph_def is used to retrieve the nodes
            output_node_names.split(","))  # The output node names are used to select the useful nodes

        # Finally we serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_folder", type=str, help="Model folder to export")
    args = parser.parse_args()
    freeze_graph(args.model_folder)

I load the frozen graph into a new session. Following are my node names:

prefix/batch_processing/batch_join/fifo_queue
prefix/batch_processing/batch_join/n
prefix/batch_processing/batch_join
prefix/batch_processing/Reshape/shape
prefix/batch_processing/Reshape
...
prefix/logits/logits/weights
prefix/logits/logits/weights/read
prefix/logits/logits/biases
prefix/logits/logits/biases/read
prefix/tower_0/logits/logits/xw_plus_b/MatMul
prefix/tower_0/logits/logits/xw_plus_b
prefix/tower_0/logits/predictions

I use the input node and final node (predictions) to classify a new image. Following is my code for this:

import argparse

import tensorflow as tf


def load_graph(frozen_graph_filename):
    # We load the protobuf file from the disk and parse it to retrieve the
    # unserialized graph_def
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # Then, we can use again a convenient built-in function to import a graph_def into the
    # current default Graph
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(
            graph_def,
            input_map=None,
            return_elements=None,
            name="prefix",
            op_dict=None,
            producer_op_list=None
        )
    return graph

#graph = load_graph('/serving/frozen_model.pb')

if __name__ == '__main__':
    import scipy.misc
    # Let's allow the user to pass the filename as an argument
    parser = argparse.ArgumentParser()
    parser.add_argument("--frozen_model_filename", default="/serving/frozen_model.pb", type=str,
                        help="Frozen model file to import")
    args = parser.parse_args()

    # We use our "load_graph" function
    graph = load_graph(args.frozen_model_filename)

    # We can verify that we can access the list of operations in the graph
    for op in graph.get_operations():
        print(op.name)
        # prefix/Placeholder/inputs_placeholder
        # ...
        # prefix/Accuracy/predictions

    # We access the input and output nodes
    x = graph.get_tensor_by_name('prefix/batch_processing/batch_join/fifo_queue:0')
    y = graph.get_tensor_by_name('prefix/tower_0/logits/predictions:0')
    image_data = tf.gfile.FastGFile('/serving/13_left.jpeg', 'rb').read()

    # We launch a Session
    with tf.Session(graph=graph) as sess:
        # Note: we didn't initialize/restore anything, everything is stored in the graph_def
        y_out = sess.run(y, feed_dict={
            x: image_data}) # < 45
        print(y_out)

I get the following error.

Traceback (most recent call last):
  File "predict_3.py", line 183, in <module>
    x: image_data}) # < 45
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 767, in run
    run_metadata_ptr)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 929, in _run
    subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/dtypes.py", line 138, in as_numpy_dtype
    return _TF_TO_NP[self._type_enum]
KeyError: 20

Not sure what is going wrong now. My node names look correct. Any help on this is much appreciated.
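
For context on the KeyError: the feed-dict validation looks up the tensor's dtype in a TF-to-NumPy table, and type enum 20 in the TF 1.x dtypes proto appears to be DT_RESOURCE, i.e. the queue handle produced by the fifo_queue op. A handle has no NumPy equivalent, so it cannot be fed raw JPEG bytes. One way to bypass the input queue entirely is to remap the dequeued batch to a placeholder when importing the frozen graph. This is only a rough sketch, under the assumption that 'batch_processing/Reshape' (used as the input further down) carries the preprocessed image batch; the helper name is hypothetical.

import tensorflow as tf


def load_graph_with_placeholder(frozen_graph_filename):
    # Parse the frozen GraphDef from disk.
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    with tf.Graph().as_default() as graph:
        # Placeholder that replaces the queue-fed batch inside the graph.
        # Shape/dtype are assumptions: a float batch of 299x299x3 images; the
        # rest of the graph may still insist on a fixed batch size of 64.
        images = tf.placeholder(tf.float32, [None, 299, 299, 3], name='images')
        tf.import_graph_def(
            graph_def,
            # input_map keys use the names from the GraphDef, i.e. without the
            # 'prefix/' that import_graph_def adds on import.
            input_map={'batch_processing/Reshape:0': images},
            name='prefix')
    return graph, images

Predictions would then be fetched by running 'prefix/tower_0/logits/predictions:0' while feeding the images placeholder, instead of feeding the queue handle.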

Answer 1:

Finally I figured out how to resolve this issue. We have to freeze the graph in order to produce a .pb file that lets us re-use the nodes for prediction. Following is my code to do this:

import argparse

import tensorflow as tf
from tensorflow.python.framework import graph_util


def freeze_graph(model_folder):
    # We retrieve our checkpoint fullpath
    checkpoint = tf.train.get_checkpoint_state(model_folder)
    input_checkpoint = checkpoint.model_checkpoint_path

    # We specify the full filename of our frozen graph
    absolute_model_folder = "/".join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_folder + "/frozen_model.pb"

    # Before exporting our graph, we need to specify our output node.
    # This is how TF decides what part of the graph it has to keep and what part it can dump
    # NOTE: this variable is plural, because you can have multiple output nodes
    output_node_names = "tower_0/logits/predictions"

    # We clear devices to allow TensorFlow to control on which device it will load operations
    clear_devices = True

    # We import the meta graph and retrieve a Saver
    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)

    # We retrieve the protobuf graph definition
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()

    # We start a session and restore the graph weights
    with tf.Session() as sess:
        saver.restore(sess, input_checkpoint)
        for op in sess.graph.get_operations():
            print(op.name)

        # We use a built-in TF helper to export variables to constants
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,                          # The session is used to retrieve the weights
            input_graph_def,               # The graph_def is used to retrieve the nodes
            output_node_names.split(","))  # The output node names are used to select the useful nodes

        # Finally we serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_folder", type=str, help="Model folder to export")
    args = parser.parse_args()
    freeze_graph(args.model_folder)
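
A quick sanity check after freezing can confirm that the output node survived and that the variables really became constants. This is only an illustrative snippet; the .pb path below matches the default used later and should be adjusted to wherever freeze_graph() wrote the file.

import tensorflow as tf

# Hypothetical check: inspect the frozen GraphDef written by freeze_graph().
graph_def = tf.GraphDef()
with tf.gfile.GFile('/serving/frozen_model.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

print('%d nodes in the frozen graph' % len(graph_def.node))
print(any(n.name == 'tower_0/logits/predictions' for n in graph_def.node))  # expect True
print(any(n.op in ('Variable', 'VariableV2') for n in graph_def.node))      # expect False: all frozen to Const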

After this, we can load the graph. Following is my code for this:

# Imports for the whole prediction script below
# (cv2 and numpy are used by create_test_batch further down).
import argparse

import cv2
import numpy as np
import tensorflow as tf


def load_graph(frozen_graph_filename):
    # We load the protobuf file from the disk and parse it to retrieve the
    # unserialized graph_def
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # Then, we can use again a convenient built-in function to import a graph_def into the
    # current default Graph
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(
            graph_def,
            input_map=None,
            return_elements=None,
            name="prefix",
            op_dict=None,
            producer_op_list=None
        )
    return graph

The freeze step creates the protocol buffer (frozen_model.pb), which we can now load and run predictions against:

if __name__ == '__main__':
    # Let's allow the user to pass the filename as an argument
    parser = argparse.ArgumentParser()
    parser.add_argument("--frozen_model_filename", default="/serving/frozen_model.pb", type=str,
                        help="Frozen model file to import")
    parser.add_argument("--image_name", type=str, help="Image to test")
    args = parser.parse_args()

    # Create test batch
    image_data = create_test_batch(args.image_name)
    # We use our "load_graph" function
    graph = load_graph(args.frozen_model_filename)

    # We can verify that we can access the list of operations in the graph
    #for op in graph.get_operations():
    #    print(op.name)
    #    # prefix/Placeholder/inputs_placeholder
    #    # ...
    #    # prefix/Accuracy/predictions

    # We access the input and output nodes
    x = graph.get_tensor_by_name('prefix/batch_processing/Reshape:0')    # Input tensor
    y = graph.get_tensor_by_name('prefix/tower_0/logits/predictions:0')  # Output tensor

    # We launch a Session
    with tf.Session(graph=graph) as sess:
        # Note: we didn't initialize/restore anything, everything is stored in the graph_def
        y_out = sess.run(y, feed_dict={x: image_data})
        print(y_out)

My input node (x) expects a batch of 64 images of size 299x299x3, so I hacked around this by duplicating my test image 64 times to create an input batch. I do this in the following way:

def create_test_batch(input_image):
    data = []
    img = cv2.imread(input_image)  # Read the test image (OpenCV loads it as BGR)
    img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)  # Convert BGR image to YUV
    # equalize the histogram of the Y channel
    img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
    # convert the YUV image back to BGR format
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    img_resize = cv2.resize(img_output, (299, 299))  # Resize to the 299x299 input expected by the InceptionV3 model
    for i in range(0, 64):
        data.append(img_resize)  # Create a batch of 64 identical images
        #data.append(np.resize((ndimage.imread('/serving/'+input_image)),(299,299,3)))
    print np.shape(data)
    return data
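
Since the batch is just the same image repeated, the Python loop can also be replaced by a single NumPy tile. A small sketch, under the same assumption that the graph wants a [64, 299, 299, 3] batch (the helper name is hypothetical):

import numpy as np

def duplicate_to_batch(img_resize, batch_size=64):
    # Stack one preprocessed 299x299x3 image into a [batch_size, 299, 299, 3] array.
    return np.tile(img_resize[np.newaxis, ...], (batch_size, 1, 1, 1))

Whether the graph also accepts a smaller batch depends on whether anything downstream hard-codes 64, which is worth checking before changing the batch size.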

If there is a better way of solving the input batch problem, I would appreciate an answer.


