Retrain image detection with MobileNet

执念已碎 2020-12-06 12:01

Several ways of retraining MobileNet for use with Tensorflow.js have failed for me. Is there any way to use a retrained model with Tensorflow.js?

Both using the mode…

4 Answers
  •  -上瘾入骨i
    2020-12-06 12:24

    Maybe somebody can modify retrain.py to support MobileNet V2 using my approach. Here is the original retrain.py link; that link points to Google's GitHub code, not mine.

    I changed retrain.py; below is my git diff:

    diff --git a/scripts/retrain.py b/scripts/retrain.py
    index 5fa9b0f..02a4f9a 100644
    --- a/scripts/retrain.py
    +++ b/scripts/retrain.py
    @@ -1,3 +1,5 @@
    +# -*- coding: utf-8 -*-
    +
     # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
     #
     # Licensed under the Apache License, Version 2.0 (the "License");
    @@ -112,6 +114,13 @@ from tensorflow.python.framework import graph_util
     from tensorflow.python.framework import tensor_shape
     from tensorflow.python.platform import gfile
     from tensorflow.python.util import compat
    +from tensorflow import saved_model as sm
    +from tensorflow.python.saved_model import builder as saved_model_builder
    +from tensorflow.python.saved_model import signature_constants
    +from tensorflow.python.saved_model import signature_def_utils
    +from tensorflow.python.saved_model import tag_constants
    +from tensorflow.python.saved_model import utils as saved_model_utils
    +
    
     FLAGS = None
    
    @@ -319,6 +328,7 @@ def maybe_download_and_extract(data_url):
       Args:
         data_url: Web location of the tar file containing the pretrained model.
       """
    +  print(FLAGS.model_dir)
       dest_directory = FLAGS.model_dir
       if not os.path.exists(dest_directory):
         os.makedirs(dest_directory)
    @@ -827,6 +837,7 @@ def save_graph_to_file(sess, graph, graph_file_name):
           sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
       with gfile.FastGFile(graph_file_name, 'wb') as f:
         f.write(output_graph_def.SerializeToString())
    +
       return
    
    
    @@ -971,6 +982,7 @@ def main(_):
    
       # Prepare necessary directories  that can be used during training
       prepare_file_system()
    +  sigs = {}
    
       # Gather information about the model architecture we'll be using.
       model_info = create_model_info(FLAGS.architecture)
    @@ -1002,6 +1014,9 @@ def main(_):
           FLAGS.random_brightness)
    
       with tf.Session(graph=graph) as sess:
    +    serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
    +    feature_configs = {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32),}
    +    tf_example = tf.parse_example(serialized_tf_example, feature_configs)
         # Set up the image decoding sub-graph.
         jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(
             model_info['input_width'], model_info['input_height'],
    @@ -1133,6 +1148,73 @@ def main(_):
                               (test_filename,
                                list(image_lists.keys())[predictions[i]]))
    
    +    """
    +    # analyze SignatureDef protobuf
    +    SignatureDef_d = graph.signature_def
    +    SignatureDef = SignatureDef_d[sm.signature_constants.CLASSIFY_INPUTS]
    +
    +    # three TensorInfo protobuf
    +    X_TensorInfo = SignatureDef.inputs['input_1']
    +    scale_TensorInfo = SignatureDef.inputs['input_2']
    +    y_TensorInfo = SignatureDef.outputs['output']
    +
    +    # Tensor details
    +    # .get_tensor_from_tensor_info() to get default graph 
    +    X = sm.utils.get_tensor_from_tensor_info(X_TensorInfo, sess.graph)
    +    scale = sm.utils.get_tensor_from_tensor_info(scale_TensorInfo, sess.graph)
    +    y = sm.utils.get_tensor_from_tensor_info(y_TensorInfo, sess.graph)
    +    """
    +
    +    """
    +    output_graph_def = graph_util.convert_variables_to_constants(
    +      sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
    +
    +    X_TensorInfo = sm.utils.build_tensor_info(bottleneck_input)
    +    scale_TensorInfo = sm.utils.build_tensor_info(ground_truth_input)
    +    y_TensorInfo = sm.utils.build_tensor_info(output_graph_def)
    +
    +    # build SignatureDef protobuf
    +    SignatureDef = sm.signature_def_utils.build_signature_def(
    +                                inputs={'input_1': X_TensorInfo, 'input_2': scale_TensorInfo},
    +                                outputs={'output': y_TensorInfo},
    +                                method_name='what'
    +    )
    +    """
    +
    +    #graph = tf.get_default_graph()
    +    tensors_per_node = [node.values() for node in graph.get_operations()]
    +    tensor_names = [tensor.name for tensors in tensors_per_node for tensor in tensors]
    +    print(tensor_names)
    +
    +    export_dir = './tf_files/savemode'
    +    builder = saved_model_builder.SavedModelBuilder(export_dir)
    +
    +    # name="" is important to ensure we don't get spurious prefixing
    +    graph_def = tf.GraphDef()
    +    tf.import_graph_def(graph_def, name="")
    +    g = tf.get_default_graph()
    +    inp1 = g.get_tensor_by_name("input:0")
    +    inp2 = g.get_tensor_by_name("input_1/BottleneckInputPlaceholder:0")
    +    inp3 = g.get_tensor_by_name("input_1/GroundTruthInput:0")
    +    out = g.get_tensor_by_name("accuracy_1:0")
    +
    +    sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
    +        tf.saved_model.signature_def_utils.predict_signature_def(
    +            {'input_1': inp1, 'input_2': inp3}, {"output": out})
    +
    +    builder.add_meta_graph_and_variables(sess,
    +                                         tags=[tag_constants.SERVING],
    +                                         signature_def_map=sigs)
    +
    +    """
    +    builder.add_meta_graph_and_variables(
    +            sess=sess,
    +            tags=[tag_constants.SERVING],
    +            signature_def_map={sm.signature_constants.CLASSIFY_INPUTS: SignatureDef})
    +    """
    +
    +    builder.save()
    +
         # Write out the trained graph and labels with the weights stored as
         # constants.
         save_graph_to_file(sess, graph, FLAGS.output_graph)
    

    Using my diff, I can generate a TensorFlow SavedModel (the format used by TensorFlow Serving). I then use the following command to convert the SavedModel to a TensorFlow.js model:

    tensorflowjs_converter \
        --input_format=tf_saved_model \
        --output_format=tfjs_graph_model \
        ./tf_files/savemode \
        ./tf_files/js_model
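
    Before running the converter, it can help to double-check the exported SavedModel and its signature. Below is a minimal sketch (TF 1.x style, to match retrain.py) that loads the SavedModel from the export_dir used in the diff and prints the input/output tensor names of each signature:

    import tensorflow as tf
    from tensorflow.python.saved_model import tag_constants

    export_dir = './tf_files/savemode'  # same path as in the diff above

    with tf.Session(graph=tf.Graph()) as sess:
        # Load the SavedModel and print its SignatureDefs so the input/output
        # tensor names can be confirmed before conversion.
        meta_graph = tf.saved_model.loader.load(sess, [tag_constants.SERVING], export_dir)
        for name, sig in meta_graph.signature_def.items():
            print('signature:', name)
            print('  inputs :', {k: v.name for k, v in sig.inputs.items()})
            print('  outputs:', {k: v.name for k, v in sig.outputs.items()})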
    

    There are still unsupported ops for the latest TensorFlow.js version.

    I made a video to explain why we cannot convert the TensorFlow frozen model to a TensorFlow.js model and how to find the input and output tensors. It shows the running steps and results, and finally the unsupported op ScalarSummary and the reason for it.
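
    If you want to check the ops yourself, here is a minimal sketch (TF 1.x) that scans the frozen graph and lists its op types; the ./tf_files/retrained_graph.pb path is an assumption based on the retrain.py scripts and may differ in your setup:

    import tensorflow as tf

    frozen_graph_path = './tf_files/retrained_graph.pb'  # assumed output of retrain.py

    graph_def = tf.GraphDef()
    with tf.gfile.GFile(frozen_graph_path, 'rb') as f:
        graph_def.ParseFromString(f.read())

    # List the distinct op types in the graph; anything not supported by
    # tensorflowjs_converter will show up here.
    print(sorted({node.op for node in graph_def.node}))

    # Summary ops (e.g. ScalarSummary) are training-time only and are a common
    # cause of conversion failures.
    print([node.name for node in graph_def.node if 'Summary' in node.op])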

    Since I cannot convert the MobileNet model to a TensorFlow.js model, my workaround is to use Python TensorFlow and the Flask library on the server side: the user uploads an image to the server, and the server returns the prediction result.
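
    A minimal sketch of that Flask workaround is below (TF 1.x). The input tensor name 'input:0' comes from the diff above; the output name 'final_result:0', the file paths, and the preprocessing are assumptions based on the retrain.py defaults and may need adjusting:

    import io
    import numpy as np
    import tensorflow as tf
    from flask import Flask, request, jsonify
    from PIL import Image

    app = Flask(__name__)

    # Load the frozen graph and labels once at startup.
    graph_def = tf.GraphDef()
    with tf.gfile.GFile('./tf_files/retrained_graph.pb', 'rb') as f:
        graph_def.ParseFromString(f.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name='')

    labels = [line.strip() for line in tf.gfile.GFile('./tf_files/retrained_labels.txt')]
    sess = tf.Session(graph=graph)
    input_tensor = graph.get_tensor_by_name('input:0')          # name taken from the diff
    output_tensor = graph.get_tensor_by_name('final_result:0')  # assumed default final tensor

    @app.route('/predict', methods=['POST'])
    def predict():
        # The client uploads the image as multipart form data under the key 'image'.
        img = Image.open(io.BytesIO(request.files['image'].read())).convert('RGB')
        img = img.resize((224, 224))
        # MobileNet in retrain.py is fed with mean/std 127.5; adjust if your model differs.
        batch = np.expand_dims((np.asarray(img, dtype=np.float32) - 127.5) / 127.5, axis=0)
        scores = sess.run(output_tensor, feed_dict={input_tensor: batch})[0]
        top = int(np.argmax(scores))
        return jsonify({'label': labels[top], 'score': float(scores[top])})

    if __name__ == '__main__':
        app.run(host='0.0.0.0', port=5000)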
