diff --git a/logo_detection.py b/logo_detection.py
index 637b8cb..f379f93 100644
--- a/logo_detection.py
+++ b/logo_detection.py
@@ -41,9 +41,9 @@ def load_image_into_numpy_array(image):
 
 def run_inference_for_single_image(image, graph):
   with graph.as_default():
-    with tf.Session() as sess:
+    with tf.compat.v1.Session() as sess:
       # Get handles to input and output tensors
-      ops = tf.get_default_graph().get_operations()
+      ops = tf.compat.v1.get_default_graph().get_operations()
       all_tensor_names = {output.name for op in ops for output in op.outputs}
       tensor_dict = {}
       for key in [
@@ -52,7 +52,7 @@ def run_inference_for_single_image(image, graph):
       ]:
         tensor_name = key + ':0'
         if tensor_name in all_tensor_names:
-          tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
+          tensor_dict[key] = tf.compat.v1.get_default_graph().get_tensor_by_name(
               tensor_name)
       if 'detection_masks' in tensor_dict:
         # The following processing is only for single image
@@ -69,7 +69,7 @@ def run_inference_for_single_image(image, graph):
         # Follow the convention by adding back the batch dimension
         tensor_dict['detection_masks'] = tf.expand_dims(
             detection_masks_reframed, 0)
-      image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
+      image_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('image_tensor:0')
 
       # Run inference
       output_dict = sess.run(tensor_dict,
@@ -95,10 +95,10 @@ def run_inference_for_single_image(image, graph):
 # List of the strings that is used to add correct label for each box.
 PATH_TO_LABELS = args.label_map
 
-detection_graph = tf.Graph()
+detection_graph = tf.compat.v1.Graph()
 with detection_graph.as_default():
-  od_graph_def = tf.GraphDef()
-  with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
+  od_graph_def = tf.compat.v1.GraphDef()
+  with tf.io.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
     serialized_graph = fid.read()
     od_graph_def.ParseFromString(serialized_graph)
     tf.import_graph_def(od_graph_def, name='')
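
Note (not part of the patch): instead of prefixing every call with tf.compat.v1, the script could import the compatibility module once and disable v2 behavior globally. A minimal sketch, assuming TensorFlow 2.x is installed and PATH_TO_FROZEN_GRAPH is set as in the script above:

    import tensorflow.compat.v1 as tf

    # Restore the graph/session semantics this script relies on.
    tf.disable_v2_behavior()

    detection_graph = tf.Graph()
    with detection_graph.as_default():
      od_graph_def = tf.GraphDef()
      with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        od_graph_def.ParseFromString(fid.read())
        tf.import_graph_def(od_graph_def, name='')

This keeps the original TF1-style code unchanged at the cost of opting the whole module out of eager execution, whereas the per-call tf.compat.v1 prefixes in the patch leave the rest of the program on default TF2 behavior.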