How to use TensorFlow checkpoints and .pb files

How do I use the data generated after training to detect objects in an image?

What I have tried:

Training generated a folder full of checkpoint files:

[screenshot of the checkpoint folder]

According to the tutorial I followed, I have to convert these checkpoints into a .pb file, right?
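(For context: with the TensorFlow 2 Object Detection API, exporting checkpoints to a SavedModel is normally done with the exporter_main_v2.py script that ships with the API. The paths below are placeholders, not the exact ones used here.)

python exporter_main_v2.py --input_type image_tensor ^
    --pipeline_config_path training_demo/models/my_model/pipeline.config ^
    --trained_checkpoint_dir training_demo/models/my_model ^
    --output_directory training_demo/exported-models/my_model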

So I created this folder:

[screenshot of the exported model folder]

I don't know how to use it to detect objects in an image. Does anyone have simple, working code for this?

I tried the code below, but I run into a problem when loading the .pb file:

google.protobuf.message.DecodeError: Wrong wire type in tag.

Could one of you test my .pb file and tell me whether the file itself is the problem or something else? You can download it there.

The code, main.py:

# Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import sys

# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")

# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util

# Grab path to current working directory
CWD_PATH = os.getcwd()

# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join("C:/####/workspace/training_demo/exported-models/my_model/saved_model/saved_model.pb")

# Path to label map file
PATH_TO_LABELS = os.path.join("C:/####/workspace/training_demo/annotations/label_map.pbtxt")

# Path to image
PATH_TO_IMAGE = os.path.join("C:/####/data/images/Ecran.png")

# Number of classes the object detector can identify
NUM_CLASSES = 52

# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.compat.v1.GraphDef()
    with tf.compat.v2.io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.compat.v1.import_graph_def(od_graph_def, name='')

    # Graph-mode sessions are only available through the compat.v1 API in TF 2.x
    sess = tf.compat.v1.Session(graph=detection_graph)

# Define input and output tensors (i.e. data) for the object detection classifier

# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')

# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')

# Load image using OpenCV, convert BGR to RGB, and
# expand image dimensions to have shape: [1, None, None, 3]
# i.e. a batch containing a single image, where each pixel holds an RGB value
image = cv2.imread(PATH_TO_IMAGE)
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_expanded = np.expand_dims(image_rgb, axis=0)

# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
    [detection_boxes, detection_scores, detection_classes, num_detections],
    feed_dict={image_tensor: image_expanded})

# Draw the results of the detection (aka 'visualize the results')

vis_util.visualize_boxes_and_labels_on_image_array(
    image,
    np.squeeze(boxes),
    np.squeeze(classes).astype(np.int32),
    np.squeeze(scores),
    category_index,
    use_normalized_coordinates=True,
    line_thickness=8,
    min_score_thresh=0.60)

# All the results have been drawn on image. Now display the image.
cv2.imshow('Object detector', image)

# Press any key to close the image
cv2.waitKey(0)

# Clean up
cv2.destroyAllWindows()

Console log:

(####) C:\####>python main.py
2021-03-04 10:16:34.144777: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cudart64_110.dll
Traceback (most recent call last):
  File "main.py", line 63, in <module>
    od_graph_def.ParseFromString(serialized_graph)
  File "C:\Users\Dorian\anaconda3\envs\####\lib\site-packages\google\protobuf\message.py", line 199, in ParseFromString
    return self.MergeFromString(serialized)
  File "C:\Users\Dorian\anaconda3\envs\####\lib\site-packages\google\protobuf\internal\python_message.py", line 1145, in MergeFromString
    if self._InternalParse(serialized, 0, length) != length:
  File "C:\Users\Dorian\anaconda3\envs\####\lib\site-packages\google\protobuf\internal\python_message.py", line 1212, in InternalParse
    pos = field_decoder(buffer, new_pos, end, self, field_dict)
  File "C:\Users\Dorian\anaconda3\envs\####\lib\site-packages\google\protobuf\internal\decoder.py", line 754, in DecodeField
    if value._InternalParse(buffer, pos, new_pos) != new_pos:
  File "C:\Users\Dorian\anaconda3\envs\####\lib\site-packages\google\protobuf\internal\python_message.py", line 1212, in InternalParse
    pos = field_decoder(buffer, new_pos, end, self, field_dict)
  File "C:\Users\Dorian\anaconda3\envs\####\lib\site-packages\google\protobuf\internal\decoder.py", line 733, in DecodeRepeatedField
    if value.add()._InternalParse(buffer, pos, new_pos) != new_pos:
  File "C:\Users\Dorian\anaconda3\envs\####\lib\site-packages\google\protobuf\internal\python_message.py", line 1212, in InternalParse
    pos = field_decoder(buffer, new_pos, end, self, field_dict)
  File "C:\Users\Dorian\anaconda3\envs\####\lib\site-packages\google\protobuf\internal\decoder.py", line 888, in DecodeMap
    if submsg._InternalParse(buffer, pos, new_pos) != new_pos:
  File "C:\Users\Dorian\anaconda3\envs\####\lib\site-packages\google\protobuf\internal\python_message.py", line 1198, in InternalParse
    (data, new_pos) = decoder._DecodeUnknownField(
  File "C:\Users\Dorian\anaconda3\envs\####\lib\site-packages\google\protobuf\internal\decoder.py", line 989, in _DecodeUnknownField
    (data, pos) = _DecodeUnknownFieldSet(buffer, pos)
  File "C:\Users\Dorian\anaconda3\envs\####\lib\site-packages\google\protobuf\internal\decoder.py", line 968, in _DecodeUnknownFieldSet
    (data, pos) = _DecodeUnknownField(buffer, pos, wire_type)
  File "C:\Users\Dorian\anaconda3\envs\####\lib\site-packages\google\protobuf\internal\decoder.py", line 989, in _DecodeUnknownField
    (data, pos) = _DecodeUnknownFieldSet(buffer, pos)
  File "C:\Users\Dorian\anaconda3\envs\####\lib\site-packages\google\protobuf\internal\decoder.py", line 968, in _DecodeUnknownFieldSet
    (data, pos) = _DecodeUnknownField(buffer, pos, wire_type)
  File "C:\Users\Dorian\anaconda3\envs\####\lib\site-packages\google\protobuf\internal\decoder.py", line 993, in _DecodeUnknownField
    raise _DecodeError('Wrong wire type in tag.')
google.protobuf.message.DecodeError: Wrong wire type in tag.

1 answer:
# Create and train a new model instance.
model = create_model()
model.fit(train_images, train_labels, epochs=5)

# Save the entire model as a SavedModel.
model.save('saved_model/my_model')

# The saved_model/my_model directory now contains an assets folder,
# saved_model.pb, and a variables folder.

# Load the model back from disk
new_model = tf.keras.models.load_model('saved_model/my_model')

# Evaluate the restored model
loss, acc = new_model.evaluate(test_images, test_labels, verbose=2)
print('Restored model, accuracy: {:5.2f}%'.format(100 * acc))

print(new_model.predict(test_images).shape)
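
The error in the question itself comes from treating saved_model.pb as a frozen GraphDef: the saved_model.pb written by the TF2 Object Detection API exporter is a SavedModel protobuf, not a GraphDef, which is why GraphDef.ParseFromString fails with "Wrong wire type in tag.". Instead of parsing that file, load the whole saved_model directory with tf.saved_model.load and call the returned detection function. Below is a minimal sketch, assuming the model was exported with exporter_main_v2.py and reusing the paths and label-map utilities from the question's main.py:

import cv2
import numpy as np
import tensorflow as tf

from utils import label_map_util
from utils import visualization_utils as vis_util

# Point at the saved_model DIRECTORY, not at the saved_model.pb file inside it
MODEL_DIR = "C:/####/workspace/training_demo/exported-models/my_model/saved_model"
PATH_TO_LABELS = "C:/####/workspace/training_demo/annotations/label_map.pbtxt"
PATH_TO_IMAGE = "C:/####/data/images/Ecran.png"

# Build the category index straight from the label map file
category_index = label_map_util.create_category_index_from_labelmap(
    PATH_TO_LABELS, use_display_name=True)

# Load the exported detection model; the returned object is callable
detect_fn = tf.saved_model.load(MODEL_DIR)

# The exported model expects a uint8 batch of shape [1, height, width, 3]
image = cv2.imread(PATH_TO_IMAGE)
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
input_tensor = tf.convert_to_tensor(image_rgb[np.newaxis, ...], dtype=tf.uint8)

detections = detect_fn(input_tensor)

# Outputs are batched tensors; take batch index 0 and draw boxes above a 60% score
vis_util.visualize_boxes_and_labels_on_image_array(
    image,
    detections['detection_boxes'][0].numpy(),
    detections['detection_classes'][0].numpy().astype(np.int32),
    detections['detection_scores'][0].numpy(),
    category_index,
    use_normalized_coordinates=True,
    line_thickness=8,
    min_score_thresh=0.60)

cv2.imshow('Object detector', image)
cv2.waitKey(0)
cv2.destroyAllWindows()

If you are unsure what the exported model expects, saved_model_cli show --dir <path to saved_model> --all (a command-line tool installed with TensorFlow) prints the available signatures with their input and output tensors.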
