
Object detection with pretrained models in TensorFlow (Part 1): Using a pretrained model


1. Running the sample

Official tutorial link: https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb. It kept having problems and never ran for me, so for now I use code that someone else has already written.

Here is a link to code that works on Ubuntu: https://gitee.com/bubbleit/JianDanWuTiShiBie. Run it with Python 2; Python 3 may have problems.

The code comes from https://gitee.com/talengu/JianDanWuTiShiBie/tree/master, with some adjustments and modifications of my own. The code is contained in ODtest.py, and /ssd_mobilenet_v1_coco_11_06_2017 stores the pretrained model.
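For reference, here is a minimal sketch of how that model directory can be fetched and unpacked next to ODtest.py. The exact download URL is an assumption based on the model name; check the TensorFlow detection model zoo page if it no longer resolves.

# Sketch: download and unpack the pretrained SSD MobileNet model (URL assumed from the model name).
import tarfile

try:                      # Python 2
    from urllib import urlretrieve
except ImportError:       # Python 3
    from urllib.request import urlretrieve

MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_URL = 'http://download.tensorflow.org/models/object_detection/' + MODEL_NAME + '.tar.gz'

urlretrieve(MODEL_URL, MODEL_NAME + '.tar.gz')
with tarfile.open(MODEL_NAME + '.tar.gz') as tar:
    tar.extractall()      # creates ./ssd_mobilenet_v1_coco_11_06_2017/frozen_inference_graph.pb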

The original code is as follows:

import numpy as np
from matplotlib import pyplot as plt
import os
import tensorflow as tf
from PIL import Image
from utils import label_map_util
from utils import visualization_utils as vis_util

import datetime
# Suppress TensorFlow warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

detection_graph = tf.Graph()

# Load the model data --------------------------------------------------------------------------------------------------
def loading():
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        PATH_TO_CKPT = 'ssd_mobilenet_v1_coco_11_06_2017' + '/frozen_inference_graph.pb'
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    return detection_graph


# Detection -------------------------------------------------------------------------------------------------------------
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)

# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=90, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

def Detection(image_path="images/image1.jpg"):
    loading()
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # for image_path in TEST_IMAGE_PATHS:
            image = Image.open(image_path)

            # the array based representation of the image will be used later in order to prepare the
            # result image with boxes and labels on it.
            image_np = load_image_into_numpy_array(image)

            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

            # Each box represents a part of the image where a particular object was detected.
            boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

            # Each score represents the level of confidence for each of the objects.
            # Score is shown on the result image, together with the class label.
            scores = detection_graph.get_tensor_by_name('detection_scores:0')
            classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')

            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes, scores, classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})

            # Visualization of the results of a detection (draw boxes and labels on the image).
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)

            # Output the top results.
            for i in range(3):
                if classes[0][i] in category_index.keys():
                    class_name = category_index[classes[0][i]]['name']
                else:
                    class_name = 'N/A'
                print("Object: %s Probability: %s" % (class_name, scores[0][i]))

            # Show the image with matplotlib.
            # Size, in inches, of the output images.
            IMAGE_SIZE = (20, 12)
            plt.figure(figsize=IMAGE_SIZE)
            plt.imshow(image_np)
            plt.show()

# Run
Detection()

After git cloning it locally and running it, there are a few errors.

Problem 1

Error message: UnicodeDecodeError: 'ascii' codec can't decode byte 0xe5 in position 1: ordinal not in range(128)

Solution: see https://www.cnblogs.com/QuLory/p/3615584.html

The key error is the Unicode decoding problem on the last line above. According to what I found online, files are read with the default encoding of ascii instead of utf8, which causes the error.

Adding the following lines to the code fixes it.

import sys
reload(sys)
sys.setdefaultencoding('utf8')
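As an aside, reload(sys) and sys.setdefaultencoding() only exist in Python 2 (consistent with the note above about running this under Python 2). On Python 3 the usual approach is simply to pass the encoding explicitly wherever a text file is read; a minimal sketch, purely for illustration:

# Python 3 sketch (illustration only): read a text file with an explicit encoding
# instead of relying on a process-wide default.
with open('data/mscoco_label_map.pbtxt', encoding='utf-8') as f:
    label_map_text = f.read()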

Problem 2

Error message: _tkinter.TclError: no display name and no $DISPLAY environment variable. Details:

Traceback (most recent call last):
  File "ODtest.py", line 103, in <module>
    Detection()
  File "ODtest.py", line 96, in Detection
    plt.figure(figsize=IMAGE_SIZE)
  File "/usr/local/lib/python2.7/dist-packages/matplotlib/pyplot.py", line 533, in figure
    **kwargs)
  File "/usr/local/lib/python2.7/dist-packages/matplotlib/backend_bases.py", line 161, in new_figure_manager
    return cls.new_figure_manager_given_figure(num, fig)
  File "/usr/local/lib/python2.7/dist-packages/matplotlib/backends/_backend_tk.py", line 1046, in new_figure_manager_given_figure
    window = Tk.Tk(className="matplotlib")
  File "/usr/lib/python2.7/lib-tk/Tkinter.py", line 1822, in __init__
    self.tk = _tkinter.create(screenName, baseName, className, interactive, wantobjects, useTk, sync, use)
_tkinter.TclError: no display name and no $DISPLAY environment variable

Solution: see https://blog.csdn.net/qq_22194315/article/details/77984423

Code-only solution

This is also the solution most people get from Q&A sites like Stack Overflow: before importing pyplot or pylab, switch matplotlib's backend to "Agg". Here is the code:

# do this before importing pylab or pyplot
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
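Note that the Agg backend renders off-screen, so the plt.show() call at the end of Detection() will not open a window. A minimal sketch of saving the annotated image to a file instead (the filename here is arbitrary):

# With Agg there is no display; write the figure to disk instead of calling plt.show().
plt.savefig('detection_result.png', bbox_inches='tight')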

The modified code becomes:

#!usr/bin/python
# -*- coding: utf-8 -*-

import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import os
import tensorflow as tf
from PIL import Image
from utils import label_map_util
from utils import visualization_utils as vis_util

import datetime
# Python 2: set the default encoding to utf8 (fix for Problem 1 above)
import sys
reload(sys)
sys.setdefaultencoding('utf8')

# Suppress TensorFlow warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

detection_graph = tf.Graph()

# Load the model data --------------------------------------------------------------------------------------------------
def loading():

    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        PATH_TO_CKPT = 'ssd_mobilenet_v1_coco_11_06_2017' + '/frozen_inference_graph.pb'
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    return detection_graph



# Detection -------------------------------------------------------------------------------------------------------------
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=90, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

def Detection(image_path="images/image1.jpg"):
    loading()
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # for image_path in TEST_IMAGE_PATHS:
            image = Image.open(image_path)

            # the array based representation of the image will be used later in order to prepare the
            # result image with boxes and labels on it.
            image_np = load_image_into_numpy_array(image)

            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

            # Each box represents a part of the image where a particular object was detected.
            boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

            # Each score represents the level of confidence for each of the objects.
            # Score is shown on the result image, together with the class label.
            scores = detection_graph.get_tensor_by_name('detection_scores:0')
            classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')

            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes, scores, classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})

            # Visualization of the results of a detection (draw boxes and labels on the image).
            vis_util.visualize_boxes_and_labels_on_image_array(
                 image_np,
                 np.squeeze(boxes),
                 np.squeeze(classes).astype(np.int32),
                 np.squeeze(scores),
                 category_index,
                 use_normalized_coordinates=True,
                 line_thickness=8)
            # Output the top results.
            for i in range(3):
                if classes[0][i] in category_index.keys():
                    class_name = category_index[classes[0][i]]['name']
                else:
                    class_name = 'N/A'
                print("Object: %s Probability: %s" % (class_name, scores[0][i]))
                
            # Show the image with matplotlib.
            # Size, in inches, of the output images.
            IMAGE_SIZE = (20, 12)
            plt.figure(figsize=IMAGE_SIZE)
            plt.imshow(image_np)
            plt.show()



# Run
Detection()

Run result:

(screenshot of the detection output)

If all goes well, just add a timing function and call the pretrained model that has already been downloaded.
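The script already imports datetime but never uses it; a minimal sketch of the timing mentioned above, wrapping the existing Detection() call:

# Sketch: time one Detection() call using the datetime import that is already in the script.
start = datetime.datetime.now()
Detection()
print("Detection took %s" % (datetime.datetime.now() - start))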

2. Using the pretrained model

aa
