The TensorRT that ships with the NANO runs very slowly; each frame takes about 3 s to process

Problem description: When running tensorrt/samples/python/yolov3_onnx/onnx_to_tensorrt.py from the TensorRT package that ships with the NANO, execution is very slow and each frame takes about 3 s to process. Compared with running YOLOv3 detection directly under darknet there is no noticeable speedup. Any pointers would be appreciated.

Code (onnx_to_tensorrt.py): see the attachment.
After testing, the main time cost is in the line boxes, classes, scores = postprocessor.process(trt_outputs, (shape_orig_WH)); that single line takes 2-3 s per image. Please help!
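For reference, a minimal sketch of how this per-stage timing can be checked: it simply wraps the inference and post-processing calls with time.time(), and the variable names are assumed to match the script posted below.

import time

t0 = time.time()
trt_outputs = common.do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
t1 = time.time()
trt_outputs = [output.reshape(shape) for output, shape in zip(trt_outputs, output_shapes)]
boxes, classes, scores = postprocessor.process(trt_outputs, (shape_orig_WH))
t2 = time.time()
print("inference: %.3f s  post-processing: %.3f s" % (t1 - t0, t2 - t1))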
The steps I ran are as follows:

  1. $ sudo python yolov3_to_onnx.py
  2. $ sudo python onnx_to_tensorrt.py

onnx_to_tensorrt.py代码:

#!/usr/bin/env python2
#
# Copyright 1993-2019 NVIDIA Corporation.  All rights reserved.
#
# NOTICE TO LICENSEE:
#
# This source code and/or documentation ("Licensed Deliverables") are
# subject to NVIDIA intellectual property rights under U.S. and
# international Copyright laws.
#
# These Licensed Deliverables contained herein is PROPRIETARY and
# CONFIDENTIAL to NVIDIA and is being provided under the terms and
# conditions of a form of NVIDIA software license agreement by and
# between NVIDIA and Licensee ("License Agreement") or electronically
# accepted by Licensee.  Notwithstanding any terms or conditions to
# the contrary in the License Agreement, reproduction or disclosure
# of the Licensed Deliverables to any third party without the express
# written consent of NVIDIA is prohibited.
#
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE.  IT IS
# PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THESE LICENSED DELIVERABLES.
#
# U.S. Government End Users.  These Licensed Deliverables are a
# "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
# 1995), consisting of "commercial computer software" and "commercial
# computer software documentation" as such terms are used in 48
# C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
# only as a commercial end item.  Consistent with 48 C.F.R.12.212 and
# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
# U.S. Government End Users acquire the Licensed Deliverables with
# only those rights set forth herein.
#
# Any use of the Licensed Deliverables in individual and commercial
# software must include, in the user documentation and internal
# comments to the code, the above Disclaimer and U.S. Government End
# Users Notice.
#

from __future__ import print_function
import cv2 as cv
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
from PIL import ImageDraw
import time
from yolov3_to_onnx import download_file
from data_processing import PreprocessYOLO, PostprocessYOLO, ALL_CATEGORIES

import sys, os
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import common

TRT_LOGGER = trt.Logger()
cap = cv.VideoCapture(0)

def load_label_categories(label_file_path):
   categories = [line.rstrip('\n') for line in open(label_file_path)]
   return categories

LABEL_FILE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'coco_labels.txt')
ALL_CATEGORIES = load_label_categories(LABEL_FILE_PATH)

def get_engine1(vv):
    with trt.Runtime(TRT_LOGGER) as runtime:
        return runtime.deserialize_cuda_engine(vv)

def get_engine(onnx_file_path, engine_file_path=""):
    def build_engine():
        """Takes an ONNX file and creates a TensorRT engine to run inference with"""
        with trt.Builder(TRT_LOGGER) as builder, \
                builder.create_network() as network, \
                trt.OnnxParser(network, TRT_LOGGER) as parser:

            builder.max_workspace_size = 1 << 30  # 1GB
            builder.max_batch_size = 1

            if not os.path.exists(onnx_file_path):
                print('ONNX file {} not found, please run yolov3_to_onnx.py first to generate it.'.format(onnx_file_path))
                exit(0)

            print('Loading ONNX file from path {}...'.format(onnx_file_path))
            with open(onnx_file_path, 'rb') as model:
                print('Beginning ONNX file parsing')
                parser.parse(model.read())
            print('Completed parsing of ONNX file')

            print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))
            engine = builder.build_cuda_engine(network)
            print("Completed creating Engine")

            with open(engine_file_path, "wb") as f:
                f.write(engine.serialize())
            return engine

    if os.path.exists(engine_file_path):
        print("Reading engine from file {}".format(engine_file_path))
        with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())
    else:
        return build_engine()

def main():
    """Create a TensorRT engine for ONNX-based YOLOv3-608 and run inference."""

    # Try to load a previously generated YOLOv3-608 network graph in ONNX format:
    onnx_file_path = './yolov3.onnx'
    engine_file_path = "./yolov3.trt"
    # Deserialize the previously built engine directly from disk
    with open(engine_file_path, "rb") as f:
        engine = get_engine1(f.read())
    context = engine.create_execution_context()
    # with get_engine(onnx_file_path, engine_file_path) as engine:
    #     print("finished")
    while True:
        ret, frame = cap.read()
        if ret:
            x, y = frame.shape[0:2]
            # Two-dimensional tuple with the target network's (spatial) input resolution in HW order
            input_resolution_yolov3_HW = (608, 608)
            # Create a pre-processor object by specifying the required input resolution for YOLOv3
            preprocessor = PreprocessYOLO(input_resolution_yolov3_HW)
            # Pre-process the captured frame and keep the original alongside it
            image_raw, image = preprocessor.process(frame)
            # Store the shape of the original input image in WH format; we will need it later
            shape_orig_WH = image_raw.size

            # Output shapes expected by the post-processor
            output_shapes = [(1, 255, 19, 19), (1, 255, 38, 38), (1, 255, 76, 76)]
            # Do inference with TensorRT
            trt_outputs = []
            # Note: host/device buffers are (re)allocated here on every frame
            inputs, outputs, bindings, stream = common.allocate_buffers(engine)
            # Set host input to the image. The common.do_inference function will copy the input to the GPU before executing.
            inputs[0].host = image
            trt_outputs = common.do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
            #b = time.clock()
            # Before doing post-processing, reshape the outputs, as common.do_inference gives us flat arrays.
            trt_outputs = [output.reshape(shape) for output, shape in zip(trt_outputs, output_shapes)]

            postprocessor_args = {"yolo_masks": [(6, 7, 8), (3, 4, 5), (0, 1, 2)],                    # A list of 3 three-dimensional tuples for the YOLO masks
                                  "yolo_anchors": [(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),  # A list of 9 two-dimensional tuples for the YOLO anchors
                                                   (59, 119), (116, 90), (156, 198), (373, 326)],
                                  "obj_threshold": 0.6,                                               # Threshold for object coverage, float value between 0 and 1
                                  "nms_threshold": 0.5,                                               # Threshold for non-max suppression algorithm, float value between 0 and 1
                                  "yolo_input_resolution": input_resolution_yolov3_HW}

            postprocessor = PostprocessYOLO(**postprocessor_args)

            # Run the post-processing algorithms on the TensorRT outputs and get the bounding boxes of detected objects
            boxes, classes, scores = postprocessor.process(trt_outputs, (shape_orig_WH))
            if boxes is not None:
                for box, score, category in zip(boxes, scores, classes):
                    x_coord, y_coord, width, height = box
                    left = max(0, np.floor(x_coord + 0.5).astype(int))
                    top = max(0, np.floor(y_coord + 0.5).astype(int))
                    right = min(image_raw.width, np.floor(x_coord + width + 0.5).astype(int))
                    bottom = min(image_raw.height, np.floor(y_coord + height + 0.5).astype(int))
                    cv.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), thickness=2)
                    cv.putText(frame, "%s:%.2f" % (ALL_CATEGORIES[category], score), (left, top - 12), cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2, 0)
            cv.imshow("obj_detected_img", frame)

            c = cv.waitKey(20)
            if c == 27:
                break
        else:
            pass
   
if __name__ == '__main__':
   main()

[attach]9818[/attach]

[attach]9819[/attach]

Hi OP, have you solved this problem yet? I'm running into the same issue.