Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU) Jetson Xavier
• DeepStream Version 6.0.1
• JetPack Version (valid for Jetson only) 4.6.1
• TensorRT Version 8.2
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type (questions, new requirements, bugs) Question
• How to reproduce the issue? (This is for bugs. Include which sample app is used, the content of the configuration files, the command line used, and other details for reproducing.)
• Requirement details (This is for new requirements. Include the module name, i.e. which plugin or sample application it is for, and the function description.)
I want to create .wts and .cfg files from my trained yolov7-tiny model.
However, the model cannot be converted with the gen_wts_yoloV7.py script provided by DeepStream-Yolo (GitHub - marcoslucianops/DeepStream-Yolo: NVIDIA DeepStream SDK 6.2 / 6.1.1 / 6.1 / 6.0.1 / 6.0 implementation for YOLO models).
An error occurs because child._get_name() returns IDetect for my model's detection head, while gen_wts_yoloV7.py currently only handles Detect. How can I solve this?
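For reference, this is roughly how I confirm the head type from the checkpoint (just a sketch; 'best.pt' is a placeholder for my trained weights, and it has to be run from the yolov7 repo root so the pickled model classes can be resolved):

import torch

ckpt = torch.load('best.pt', map_location='cpu')  # placeholder path for my trained weights
model = ckpt['ema' if ckpt.get('ema') else 'model'].float()
print(model.model[-1]._get_name())  # prints 'IDetect' here, but the script only dispatches on 'Detect'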
[yolov7-tiny train cfg]
# parameters
nc: 1  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# yolov7-tiny backbone
backbone:
  # [from, number, module, args] c2, k=1, s=1, p=None, g=1, act=True
  [[-1, 1, Conv, [32, 3, 2, None, 1, nn.LeakyReLU(0.1)]],  # 0-P1/2
   [-1, 1, Conv, [64, 3, 2, None, 1, nn.LeakyReLU(0.1)]],  # 1-P2/4
   [-1, 1, Conv, [32, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-2, 1, Conv, [32, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [32, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [32, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [[-1, -2, -3, -4], 1, Concat, [1]],
   [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]],  # 7

   [-1, 1, MP, []],  # 8-P3/8
   [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-2, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [[-1, -2, -3, -4], 1, Concat, [1]],
   [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]],  # 14

   [-1, 1, MP, []],  # 15-P4/16
   [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-2, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [[-1, -2, -3, -4], 1, Concat, [1]],
   [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]],  # 21

   [-1, 1, MP, []],  # 22-P5/32
   [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-2, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [256, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [256, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [[-1, -2, -3, -4], 1, Concat, [1]],
   [-1, 1, Conv, [512, 1, 1, None, 1, nn.LeakyReLU(0.1)]],  # 28
  ]

# yolov7-tiny head
head:
  [[-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-2, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, SP, [5]],
   [-2, 1, SP, [9]],
   [-3, 1, SP, [13]],
   [[-1, -2, -3, -4], 1, Concat, [1]],
   [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [[-1, -7], 1, Concat, [1]],
   [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]],  # 37

   [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [21, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]],  # route backbone P4
   [[-1, -2], 1, Concat, [1]],

   [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-2, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [[-1, -2, -3, -4], 1, Concat, [1]],
   [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]],  # 47

   [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [14, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]],  # route backbone P3
   [[-1, -2], 1, Concat, [1]],

   [-1, 1, Conv, [32, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-2, 1, Conv, [32, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [32, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [32, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [[-1, -2, -3, -4], 1, Concat, [1]],
   [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]],  # 57

   [-1, 1, Conv, [128, 3, 2, None, 1, nn.LeakyReLU(0.1)]],
   [[-1, 47], 1, Concat, [1]],

   [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-2, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [[-1, -2, -3, -4], 1, Concat, [1]],
   [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]],  # 65

   [-1, 1, Conv, [256, 3, 2, None, 1, nn.LeakyReLU(0.1)]],
   [[-1, 37], 1, Concat, [1]],

   [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-2, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [-1, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [[-1, -2, -3, -4], 1, Concat, [1]],
   [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]],  # 73

   [57, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [65, 1, Conv, [256, 3, 1, None, 1, nn.LeakyReLU(0.1)]],
   [73, 1, Conv, [512, 3, 1, None, 1, nn.LeakyReLU(0.1)]],

   [[74,75,76], 1, IDetect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
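For completeness, loading the cfg shows the same head entry (quick sketch; 'yolov7-tiny-custom.yaml' is just the name I use for the file above):

import yaml

with open('yolov7-tiny-custom.yaml') as f:  # the training cfg listed above
    cfg = yaml.safe_load(f)
print(cfg['head'][-1])  # -> [[74, 75, 76], 1, 'IDetect', ['nc', 'anchors']]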
[gen_wts_yoloV7.py]
import argparse
import os
import struct
import torch
from utils.torch_utils import select_device


class Layers(object):
    def __init__(self, n, size, fw, fc):
        self.blocks = [0 for _ in range(n)]
        self.current = 0

        self.width = size[0] if len(size) == 1 else size[1]
        self.height = size[0]

        self.num = 0
        self.nc = 0
        self.anchors = ''
        self.masks = []

        self.fw = fw
        self.fc = fc
        self.wc = 0

        self.net()

    def ReOrg(self, child):
        self.current = child.i
        self.fc.write('\n# ReOrg\n')
        self.reorg()

    def Conv(self, child):
        self.current = child.i
        self.fc.write('\n# Conv\n')
        if child.f != -1:
            r = self.get_route(child.f)
            self.route('%d' % r)
        self.convolutional(child)

    def DownC(self, child):
        self.current = child.i
        self.fc.write('\n# DownC\n')
        self.maxpool(child.mp)
        self.convolutional(child.cv3)
        self.route('-3')
        self.convolutional(child.cv1)
        self.convolutional(child.cv2)
        self.route('-1, -4')

    def MP(self, child):
        self.current = child.i
        self.fc.write('\n# MP\n')
        self.maxpool(child.m)

    def SP(self, child):
        self.current = child.i
        self.fc.write('\n# SP\n')
        if child.f != -1:
            r = self.get_route(child.f)
            self.route('%d' % r)
        self.maxpool(child.m)

    def SPPCSPC(self, child):
        self.current = child.i
        self.fc.write('\n# SPPCSPC\n')
        self.convolutional(child.cv2)
        self.route('-2')
        self.convolutional(child.cv1)
        self.convolutional(child.cv3)
        self.convolutional(child.cv4)
        self.maxpool(child.m[0])
        self.route('-2')
        self.maxpool(child.m[1])
        self.route('-4')
        self.maxpool(child.m[2])
        self.route('-6, -5, -3, -1')
        self.convolutional(child.cv5)
        self.convolutional(child.cv6)
        self.route('-1, -13')
        self.convolutional(child.cv7)

    def RepConv(self, child):
        self.current = child.i
        self.fc.write('\n# RepConv\n')
        if child.f != -1:
            r = self.get_route(child.f)
            self.route('%d' % r)
        self.convolutional(child.rbr_1x1)
        self.route('-2')
        self.convolutional(child.rbr_dense)
        self.shortcut(-3, act=self.get_activation(child.act._get_name()))

    def Upsample(self, child):
        self.current = child.i
        self.fc.write('\n# Upsample\n')
        self.upsample(child)

    def Concat(self, child):
        self.current = child.i
        self.fc.write('\n# Concat\n')
        r = []
        for i in range(1, len(child.f)):
            r.append(self.get_route(child.f[i]))
        self.route('-1, %s' % str(r)[1:-1])

    def Shortcut(self, child):
        self.current = child.i
        self.fc.write('\n# Shortcut\n')
        r = self.get_route(child.f[1])
        self.shortcut(r)

    def Detect(self, child):
        self.current = child.i
        self.fc.write('\n# Detect\n')
        self.get_anchors(child.state_dict(), child.m[0].out_channels)
        for i, m in enumerate(child.m):
            r = self.get_route(child.f[i])
            self.route('%d' % r)
            self.convolutional(m, detect=True)
            self.yolo(i)

    def net(self):
        self.fc.write('[net]\n' +
                      'width=%d\n' % self.width +
                      'height=%d\n' % self.height +
                      'channels=3\n' +
                      'letter_box=1\n')

    def reorg(self):
        self.blocks[self.current] += 1
        self.fc.write('\n[reorg]\n')

    def convolutional(self, cv, act=None, detect=False):
        self.blocks[self.current] += 1
        self.get_state_dict(cv.state_dict())
        if cv._get_name() == 'Conv2d':
            filters = cv.out_channels
            size = cv.kernel_size
            stride = cv.stride
            pad = cv.padding
            groups = cv.groups
            bias = cv.bias
            bn = False
            act = 'linear' if not detect else 'logistic'
        elif cv._get_name() == 'Sequential':
            filters = cv[0].out_channels
            size = cv[0].kernel_size
            stride = cv[0].stride
            pad = cv[0].padding
            groups = cv[0].groups
            bias = cv[0].bias
            bn = True if cv[1]._get_name() == 'BatchNorm2d' else False
            act = 'linear'
        else:
            filters = cv.conv.out_channels
            size = cv.conv.kernel_size
            stride = cv.conv.stride
            pad = cv.conv.padding
            groups = cv.conv.groups
            bias = cv.conv.bias
            bn = True if hasattr(cv, 'bn') else False
            if act is None:
                act = self.get_activation(cv.act._get_name()) if hasattr(cv, 'act') else 'linear'
        b = 'batch_normalize=1\n' if bn is True else ''
        g = 'groups=%d\n' % groups if groups > 1 else ''
        w = 'bias=1\n' if bias is not None and bn is not False else 'bias=0\n' if bias is None and bn is False else ''
        self.fc.write('\n[convolutional]\n' +
                      b +
                      'filters=%d\n' % filters +
                      'size=%s\n' % self.get_value(size) +
                      'stride=%s\n' % self.get_value(stride) +
                      'pad=%s\n' % self.get_value(pad) +
                      g +
                      w +
                      'activation=%s\n' % act)

    def route(self, layers):
        self.blocks[self.current] += 1
        self.fc.write('\n[route]\n' +
                      'layers=%s\n' % layers)

    def shortcut(self, r, act='linear'):
        self.blocks[self.current] += 1
        self.fc.write('\n[shortcut]\n' +
                      'from=%d\n' % r +
                      'activation=%s\n' % act)

    def maxpool(self, m):
        self.blocks[self.current] += 1
        stride = m.stride
        size = m.kernel_size
        mode = m.ceil_mode
        m = 'maxpool_up' if mode else 'maxpool'
        self.fc.write('\n[%s]\n' % m +
                      'stride=%d\n' % stride +
                      'size=%d\n' % size)

    def upsample(self, child):
        self.blocks[self.current] += 1
        stride = child.scale_factor
        self.fc.write('\n[upsample]\n' +
                      'stride=%d\n' % stride)

    def yolo(self, i):
        self.blocks[self.current] += 1
        self.fc.write('\n[yolo]\n' +
                      'mask=%s\n' % self.masks[i] +
                      'anchors=%s\n' % self.anchors +
                      'classes=%d\n' % self.nc +
                      'num=%d\n' % self.num +
                      'scale_x_y=2.0\n' +
                      'new_coords=1\n')

    def get_state_dict(self, state_dict):
        for k, v in state_dict.items():
            if 'num_batches_tracked' not in k:
                vr = v.reshape(-1).numpy()
                self.fw.write('{} {} '.format(k, len(vr)))
                for vv in vr:
                    self.fw.write(' ')
                    self.fw.write(struct.pack('>f', float(vv)).hex())
                self.fw.write('\n')
                self.wc += 1

    def get_anchors(self, state_dict, out_channels):
        anchor_grid = state_dict['anchor_grid']
        aa = anchor_grid.reshape(-1).tolist()
        am = anchor_grid.tolist()
        self.num = (len(aa) / 2)
        self.nc = int((out_channels / (self.num / len(am))) - 5)
        self.anchors = str(aa)[1:-1]
        n = 0
        for m in am:
            mask = []
            for _ in range(len(m)):
                mask.append(n)
                n += 1
            self.masks.append(str(mask)[1:-1])

    def get_value(self, key):
        if type(key) == int:
            return key
        return key[0] if key[0] == key[1] else str(key)[1:-1]

    def get_route(self, n):
        r = 0
        if n < 0:
            for i, b in enumerate(self.blocks[self.current - 1::-1]):
                if i < abs(n) - 1:
                    r -= b
                else:
                    break
        else:
            for i, b in enumerate(self.blocks):
                if i <= n:
                    r += b
                else:
                    break
        return r - 1

    def get_activation(self, act):
        if act == 'Hardswish':
            return 'hardswish'
        elif act == 'LeakyReLU':
            return 'leaky'
        elif act == 'SiLU':
            return 'silu'
        return 'linear'


def parse_args():
    parser = argparse.ArgumentParser(description='PyTorch YOLOv7 conversion')
    parser.add_argument('-w', '--weights', required=True, help='Input weights (.pt) file path (required)')
    parser.add_argument(
        '-s', '--size', nargs='+', type=int, help='Inference size [H,W] (default [640])')
    parser.add_argument("--p6", action="store_true", help="P6 model")
    args = parser.parse_args()
    if not os.path.isfile(args.weights):
        raise SystemExit('Invalid weights file')
    if not args.size:
        args.size = [1280] if args.p6 else [640]
    return args.weights, args.size


pt_file, inference_size = parse_args()
model_name = os.path.basename(pt_file).split('.pt')[0]
wts_file = model_name + '.wts' if 'yolov7' in model_name else 'yolov7_' + model_name + '.wts'
cfg_file = model_name + '.cfg' if 'yolov7' in model_name else 'yolov7_' + model_name + '.cfg'

device = select_device('cpu')
model = torch.load(pt_file, map_location=device)
model = model['ema' if model.get('ema') else 'model'].float()

anchor_grid = model.model[-1].anchors * model.model[-1].stride[..., None, None]
delattr(model.model[-1], 'anchor_grid')
model.model[-1].register_buffer('anchor_grid', anchor_grid)
model.to(device).eval()

with open(wts_file, 'w') as fw, open(cfg_file, 'w') as fc:
    layers = Layers(len(model.model), inference_size, fw, fc)
    for child in model.model.children():
        if child._get_name() == 'ReOrg':
            layers.ReOrg(child)
        elif child._get_name() == 'Conv':
            layers.Conv(child)
        elif child._get_name() == 'DownC':
            layers.DownC(child)
        elif child._get_name() == 'MP':
            layers.MP(child)
        elif child._get_name() == 'SP':
            layers.SP(child)
        elif child._get_name() == 'SPPCSPC':
            layers.SPPCSPC(child)
        elif child._get_name() == 'RepConv':
            layers.RepConv(child)
        elif child._get_name() == 'Upsample':
            layers.Upsample(child)
        elif child._get_name() == 'Concat':
            layers.Concat(child)
        elif child._get_name() == 'Shortcut':
            layers.Shortcut(child)
        elif child._get_name() == 'Detect':
            layers.Detect(child)
        else:
            raise SystemExit('Model not supported')

os.system('echo "%d" | cat - %s > temp && mv temp %s' % (layers.wc, wts_file, wts_file))
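The only direction I can think of so far is sketched below, but I am not sure it is correct, so please advise. It assumes (untested) that routing IDetect through the existing Detect writer is valid once the implicit layers are folded in, and that IDetect in my yolov7 checkout provides a fuse() method that merges the ImplicitA/ImplicitM ('ia'/'im') parameters into the detection convs; 'best.pt' is a placeholder for my weights file.

# Untested sketch, not a confirmed fix.
# Step 1: in the dispatch loop of gen_wts_yoloV7.py, add a branch such as
#     elif child._get_name() == 'IDetect':
#         layers.Detect(child)
# Step 2: fold the implicit layers into the conv weights first, so the .wts written by
# the Detect handler is not missing the 'ia'/'im' contributions.
import torch

ckpt = torch.load('best.pt', map_location='cpu')
model = ckpt['ema' if ckpt.get('ema') else 'model'].float()
head = model.model[-1]
with torch.no_grad():
    if head._get_name() == 'IDetect' and hasattr(head, 'fuse'):
        head.fuse()  # folds ia/im into the detection conv weight/bias, if available
torch.save({'model': model}, 'best_fused.pt')  # then run gen_wts_yoloV7.py on this file

Is this the expected workflow for the DeepStream-Yolo conversion, or is there an updated gen_wts_yoloV7.py that already handles IDetect?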