RT-DETR Improvement Strategy [Conv & Transformer] | TPAMI-2024 Conv2Former: Simplifying Self-Attention with Convolutional Modulation and Large-Kernel Convolutions to Boost Network Performance
1. Introduction

This article describes improving the RT-DETR object detection network with Conv2Former. Transformers capture global information through self-attention, but at a considerable resource cost; convolutions are cheap, yet they only capture local information within the extent of the kernel. Conv2Former simplifies the self-attention mechanism through a convolutional modulation operation and makes more effective use of large-kernel convolutions, achieving strong performance on visual recognition tasks.
2. Conv2Former Overview
Conv2Former: A Simple Transformer-Style ConvNet for Visual Recognition
Conv2Former is a novel convolutional network architecture for visual recognition. Its design principles and advantages are as follows:
2.1 Principle

2.1.1 Overall Architecture
Conv2Former adopts a pyramid structure similar to ConvNeXt and Swin Transformer: four stages, each producing feature maps at a different resolution, with a patch embedding block (typically a 2×2 convolution with stride 2, sketched below) reducing the resolution between consecutive stages. The stages stack different numbers of convolution blocks, yielding five variants: Conv2Former-N, Conv2Former-T, Conv2Former-S, Conv2Former-B, and Conv2Former-L.
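As a minimal illustration of the downsampling between stages (a sketch with made-up channel widths, not the actual Conv2Former stage dimensions):

import torch
import torch.nn as nn

# Inter-stage patch embedding as described above: a stride-2 2x2 convolution
# that halves the spatial resolution while changing the channel width.
# The channel numbers here are illustrative only.
patch_embed = nn.Conv2d(96, 192, kernel_size=2, stride=2)

x = torch.randn(1, 96, 56, 56)   # stage-i feature map
y = patch_embed(x)               # -> (1, 192, 28, 28), half the resolution
print(y.shape)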
2.1.2 Convolutional Modulation Block
- Self-Attention: for an input token sequence X, self-attention first generates keys K, queries Q, and values V through linear layers. The output is a weighted average of the values, with weights given by the similarity matrix A = Softmax(QKᵀ) ∈ ℝ^{N×N}; because A is N×N, the computational complexity of self-attention grows quadratically with the sequence length N.
- Convolutional modulation: for an input X ∈ ℝ^{H×W×C}, the output Z is computed with a simple depthwise convolution of kernel size k×k and a Hadamard product (see the sketch after this list):

  Z = A ⊙ V, with A = DConv_{k×k}(W₁X) and V = W₂X,

  where ⊙ is the Hadamard product, W₁ and W₂ are the weight matrices of two linear layers, and DConv_{k×k} denotes a depthwise convolution with kernel size k×k. Each spatial location (h, w) is thereby correlated with all pixels in the k×k square region centered at (h, w), channel-wise mixing is handled by the linear layers, and the output at each location is a weighted sum over the pixels in that region.
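To make the second formulation concrete, here is a minimal PyTorch sketch of the convolutional modulation step (it mirrors the ConvMod class given in Section 3; the 11×11 kernel is the paper's default):

import torch
import torch.nn as nn

class ConvModSketch(nn.Module):
    """Minimal sketch of convolutional modulation: Z = A ⊙ V,
    A = DConv_{k×k}(W1·X), V = W2·X (cf. the ConvMod class in Section 3)."""
    def __init__(self, dim, k=11):
        super().__init__()
        self.w1 = nn.Conv2d(dim, dim, 1)                                  # linear layer W1 (1x1 conv)
        self.dconv = nn.Conv2d(dim, dim, k, padding=k // 2, groups=dim)  # depthwise k×k conv
        self.w2 = nn.Conv2d(dim, dim, 1)                                  # linear layer W2 (1x1 conv)

    def forward(self, x):
        a = self.dconv(self.w1(x))  # weights A: each position aggregates its k×k neighborhood
        v = self.w2(x)              # values V
        return a * v                # Hadamard product ⊙ replaces Softmax(QKᵀ)V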
2.2 Advantages
- Compared with self-attention: relationships are built with convolutions, which is considerably more memory-efficient than self-attention when processing high-resolution images (see the cost sketch after this list).
- Compared with classic residual blocks: thanks to the modulation operation, the block adapts to the input content.
- Use of large-kernel convolutions: ConvNeXt benefits from enlarging the kernel from 3 to 7, but further increases yield almost no gain while adding compute; Conv2Former keeps improving as the kernel grows from 5×5 to 21×21, and defaults to 11×11 as a trade-off for model efficiency.
- Weighting strategy: the depthwise convolution output serves as weights that modulate the linearly projected features, and no activation or normalization layer (such as Sigmoid or Lp normalization) is used before the Hadamard product; this is a key factor in its good performance, e.g. adding a Sigmoid degrades accuracy by more than 0.5%.
- Experimental results: on ImageNet classification, COCO object detection, and ADE20K semantic segmentation, Conv2Former outperforms previously popular ConvNets and most Transformer-based models.
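The memory/compute argument in the first point can be illustrated with a rough operation count (a back-of-the-envelope sketch that ignores constant factors and the linear projections):

# Self-attention scales quadratically with the number of tokens N = H*W,
# while convolutional modulation scales linearly in N for a fixed kernel k.
def attn_cost(h, w, c):
    n = h * w
    return n * n * c          # Softmax(QKᵀ) similarity matrix dominates: O(N²·C)

def convmod_cost(h, w, c, k=11):
    n = h * w
    return n * k * k * c      # depthwise k×k conv + Hadamard product: O(N·k²·C)

for size in (14, 28, 56, 112):  # increasing feature-map resolution
    ratio = attn_cost(size, size, 256) / convmod_cost(size, size, 256)
    print(f"{size}x{size}: attention / conv-modulation cost ≈ {ratio:.1f}x")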
Paper: https://arxiv.org/pdf/2211.11943
Code: https://github.com/HVision-NKU/Conv2Former
3. Conv2Former Implementation Code

The implementation of the Conv2Former module is as follows:
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from timm.models.vision_transformer import _cfg
import math
from ultralytics.nn.modules.conv import LightConv
from ultralytics.utils.torch_utils import fuse_conv_and_bn
def autopad(k, p=None, d=1):  # kernel, padding, dilation
    """Pad to 'same' shape outputs."""
    if d > 1:
        k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k]  # actual kernel-size
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
    return p


class Conv(nn.Module):
    """Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)."""
    default_act = nn.SiLU()  # default activation

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
        """Initialize Conv layer with given arguments including activation."""
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()

    def forward(self, x):
        """Apply convolution, batch normalization and activation to input tensor."""
        return self.act(self.bn(self.conv(x)))

    def forward_fuse(self, x):
        """Apply convolution and activation with batch normalization fused into the conv weights."""
        return self.act(self.conv(x))
class MLP(nn.Module):
    def __init__(self, dim, mlp_ratio=4):
        super().__init__()
        self.norm = LayerNorm(dim, eps=1e-6, data_format="channels_first")
        self.fc1 = nn.Conv2d(dim, dim * mlp_ratio, 1)
        self.pos = nn.Conv2d(dim * mlp_ratio, dim * mlp_ratio, 3, padding=1, groups=dim * mlp_ratio)
        self.fc2 = nn.Conv2d(dim * mlp_ratio, dim, 1)
        self.act = nn.GELU()

    def forward(self, x):
        x = self.norm(x)
        x = self.fc1(x)
        x = self.act(x)
        x = x + self.act(self.pos(x))  # depthwise conv branch acts as a positional encoding
        x = self.fc2(x)
        return x


class Conv2FormerBlock(nn.Module):
    def __init__(self, dim, mlp_ratio=4, drop_path=0.):
        super().__init__()
        self.attn = ConvMod(dim)
        self.mlp = MLP(dim, mlp_ratio)
        layer_scale_init_value = 1e-6
        self.layer_scale_1 = nn.Parameter(
            layer_scale_init_value * torch.ones((dim)), requires_grad=True)
        self.layer_scale_2 = nn.Parameter(
            layer_scale_init_value * torch.ones((dim)), requires_grad=True)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        x = x + self.drop_path(self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * self.attn(x))
        x = x + self.drop_path(self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * self.mlp(x))
        return x


class LayerNorm(nn.Module):
    r"""LayerNorm supporting channels_last and channels_first layouts.
    From ConvNeXt (https://arxiv.org/pdf/2201.03545.pdf).
    """
    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ["channels_last", "channels_first"]:
            raise NotImplementedError
        self.normalized_shape = (normalized_shape, )

    def forward(self, x):
        if self.data_format == "channels_last":
            return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        elif self.data_format == "channels_first":
            u = x.mean(1, keepdim=True)
            s = (x - u).pow(2).mean(1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.eps)
            x = self.weight[:, None, None] * x + self.bias[:, None, None]
            return x


class ConvMod(nn.Module):
    """Convolutional modulation: Z = A ⊙ V with A = DConv_{11×11}(W1·X) and V = W2·X."""
    def __init__(self, dim):
        super().__init__()
        self.norm = LayerNorm(dim, eps=1e-6, data_format="channels_first")
        self.a = nn.Sequential(
            nn.Conv2d(dim, dim, 1),
            nn.GELU(),
            nn.Conv2d(dim, dim, 11, padding=5, groups=dim)
        )
        self.v = nn.Conv2d(dim, dim, 1)
        self.proj = nn.Conv2d(dim, dim, 1)

    def forward(self, x):
        x = self.norm(x)
        a = self.a(x)
        x = a * self.v(x)  # Hadamard product
        x = self.proj(x)
        return x


class Conv2Formers(nn.Module):
    """CSP-style wrapper that stacks n Conv2FormerBlocks."""
    def __init__(self, c1, c2, n=1, shortcut=False, e=0.5):
        super().__init__()
        self.c = int(c2 * e)
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv((2 + n) * self.c, c2, 1)
        self.cb = nn.ModuleList(Conv2FormerBlock(self.c) for _ in range(n))

    def forward(self, x):
        y = list(self.cv1(x).split((self.c, self.c), 1))
        y.extend(cb(y[-1]) for cb in self.cb)
        return self.cv2(torch.cat(y, 1))
class HGBlock_Conv2Formers(nn.Module):
    """
    HG_Block of PPHGNetV2, extended with a Conv2Formers block on its output.
    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py
    """

    def __init__(self, c1, cm, c2, k=3, n=6, lightconv=False, shortcut=False, act=nn.ReLU()):
        """Initializes the HG_Block with the specified input and output channels."""
        super().__init__()
        block = LightConv if lightconv else Conv
        self.m = nn.ModuleList(block(c1 if i == 0 else cm, cm, k=k, act=act) for i in range(n))
        self.sc = Conv(c1 + n * cm, c2 // 2, 1, 1, act=act)  # squeeze conv
        self.ec = Conv(c2 // 2, c2, 1, 1, act=act)  # excitation conv
        self.add = shortcut and c1 == c2
        self.cv = Conv2Formers(c2, c2)  # operates on the c2-channel output of self.ec

    def forward(self, x):
        """Forward pass of a PPHGNetV2 backbone layer."""
        y = [x]
        y.extend(m(y[-1]) for m in self.m)
        y = self.cv(self.ec(self.sc(torch.cat(y, 1))))
        return y + x if self.add else y
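Before wiring the modules into RT-DETR, a quick shape sanity check can be appended to the bottom of Conv2Former.py (a sketch; the channel sizes mirror the YAML configs in Section 6):

if __name__ == "__main__":
    x = torch.randn(1, 512, 40, 40)  # dummy P4-level feature map

    m1 = Conv2Formers(512, 512)  # first improvement
    print(m1(x).shape)           # expected: torch.Size([1, 512, 40, 40])

    m2 = HGBlock_Conv2Formers(512, 192, 512, k=5, n=6, lightconv=True, shortcut=True)
    print(m2(x).shape)           # expected: torch.Size([1, 512, 40, 40])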
4. Innovation Modules

4.1 Improvement 1 ⭐

Module improvement method: insert the Conv2Formers module directly (the integration steps are explained in Section 5).
4.2 Improvement 2 ⭐

Module improvement method: an HGBlock built on the Conv2Former module (the integration steps are explained in Section 5).

The second improvement modifies the HGBlock module in RT-DETR by integrating Conv2Former into it. Conv2Formers builds relationships with convolutions, which is more memory-efficient than self-attention on high-resolution images while still capturing global information; its large-kernel convolution compensates for the limits of ordinary convolutions, improving network performance and giving RT-DETR richer feature representations.
The modified code is as follows. First, modify the HGBlock module by adding the Conv2Former module, and rename it HGBlock_Conv2Formers.
class HGBlock_Conv2Formers(nn.Module):
    """
    HG_Block of PPHGNetV2, extended with a Conv2Formers block on its output.
    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py
    """

    def __init__(self, c1, cm, c2, k=3, n=6, lightconv=False, shortcut=False, act=nn.ReLU()):
        """Initializes the HG_Block with the specified input and output channels."""
        super().__init__()
        block = LightConv if lightconv else Conv
        self.m = nn.ModuleList(block(c1 if i == 0 else cm, cm, k=k, act=act) for i in range(n))
        self.sc = Conv(c1 + n * cm, c2 // 2, 1, 1, act=act)  # squeeze conv
        self.ec = Conv(c2 // 2, c2, 1, 1, act=act)  # excitation conv
        self.add = shortcut and c1 == c2
        self.cv = Conv2Formers(c2, c2)  # operates on the c2-channel output of self.ec

    def forward(self, x):
        """Forward pass of a PPHGNetV2 backbone layer."""
        y = [x]
        y.extend(m(y[-1]) for m in self.m)
        y = self.cv(self.ec(self.sc(torch.cat(y, 1))))
        return y + x if self.add else y
Note ❗: the module names that need to be declared in Section 5 are Conv2Formers and HGBlock_Conv2Formers.
5. Integration Steps

5.1 Step 1

① Create an AddModules folder under the ultralytics/nn/ directory to hold the module code.

② Create Conv2Former.py inside the AddModules folder and paste in the code from Section 3.
5.2 Step 2

Create __init__.py in the AddModules folder (skip this if it already exists) and import the module inside it:
from .Conv2Former import *
5.3 Step 3

In the ultralytics/nn/tasks.py file, the module class names need to be added in two places: first, import the modules; then register Conv2Formers and HGBlock_Conv2Formers in the parse_model function.
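The two edits look roughly like this (a sketch based on the usual parse_model layout; exact placement depends on your Ultralytics version — the simplest route for HGBlock_Conv2Formers is to add it to the existing HGStem/HGBlock branch, while Conv2Formers is handled by the snippet below):

# 1) Near the other module imports at the top of tasks.py:
from ultralytics.nn.AddModules import Conv2Formers, HGBlock_Conv2Formers

# 2) Inside parse_model, extend the existing HGStem/HGBlock branch so the
#    new block's channel arguments are parsed the same way:
elif m in {HGStem, HGBlock, HGBlock_Conv2Formers}:
    cm, c2 = args[0], args[1]
    args = [ch[f], cm, c2, *args[2:]]
    if m in {HGBlock, HGBlock_Conv2Formers}:
        args.insert(4, n)  # number of repeats
        n = 1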
Finally, the following code also needs to be added to this file:
elif m in [Conv2Formers]:
    c1, c2 = ch[f], args[0]
    if c2 != nc:  # if not output
        c2 = make_divisible(min(c2, max_channels) * width, 8)
    args = [c1, c2, *args[1:]]
    if m in [Conv2Formers]:
        args.insert(2, n)  # number of repeats
        n = 1
6. YAML Model Files

6.1 Improved Model Version 1 ⭐

Taking ultralytics/cfg/models/rt-detr/rtdetr-l.yaml as an example, create a model file named rtdetr-l-Conv2Former.yaml in the same directory for training on your own dataset. Copy the contents of rtdetr-l.yaml into rtdetr-l-Conv2Former.yaml and set nc to the number of object classes in your dataset.

📌 The modification replaces the HGBlock modules in the backbone with Conv2Formers modules.
# Ultralytics YOLO 🚀, AGPL-3.0 license
# RT-DETR-l object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/models/rtdetr

# Parameters
nc: 1 # number of classes
scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n'
  # [depth, width, max_channels]
  l: [1.00, 1.00, 1024]

backbone:
  # [from, repeats, module, args]
  - [-1, 1, HGStem, [32, 48]] # 0-P2/4
  - [-1, 6, HGBlock, [48, 128, 3]] # stage 1

  - [-1, 1, DWConv, [128, 3, 2, 1, False]] # 2-P3/8
  - [-1, 6, HGBlock, [96, 512, 3]] # stage 2

  - [-1, 1, DWConv, [512, 3, 2, 1, False]] # 4-P4/16
  - [-1, 6, Conv2Formers, [512]] # c2
  - [-1, 6, Conv2Formers, [512]]
  - [-1, 6, Conv2Formers, [512]] # stage 3

  - [-1, 1, DWConv, [1024, 3, 2, 1, False]] # 8-P5/32
  - [-1, 6, HGBlock, [384, 2048, 5, True, False]] # stage 4

head:
  - [-1, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 10 input_proj.2
  - [-1, 1, AIFI, [1024, 8]]
  - [-1, 1, Conv, [256, 1, 1]] # 12, Y5, lateral_convs.0

  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
  - [7, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 14 input_proj.1
  - [[-2, -1], 1, Concat, [1]]
  - [-1, 3, RepC3, [256]] # 16, fpn_blocks.0
  - [-1, 1, Conv, [256, 1, 1]] # 17, Y4, lateral_convs.1

  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
  - [3, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 19 input_proj.0
  - [[-2, -1], 1, Concat, [1]] # cat backbone P4
  - [-1, 3, RepC3, [256]] # X3 (21), fpn_blocks.1

  - [-1, 1, Conv, [256, 3, 2]] # 22, downsample_convs.0
  - [[-1, 17], 1, Concat, [1]] # cat Y4
  - [-1, 3, RepC3, [256]] # F4 (24), pan_blocks.0

  - [-1, 1, Conv, [256, 3, 2]] # 25, downsample_convs.1
  - [[-1, 12], 1, Concat, [1]] # cat Y5
  - [-1, 3, RepC3, [256]] # F5 (27), pan_blocks.1

  - [[21, 24, 27], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5)
6.2 Improved Model Version 2 ⭐

Again taking ultralytics/cfg/models/rt-detr/rtdetr-l.yaml as an example, create a model file named rtdetr-l-HGBlock_Conv2Formers.yaml in the same directory for training on your own dataset. Copy the contents of rtdetr-l.yaml into rtdetr-l-HGBlock_Conv2Formers.yaml and set nc to the number of object classes in your dataset.

📌 The modification replaces the HGBlock modules in the backbone with HGBlock_Conv2Formers modules.
# Ultralytics YOLO 🚀, AGPL-3.0 license
# RT-DETR-l object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/models/rtdetr

# Parameters
nc: 1 # number of classes
scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n'
  # [depth, width, max_channels]
  l: [1.00, 1.00, 1024]

backbone:
  # [from, repeats, module, args]
  - [-1, 1, HGStem, [32, 48]] # 0-P2/4
  - [-1, 6, HGBlock, [48, 128, 3]] # stage 1

  - [-1, 1, DWConv, [128, 3, 2, 1, False]] # 2-P3/8
  - [-1, 6, HGBlock, [96, 512, 3]] # stage 2

  - [-1, 1, DWConv, [512, 3, 2, 1, False]] # 4-P4/16
  - [-1, 6, HGBlock_Conv2Formers, [192, 512, 5, True, False]] # cm, c2, k, light, shortcut
  - [-1, 6, HGBlock_Conv2Formers, [192, 512, 5, True, True]]
  - [-1, 6, HGBlock_Conv2Formers, [192, 512, 5, True, True]] # stage 3

  - [-1, 1, DWConv, [1024, 3, 2, 1, False]] # 8-P5/32
  - [-1, 6, HGBlock, [384, 2048, 5, True, False]] # stage 4

head:
  - [-1, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 10 input_proj.2
  - [-1, 1, AIFI, [1024, 8]]
  - [-1, 1, Conv, [256, 1, 1]] # 12, Y5, lateral_convs.0

  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
  - [7, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 14 input_proj.1
  - [[-2, -1], 1, Concat, [1]]
  - [-1, 3, RepC3, [256]] # 16, fpn_blocks.0
  - [-1, 1, Conv, [256, 1, 1]] # 17, Y4, lateral_convs.1

  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
  - [3, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 19 input_proj.0
  - [[-2, -1], 1, Concat, [1]] # cat backbone P4
  - [-1, 3, RepC3, [256]] # X3 (21), fpn_blocks.1

  - [-1, 1, Conv, [256, 3, 2]] # 22, downsample_convs.0
  - [[-1, 17], 1, Concat, [1]] # cat Y4
  - [-1, 3, RepC3, [256]] # F4 (24), pan_blocks.0

  - [-1, 1, Conv, [256, 3, 2]] # 25, downsample_convs.1
  - [[-1, 12], 1, Concat, [1]] # cat Y5
  - [-1, 3, RepC3, [256]] # F5 (27), pan_blocks.1

  - [[21, 24, 27], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5)
7. Verifying a Successful Run

Printing the network model shows that Conv2Formers and HGBlock_Conv2Formers have been added to the model, which is now ready for training.
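To reproduce the printouts below, the modified YAML can be loaded directly through the standard Ultralytics API (a minimal sketch; coco8.yaml is only a placeholder dataset):

from ultralytics import RTDETR

# Build the modified model from the custom YAML and print its structure.
model = RTDETR("ultralytics/cfg/models/rt-detr/rtdetr-l-Conv2Former.yaml")
model.info()

# Train on your own dataset (replace coco8.yaml with your data config).
model.train(data="coco8.yaml", epochs=100, imgsz=640)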
rtdetr-l-Conv2Formers:
rtdetr-l-Conv2Formers summary: 949 layers, 52,490,563 parameters, 52,490,563 gradients, 170.7 GFLOPs
from n params module arguments
0 -1 1 25248 ultralytics.nn.modules.block.HGStem [3, 32, 48]
1 -1 6 155072 ultralytics.nn.modules.block.HGBlock [48, 48, 128, 3, 6]
2 -1 1 1408 ultralytics.nn.modules.conv.DWConv [128, 128, 3, 2, 1, False]
3 -1 6 839296 ultralytics.nn.modules.block.HGBlock [128, 96, 512, 3, 6]
4 -1 1 5632 ultralytics.nn.modules.conv.DWConv [512, 512, 3, 2, 1, False]
5 -1 6 8540160 ultralytics.nn.AddModules.Conv2Former.Conv2Formers[512, 512]
6 -1 6 8540160 ultralytics.nn.AddModules.Conv2Former.Conv2Formers[512, 512]
7 -1 6 8540160 ultralytics.nn.AddModules.Conv2Former.Conv2Formers[512, 512]
8 -1 1 11264 ultralytics.nn.modules.conv.DWConv [512, 1024, 3, 2, 1, False]
9 -1 6 6708480 ultralytics.nn.modules.block.HGBlock [1024, 384, 2048, 5, 6, True, False]
10 -1 1 524800 ultralytics.nn.modules.conv.Conv [2048, 256, 1, 1, None, 1, 1, False]
11 -1 1 789760 ultralytics.nn.modules.transformer.AIFI [256, 1024, 8]
12 -1 1 66048 ultralytics.nn.modules.conv.Conv [256, 256, 1, 1]
13 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest']
14 7 1 131584 ultralytics.nn.modules.conv.Conv [512, 256, 1, 1, None, 1, 1, False]
15 [-2, -1] 1 0 ultralytics.nn.modules.conv.Concat [1]
16 -1 3 2232320 ultralytics.nn.modules.block.RepC3 [512, 256, 3]
17 -1 1 66048 ultralytics.nn.modules.conv.Conv [256, 256, 1, 1]
18 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest']
19 3 1 131584 ultralytics.nn.modules.conv.Conv [512, 256, 1, 1, None, 1, 1, False]
20 [-2, -1] 1 0 ultralytics.nn.modules.conv.Concat [1]
21 -1 3 2232320 ultralytics.nn.modules.block.RepC3 [512, 256, 3]
22 -1 1 590336 ultralytics.nn.modules.conv.Conv [256, 256, 3, 2]
23 [-1, 17] 1 0 ultralytics.nn.modules.conv.Concat [1]
24 -1 3 2232320 ultralytics.nn.modules.block.RepC3 [512, 256, 3]
25 -1 1 590336 ultralytics.nn.modules.conv.Conv [256, 256, 3, 2]
26 [-1, 12] 1 0 ultralytics.nn.modules.conv.Concat [1]
27 -1 3 2232320 ultralytics.nn.modules.block.RepC3 [512, 256, 3]
28 [21, 24, 27] 1 7303907 ultralytics.nn.modules.head.RTDETRDecoder [1, [256, 256, 256]]
rtdetr-l-Conv2Formers summary: 949 layers, 52,490,563 parameters, 52,490,563 gradients, 170.7 GFLOPs
rtdetr-l-HGBlock_Conv2Formers:
rtdetr-l-HGBlock_Conv2Formers summary: 755 layers, 33,764,035 parameters, 33,764,035 gradients, 111.0 GFLOPs
from n params module arguments
0 -1 1 25248 ultralytics.nn.modules.block.HGStem [3, 32, 48]
1 -1 6 155072 ultralytics.nn.modules.block.HGBlock [48, 48, 128, 3, 6]
2 -1 1 1408 ultralytics.nn.modules.conv.DWConv [128, 128, 3, 2, 1, False]
3 -1 6 839296 ultralytics.nn.modules.block.HGBlock [128, 96, 512, 3, 6]
4 -1 1 5632 ultralytics.nn.modules.conv.DWConv [512, 512, 3, 2, 1, False]
5 -1 6 2297984 ultralytics.nn.AddModules.Conv2Former.HGBlock_Conv2Formers[512, 192, 512, 5, 6, True, False]
6 -1 6 2297984 ultralytics.nn.AddModules.Conv2Former.HGBlock_Conv2Formers[512, 192, 512, 5, 6, True, True]
7 -1 6 2297984 ultralytics.nn.AddModules.Conv2Former.HGBlock_Conv2Formers[512, 192, 512, 5, 6, True, True]
8 -1 1 11264 ultralytics.nn.modules.conv.DWConv [512, 1024, 3, 2, 1, False]
9 -1 6 6708480 ultralytics.nn.modules.block.HGBlock [1024, 384, 2048, 5, 6, True, False]
10 -1 1 524800 ultralytics.nn.modules.conv.Conv [2048, 256, 1, 1, None, 1, 1, False]
11 -1 1 789760 ultralytics.nn.modules.transformer.AIFI [256, 1024, 8]
12 -1 1 66048 ultralytics.nn.modules.conv.Conv [256, 256, 1, 1]
13 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest']
14 7 1 131584 ultralytics.nn.modules.conv.Conv [512, 256, 1, 1, None, 1, 1, False]
15 [-2, -1] 1 0 ultralytics.nn.modules.conv.Concat [1]
16 -1 3 2232320 ultralytics.nn.modules.block.RepC3 [512, 256, 3]
17 -1 1 66048 ultralytics.nn.modules.conv.Conv [256, 256, 1, 1]
18 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest']
19 3 1 131584 ultralytics.nn.modules.conv.Conv [512, 256, 1, 1, None, 1, 1, False]
20 [-2, -1] 1 0 ultralytics.nn.modules.conv.Concat [1]
21 -1 3 2232320 ultralytics.nn.modules.block.RepC3 [512, 256, 3]
22 -1 1 590336 ultralytics.nn.modules.conv.Conv [256, 256, 3, 2]
23 [-1, 17] 1 0 ultralytics.nn.modules.conv.Concat [1]
24 -1 3 2232320 ultralytics.nn.modules.block.RepC3 [512, 256, 3]
25 -1 1 590336 ultralytics.nn.modules.conv.Conv [256, 256, 3, 2]
26 [-1, 12] 1 0 ultralytics.nn.modules.conv.Concat [1]
27 -1 3 2232320 ultralytics.nn.modules.block.RepC3 [512, 256, 3]
28 [21, 24, 27] 1 7303907 ultralytics.nn.modules.head.RTDETRDecoder [1, [256, 256, 256]]
rtdetr-l-HGBlock_Conv2Formers summary: 755 layers, 33,764,035 parameters, 33,764,035 gradients, 111.0 GFLOPs