Shortcuts

Source code for mmyolo.models.necks.yolov5_pafpn

# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Union

import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.models.backbones.csp_darknet import CSPLayer
from mmdet.utils import ConfigType, OptMultiConfig

from mmyolo.registry import MODELS
from ..utils import make_divisible, make_round
from .base_yolo_neck import BaseYOLONeck


@MODELS.register_module()
class YOLOv5PAFPN(BaseYOLONeck):
    """Path Aggregation Network used in YOLOv5.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        num_csp_blocks (int): Number of bottlenecks in CSPLayer.
            Defaults to 1.
        freeze_all (bool): Whether to freeze the model.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (dict or list[dict], optional): Initialization config
            dict. Defaults to None.
    """

    def __init__(self,
                 in_channels: List[int],
                 out_channels: Union[List[int], int],
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 num_csp_blocks: int = 1,
                 freeze_all: bool = False,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None):
        # Must be set before super().__init__, which invokes the
        # build_* hooks below that read this attribute.
        self.num_csp_blocks = num_csp_blocks
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            deepen_factor=deepen_factor,
            widen_factor=widen_factor,
            freeze_all=freeze_all,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            init_cfg=init_cfg)

    def init_weights(self):
        """Initialize the parameters.

        When no ``init_cfg`` is given, reset every ``Conv2d`` to PyTorch's
        default initialization for consistency with the original YOLOv5
        implementation; otherwise defer to the configured initializer.
        """
        # NOTE: the docstring above was previously placed inside the `if`
        # branch, where it was a dead string expression rather than the
        # method docstring.
        if self.init_cfg is None:
            for m in self.modules():
                if isinstance(m, torch.nn.Conv2d):
                    # In order to be consistent with the source code,
                    # reset the Conv2d initialization parameters.
                    m.reset_parameters()
        else:
            super().init_weights()

    def build_reduce_layer(self, idx: int) -> nn.Module:
        """Build reduce layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The reduce layer.
        """
        if idx == len(self.in_channels) - 1:
            # Only the deepest scale gets a 1x1 conv that shrinks its
            # channels to match the next-shallower scale before upsampling.
            layer = ConvModule(
                make_divisible(self.in_channels[idx], self.widen_factor),
                make_divisible(self.in_channels[idx - 1], self.widen_factor),
                1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        else:
            layer = nn.Identity()

        return layer

    def build_upsample_layer(self, *args, **kwargs) -> nn.Module:
        """Build upsample layer."""
        return nn.Upsample(scale_factor=2, mode='nearest')

    def build_top_down_layer(self, idx: int):
        """Build top down layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The top down layer.
        """
        if idx == 1:
            # Shallowest top-down stage: CSP fusion only, no further
            # channel reduction is needed.
            return CSPLayer(
                make_divisible(self.in_channels[idx - 1] * 2,
                               self.widen_factor),
                make_divisible(self.in_channels[idx - 1], self.widen_factor),
                num_blocks=make_round(self.num_csp_blocks, self.deepen_factor),
                add_identity=False,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        else:
            # Deeper stages append a 1x1 conv so the output matches the
            # channel count expected by the next upsample/concat step.
            return nn.Sequential(
                CSPLayer(
                    make_divisible(self.in_channels[idx - 1] * 2,
                                   self.widen_factor),
                    make_divisible(self.in_channels[idx - 1],
                                   self.widen_factor),
                    num_blocks=make_round(self.num_csp_blocks,
                                          self.deepen_factor),
                    add_identity=False,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg),
                ConvModule(
                    make_divisible(self.in_channels[idx - 1],
                                   self.widen_factor),
                    make_divisible(self.in_channels[idx - 2],
                                   self.widen_factor),
                    kernel_size=1,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))

    def build_downsample_layer(self, idx: int) -> nn.Module:
        """Build downsample layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The downsample layer.
        """
        # Stride-2 3x3 conv halves spatial resolution on the bottom-up path.
        return ConvModule(
            make_divisible(self.in_channels[idx], self.widen_factor),
            make_divisible(self.in_channels[idx], self.widen_factor),
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        """Build bottom up layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The bottom up layer.
        """
        return CSPLayer(
            make_divisible(self.in_channels[idx] * 2, self.widen_factor),
            make_divisible(self.in_channels[idx + 1], self.widen_factor),
            num_blocks=make_round(self.num_csp_blocks, self.deepen_factor),
            add_identity=False,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_out_layer(self, *args, **kwargs) -> nn.Module:
        """Build out layer."""
        # Heads consume the bottom-up outputs directly; no extra conv here.
        return nn.Identity()
Read the Docs v: latest
Versions
latest
stable
dev
Downloads
pdf
html
epub
On Read the Docs
Project Home
Builds

Free document hosting provided by Read the Docs.