from typing import Dict, Tuple, Union
from torch import nn
NORM_LAYERS = dict(
    BN=nn.BatchNorm2d,
    BN1d=nn.BatchNorm1d,
    BN2d=nn.BatchNorm2d,
    BN3d=nn.BatchNorm3d,
    # SyncBN and GN are registered so the dedicated branches in
    # build_norm_layer below are reachable.
    SyncBN=nn.SyncBatchNorm,
    GN=nn.GroupNorm,
    LN=nn.LayerNorm,
    IN=nn.InstanceNorm2d,
)
def build_norm_layer(cfg: Dict,
                     num_features: int,
                     postfix: Union[int, str] = '') -> Tuple[str, nn.Module]:
"""Build normalization layer. Modified from openmmlab.
:param cfg: (dict) The norm layer config, which should contain:
- type (str): Layer type.
- layer args: Args needed to instantiate a norm layer.
- requires_grad (bool, optional): Whether stop gradient updates.
:param num_features: (int) Number of input channels.
:param postfix: (int | str) The postfix to be appended into norm abbreviation
to create named layer.
:returns: tuple[str, nn.Module]: The first element is the layer name consisting
of abbreviation and postfix, e.g., bn1, gn. The second element is the
created norm layer.
"""
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    cfg_ = cfg.copy()

    layer_type = cfg_.pop('type')
    if layer_type not in NORM_LAYERS:
        raise KeyError(f'Unrecognized norm type {layer_type}')
    norm_layer = NORM_LAYERS.get(layer_type)

    abbr = layer_type.lower()
    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    requires_grad = cfg_.pop('requires_grad', True)
    cfg_.setdefault('eps', 1e-5)
    if layer_type != 'GN':
        # Most norm layers take the channel count as the first positional argument.
        layer = norm_layer(num_features, **cfg_)
        if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'):
            layer._specify_ddp_gpu_num(1)
    else:
        # GroupNorm requires 'num_groups' in the config and takes the channel
        # count as the 'num_channels' keyword instead.
        assert 'num_groups' in cfg_
        layer = norm_layer(num_channels=num_features, **cfg_)

    for param in layer.parameters():
        param.requires_grad = requires_grad

    return name, layer
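

# A minimal usage sketch (not part of the original module): the config keys
# follow the docstring above, and the channel/group sizes are arbitrary
# example values.
if __name__ == '__main__':
    name, layer = build_norm_layer(dict(type='BN', requires_grad=True), num_features=64)
    print(name)                               # 'bn'
    print(isinstance(layer, nn.BatchNorm2d))  # True

    # GroupNorm is registered as 'GN' in NORM_LAYERS above; it needs
    # 'num_groups' in the config and uses num_features as num_channels.
    name, layer = build_norm_layer(dict(type='GN', num_groups=8), num_features=64, postfix=1)
    print(name)                               # 'gn1'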