Unverified Commit e96c74b5 authored by Yonghye Kwon, committed by GitHub

Simpler code for DWConvClass (#4310)

* simpler code for DWConvClass

* remove DWConv function

* Replace DWConvClass with DWConv
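
Note: the first diff below replaces the module-level DWConv() helper with the former DWConvClass, renamed to DWConv; the depth-wise behavior now comes from forwarding g=math.gcd(c1, c2) to Conv.__init__ rather than rebuilding self.conv by hand. As a minimal sketch of why that groups value makes the convolution depth-wise (plain PyTorch, hypothetical channel counts, not part of this commit):

    import math
    import torch.nn as nn

    c1, c2 = 64, 64                # hypothetical in/out channel counts
    g = math.gcd(c1, c2)           # 64 -> one filter group per input channel
    dw = nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1, groups=g, bias=False)
    print(dw.weight.shape)         # torch.Size([64, 1, 3, 3]): each filter sees a single channel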
parent f409d8e5
@@ -29,11 +29,6 @@ def autopad(k, p=None):  # kernel, padding
     return p


-def DWConv(c1, c2, k=1, s=1, act=True):
-    # Depth-wise convolution function
-    return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
-
-
 class Conv(nn.Module):
     # Standard convolution
     def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
@@ -49,11 +44,10 @@ class Conv(nn.Module):
         return self.act(self.conv(x))


-class DWConvClass(Conv):
+class DWConv(Conv):
     # Depth-wise convolution class
     def __init__(self, c1, c2, k=1, s=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
-        super().__init__(c1, c2, k, s, act)
-        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k), groups=math.gcd(c1, c2), bias=False)
+        super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act)


 class TransformerLayer(nn.Module):
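
The class form builds the same underlying layer the removed function returned, while also being a distinct type that isinstance() can match (used by Model.fuse() in the next hunk). A usage sketch, assuming Conv and DWConv are importable from models.common as defined above (channel/kernel values hypothetical):

    import math
    from models.common import Conv, DWConv

    m = DWConv(32, 32, k=3, s=1)
    assert isinstance(m, Conv)                 # subclass, so existing Conv code paths still apply
    assert m.conv.groups == math.gcd(32, 32)   # depth-wise grouping preserved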
@@ -202,7 +202,7 @@ class Model(nn.Module):
     def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
         LOGGER.info('Fusing layers... ')
         for m in self.model.modules():
-            if isinstance(m, (Conv, DWConvClass)) and hasattr(m, 'bn'):
+            if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
                 m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                 delattr(m, 'bn')  # remove batchnorm
                 m.forward = m.forward_fuse  # update forward
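
For context, fuse_conv_and_bn() folds each BatchNorm2d into the preceding Conv2d so the fused model runs one op per layer at inference. The sketch below is an illustrative re-derivation of that standard folding, not the project's implementation (YOLOv5 keeps its own in utils.torch_utils):

    import torch
    import torch.nn as nn

    def fold_bn_into_conv(conv: nn.Conv2d, bn: nn.BatchNorm2d) -> nn.Conv2d:
        # y = gamma * (conv(x) - mean) / sqrt(var + eps) + beta collapses into one
        # convolution with per-channel rescaled weights and a shifted bias.
        fused = nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,
                          conv.stride, conv.padding, conv.dilation, conv.groups, bias=True)
        scale = bn.weight.data / torch.sqrt(bn.running_var + bn.eps)  # gamma / sigma, per channel
        fused.weight.data = conv.weight.data * scale.reshape(-1, 1, 1, 1)
        b = conv.bias.data if conv.bias is not None else torch.zeros(conv.out_channels)
        fused.bias.data = (b - bn.running_mean) * scale + bn.bias.data
        return fused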