Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-08-22 06:23:20 -07:00)
Added SeparableConv2d.py
parent acfc78bfd5
commit ecf7eb083a
1 changed file with 103 additions and 0 deletions
103  core/leras/layers/SeparableConv2D.py  Normal file
@@ -0,0 +1,103 @@
import numpy as np
from core.leras import nn
tf = nn.tf

class SeparableConv2D(nn.LayerBase):
    """
    Depthwise-separable 2D convolution: a per-channel depthwise convolution
    followed by a 1x1 pointwise convolution that mixes channels.

    default kernel_initializer - CA
    """
    def __init__(self, in_ch, out_ch, kernel_size, depth_multiplier=1, strides=1, padding='SAME', dilations=1, use_bias=True, kernel_initializer=None, bias_initializer=None, trainable=True, dtype=None, **kwargs ):
        if not isinstance(strides, int):
            raise ValueError ("strides must be an int type")
        if not isinstance(dilations, int):
            raise ValueError ("dilations must be an int type")
        kernel_size = int(kernel_size)

        if dtype is None:
            dtype = nn.floatx

        if isinstance(padding, str):
            if padding == "SAME":
                padding = ( (kernel_size - 1) * dilations + 1 ) // 2
            elif padding == "VALID":
                padding = 0
            else:
                raise ValueError ("Wrong padding type. Should be VALID, SAME, an int, or 4 pairs of ints.")

        if isinstance(padding, int):
            if padding != 0:
                if nn.data_format == "NHWC":
                    padding = [ [0,0], [padding,padding], [padding,padding], [0,0] ]
                else:
                    padding = [ [0,0], [0,0], [padding,padding], [padding,padding] ]
            else:
                padding = None

        if nn.data_format == "NHWC":
            strides = [1,strides,strides,1]
        else:
            strides = [1,1,strides,strides]

        # the dilation rate for tf.nn.separable_conv2d is (rate_h, rate_w) regardless of data format
        dilations = [dilations,dilations]

        self.in_ch = in_ch
        self.out_ch = out_ch
        self.kernel_size = kernel_size
        self.depth_multiplier = depth_multiplier
        self.strides = strides
        self.padding = padding
        self.dilations = dilations
        self.use_bias = use_bias
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
        self.trainable = trainable
        self.dtype = dtype
        super().__init__(**kwargs)

    def build_weights(self):
        kernel_initializer = self.kernel_initializer
        if kernel_initializer is None:
            kernel_initializer = nn.initializers.ca()

        # depthwise kernel: one k x k filter per input channel (times depth_multiplier)
        self.depthwise_kernel = tf.get_variable("depthwise_kernel", (self.kernel_size,self.kernel_size,self.in_ch,self.depth_multiplier), dtype=self.dtype, initializer=kernel_initializer, trainable=self.trainable )
        # pointwise kernel: 1x1 convolution mixing in_ch*depth_multiplier channels into out_ch
        self.pointwise_kernel = tf.get_variable("pointwise_kernel", (1,1,self.depth_multiplier*self.in_ch,self.out_ch), dtype=self.dtype, initializer=kernel_initializer, trainable=self.trainable )

        if self.use_bias:
            bias_initializer = self.bias_initializer
            if bias_initializer is None:
                bias_initializer = tf.initializers.zeros(dtype=self.dtype)

            self.bias = tf.get_variable("bias", (self.out_ch,), dtype=self.dtype, initializer=bias_initializer, trainable=self.trainable )

    def get_weights(self):
        weights = [self.depthwise_kernel, self.pointwise_kernel]
        if self.use_bias:
            weights += [self.bias]
        return weights

    def forward(self, x):
        depthwise_kernel = self.depthwise_kernel
        pointwise_kernel = self.pointwise_kernel

        # padding is applied manually here, so the convolution below always uses 'VALID'
        if self.padding is not None:
            x = tf.pad (x, self.padding, mode='CONSTANT')

        x = tf.nn.separable_conv2d(x, depthwise_kernel, pointwise_kernel, self.strides, 'VALID', self.dilations, data_format=nn.data_format)
        if self.use_bias:
            if nn.data_format == "NHWC":
                bias = tf.reshape (self.bias, (1,1,1,self.out_ch) )
            else:
                bias = tf.reshape (self.bias, (1,self.out_ch,1,1) )
            x = tf.add(x, bias)
        return x

    def __str__(self):
        r = f"{self.__class__.__name__} : in_ch:{self.in_ch} out_ch:{self.out_ch} "
        return r

nn.SeparableConv2D = SeparableConv2D
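For reference, the two variables created in build_weights are the standard depthwise-then-pointwise factorization of a convolution: a k x k filter applied per input channel, followed by a 1x1 convolution that mixes the resulting channels. The sketch below is plain TF1-style TensorFlow, independent of the leras wrappers and with arbitrary illustrative shapes; it only checks that tf.nn.separable_conv2d matches an explicit depthwise_conv2d followed by a 1x1 conv2d, which is what the layer's two kernels feed into forward.

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x  = tf.constant( np.random.rand(1, 8, 8, 3).astype(np.float32) )
dw = tf.constant( np.random.rand(3, 3, 3, 2).astype(np.float32) )     # (k, k, in_ch, depth_multiplier)
pw = tf.constant( np.random.rand(1, 1, 3*2, 16).astype(np.float32) )  # (1, 1, in_ch*depth_multiplier, out_ch)

# fused separable convolution vs. explicit depthwise followed by 1x1 pointwise
fused    = tf.nn.separable_conv2d(x, dw, pw, [1,1,1,1], 'VALID')
two_step = tf.nn.conv2d( tf.nn.depthwise_conv2d(x, dw, [1,1,1,1], 'VALID'), pw, [1,1,1,1], 'VALID' )

with tf.Session() as sess:
    a, b = sess.run([fused, two_step])
    print( np.allclose(a, b, atol=1e-5) )   # expected: True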
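One design detail of the layer: padding='SAME' is converted in __init__ to an explicit amount, ((kernel_size - 1) * dilations + 1) // 2, which forward applies with tf.pad before always convolving with 'VALID'. The sketch below, again plain TensorFlow rather than the repo's wrapper and with assumed illustrative shapes, confirms that for stride 1 and an odd kernel this reproduces TensorFlow's own 'SAME' output; with strides > 1 the symmetric manual padding can differ from 'SAME', which pads asymmetrically when needed.

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

k, d = 3, 1
pad = ((k - 1) * d + 1) // 2                    # -> 1, the same formula used in __init__

x  = tf.constant( np.random.rand(1, 8, 8, 3).astype(np.float32) )
dw = tf.constant( np.random.rand(k, k, 3, 1).astype(np.float32) )
pw = tf.constant( np.random.rand(1, 1, 3, 8).astype(np.float32) )

same   = tf.nn.separable_conv2d(x, dw, pw, [1,1,1,1], 'SAME')
padded = tf.pad(x, [ [0,0], [pad,pad], [pad,pad], [0,0] ], mode='CONSTANT')
manual = tf.nn.separable_conv2d(padded, dw, pw, [1,1,1,1], 'VALID')

with tf.Session() as sess:
    a, b = sess.run([same, manual])
    print( a.shape, b.shape, np.allclose(a, b, atol=1e-5) )   # (1, 8, 8, 8) (1, 8, 8, 8) True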