Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-08-22 06:23:20 -07:00)
Formatting
Commit fcbc8b125c (parent 858ddf4079)
1 changed file with 134 additions and 117 deletions
@@ -11,6 +11,7 @@ from utils import std_utils
from .device import device
from interact import interact as io


class nnlib(object):
    device = device  # forwards nnlib.devicelib to device in order to use nnlib as standalone lib
    DeviceConfig = device.Config
@@ -36,7 +37,6 @@ class nnlib(object):
    code_import_dlib = None


    ResNet = None
    UNet = None
    UNetTemporalPredictor = None
@@ -115,7 +115,6 @@ UNetTemporalPredictor = nnlib.UNetTemporalPredictor
NLayerDiscriminator = nnlib.NLayerDiscriminator
"""


    @staticmethod
    def _import_tf(device_config):
        if nnlib.tf is not None:
@@ -165,7 +164,8 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
            nnlib._import_tf(device_config)
        elif nnlib.backend == "plaidML":
            os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
-           os.environ["PLAIDML_DEVICE_IDS"] = ",".join ( [ nnlib.device.getDeviceID(idx) for idx in device_config.gpu_idxs] )
+           os.environ["PLAIDML_DEVICE_IDS"] = ",".join(
+               [nnlib.device.getDeviceID(idx) for idx in device_config.gpu_idxs])

        # if "tensorflow" in nnlib.backend:
        #     nnlib.keras = nnlib.tf.keras
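For context, the plaidML branch above selects devices purely through environment variables that must be set before keras is imported. A hedged, minimal sketch of the same idea outside nnlib (the device ID strings are illustrative; nnlib builds the real list from device_config.gpu_idxs via device.getDeviceID):

# Minimal sketch, assuming plaidml-keras is installed. The ID strings below are
# made-up placeholders, not real plaidML device names.
import os

os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
os.environ["PLAIDML_DEVICE_IDS"] = ",".join(["opencl0", "opencl1"])

import keras  # keras now loads the plaidML backend configured above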
@@ -206,6 +206,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
        def modelify(model_functor):
            def func(tensor):
                return keras.models.Model(tensor, model_functor(tensor))

            return func

        nnlib.modelify = modelify
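The modelify helper above turns a functor that wires up Keras tensors into a keras.models.Model bound to a given input. A small usage sketch with standalone Keras 2.x; the Encoder architecture is illustrative, not taken from this diff:

# Usage sketch for the modelify pattern (Keras 2.x assumed).
# Encoder below is a toy functor, not a DeepFaceLab model.
import keras
from keras.layers import Input, Conv2D, LeakyReLU

def modelify(model_functor):
    def func(tensor):
        return keras.models.Model(tensor, model_functor(tensor))
    return func

@modelify
def Encoder(x):
    x = Conv2D(64, 5, strides=2, padding='same')(x)
    return LeakyReLU(0.1)(x)

encoder = Encoder(Input((128, 128, 3)))  # a ready keras.models.Model
encoder.summary()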
@@ -233,7 +234,9 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                    outputs += [K.conv2d(inputs[i], K.constant(gauss_kernel), strides=(1, 1), padding="same")]

                return K.concatenate(outputs, axis=-1)

            return func

        nnlib.gaussian_blur = gaussian_blur

        def style_loss(gaussian_blur_radius=0.0, loss_weight=1.0, wnd_size=0, step_size=1):
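gaussian_blur builds a fixed, normalized Gaussian kernel and convolves each channel with it via K.conv2d, so it can be used inside a loss (for example to soften a mask) without trainable weights. A rough NumPy-only sketch of the kernel construction; the exact radius-to-sigma mapping in nnlib may differ:

# Rough sketch of a normalized 2-D Gaussian kernel of the kind gaussian_blur
# feeds to K.conv2d. The sigma choice here is an assumption, not nnlib's exact code.
import numpy as np

def make_gauss_kernel(radius=2.0):
    size = int(radius) * 2 + 1
    x = np.arange(size) - int(radius)
    g = np.exp(-(x ** 2) / (2.0 * radius ** 2))
    kernel = np.outer(g, g)          # separable 1-D -> 2-D
    return kernel / kernel.sum()     # sums to 1.0, so blurring preserves brightness

print(make_gauss_kernel(2.0).shape)  # (5, 5)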
@@ -269,13 +272,17 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                    k = (sh - wnd_size) // step_size + 1
                    if gaussian_blur_radius > 0.0:
                        target, style = gblur(target), gblur(style)
-                   target = nnlib.tf.image.extract_image_patches(target, [1,k,k,1], [1,1,1,1], [1,step_size,step_size,1], 'VALID')
-                   style = nnlib.tf.image.extract_image_patches(style, [1,k,k,1], [1,1,1,1], [1,step_size,step_size,1], 'VALID')
+                   target = nnlib.tf.image.extract_image_patches(target, [1, k, k, 1], [1, 1, 1, 1],
+                                                                 [1, step_size, step_size, 1], 'VALID')
+                   style = nnlib.tf.image.extract_image_patches(style, [1, k, k, 1], [1, 1, 1, 1],
+                                                                [1, step_size, step_size, 1], 'VALID')
                    return sd(target, style, loss_weight)
                if nnlib.PML is not None:
                    print("Sorry, plaidML backend does not support style_loss")
                    return 0

            return func

        nnlib.style_loss = style_loss

        def dssim(kernel_size=11, k1=0.01, k2=0.03, max_value=1.0):
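In the TensorFlow branch of style_loss, tf.image.extract_image_patches slices the feature maps into windows so the style statistics can be compared patch by patch. A small TF 1.x-style sketch of what that call produces (shapes and the session boilerplate are illustrative):

# Sketch of tf.image.extract_image_patches (TF 1.x signature, as nnlib uses it):
# each ksize x ksize window is flattened into the channel axis of the output.
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 64, 64, 128])
patches = tf.image.extract_image_patches(x, ksizes=[1, 8, 8, 1], strides=[1, 8, 8, 1],
                                          rates=[1, 1, 1, 1], padding='VALID')

with tf.Session() as sess:
    out = sess.run(patches, {x: np.zeros((2, 64, 64, 128), np.float32)})
print(out.shape)  # (2, 8, 8, 8192) -- an 8x8 grid of windows, each 8*8*128 values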
@@ -461,6 +468,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
            GAN Custom Scal Layer
            Code borrows from https://github.com/flyyufelix/cnn_finetune
            """

            def __init__(self, weights=None, axis=-1, gamma_init='zero', **kwargs):
                self.axis = axis
                self.gamma_init = keras.initializers.get(gamma_init)
@@ -485,6 +493,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                config = {"axis": self.axis}
                base_config = super(Scale, self).get_config()
                return dict(list(base_config.items()) + list(config.items()))

        nnlib.Scale = Scale

        class Adam(keras.optimizers.Optimizer):
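The get_config shown here follows the standard Keras recipe for making a custom layer serializable: merge the base layer's config with the layer's own constructor arguments so load_model can rebuild it (given custom_objects). A toy illustration of the same recipe; ScaleLike is a hypothetical stand-in, not nnlib's Scale:

# Toy layer illustrating the get_config pattern (Keras 2.x assumed).
import keras

class ScaleLike(keras.layers.Layer):
    def __init__(self, axis=-1, **kwargs):
        self.axis = axis
        super(ScaleLike, self).__init__(**kwargs)

    def build(self, input_shape):
        self.gamma = self.add_weight(name='gamma', shape=(input_shape[self.axis],),
                                     initializer='zeros', trainable=True)
        super(ScaleLike, self).build(input_shape)

    def call(self, x):
        return x * (1.0 + self.gamma)

    def get_config(self):
        config = {'axis': self.axis}
        base_config = super(ScaleLike, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))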
@@ -590,15 +599,18 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                          'amsgrad': self.amsgrad}
                base_config = super(Adam, self).get_config()
                return dict(list(base_config.items()) + list(config.items()))

        nnlib.Adam = Adam

        def CAInitializerMP(conv_weights_list):
            # Convolution Aware Initialization https://arxiv.org/abs/1702.06295
-           result = CAInitializerMPSubprocessor ( [ (i, K.int_shape(conv_weights)) for i, conv_weights in enumerate(conv_weights_list) ], K.floatx(), K.image_data_format() ).run()
+           result = CAInitializerMPSubprocessor(
+               [(i, K.int_shape(conv_weights)) for i, conv_weights in enumerate(conv_weights_list)], K.floatx(),
+               K.image_data_format()).run()
            for idx, weights in result:
                K.set_value(conv_weights_list[idx], weights)
        nnlib.CAInitializerMP = CAInitializerMP

        nnlib.CAInitializerMP = CAInitializerMP

        if backend == "plaidML":
            class TileOP_ReflectionPadding2D(nnlib.PMLTile.Operation):
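CAInitializerMP computes Convolution Aware initial weights in a worker pool and writes them back into the graph with K.set_value. A hedged usage sketch, assuming nnlib.import_keras(...) has already run and that `model` is a built Keras model (both are assumptions, not shown in this diff):

# Hedged usage sketch for nnlib.CAInitializerMP: gather the kernel variables of
# the conv layers and let the initializer overwrite their values via K.set_value.
conv_weights = [layer.weights[0]                 # kernel variable of each conv layer
                for layer in model.layers
                if 'conv2d' in layer.name.lower()]
nnlib.CAInitializerMP(conv_weights)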
@@ -644,7 +656,8 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                        raise NotImplemented

                    super(TileOP_ReflectionPadding2D, self).__init__(c, [('I', input)],
-                                                                    [('O', nnlib.PMLTile.Shape(input.shape.dtype, out_dims ) )])
+                                                                    [('O', nnlib.PMLTile.Shape(input.shape.dtype,
+                                                                                               out_dims))])

            class ReflectionPadding2D(keras.layers.Layer):
                def __init__(self, padding=(1, 1), **kwargs):
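ReflectionPadding2D mirrors the border rows and columns instead of zero-padding, which avoids dark seams at frame edges; on plaidML the TileOP above implements the same thing as a custom Tile operation. A tiny NumPy illustration of what reflection padding does to the spatial size:

# NumPy illustration of reflection padding (the effect ReflectionPadding2D has
# on a feature map); nnlib implements it as a Keras layer.
import numpy as np

x = np.arange(9, dtype=np.float32).reshape(3, 3)
padded = np.pad(x, pad_width=1, mode='reflect')  # mirrors interior values, no zeros
print(padded.shape)  # (5, 5): height and width each grow by 2 * pad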
@@ -697,6 +710,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                if self.reflect_pad:
                    x = ReflectionPadding2D(self.pad)(x)
                return self.func(x)

        nnlib.Conv2D = Conv2D

        class Conv2DTranspose():
@@ -717,6 +731,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                if self.reflect_pad:
                    x = ReflectionPadding2D(self.pad)(x)
                return self.func(x)

        nnlib.Conv2DTranspose = Conv2DTranspose

    @staticmethod
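Both wrapper classes follow the same pattern: when reflect_pad is set, padding is applied explicitly by ReflectionPadding2D and the wrapped Keras layer then runs unpadded. A hedged sketch of that idea outside nnlib, emulating the reflection pad with tf.pad inside a Lambda layer (names are illustrative):

# Hedged sketch of the reflect-pad-then-valid-conv pattern used by the wrappers.
# ReflectPadConv2D is an illustrative name, not part of nnlib.
import keras
import tensorflow as tf

def ReflectPadConv2D(filters, kernel_size):
    pad = kernel_size // 2
    def func(x):
        x = keras.layers.Lambda(lambda t: tf.pad(t, [[0, 0], [pad, pad], [pad, pad], [0, 0]],
                                                 mode='REFLECT'))(x)
        return keras.layers.Conv2D(filters, kernel_size, padding='valid')(x)
    return func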
@@ -771,6 +786,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
            def __init__(self, mask, is_mse=False):
                self.mask = mask
                self.is_mse = is_mse

            def __call__(self, y_true, y_pred):
                total_loss = None
                mask = self.mask
@@ -779,8 +795,8 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                    return K.mean(50 * K.square(y_true * blur_mask - y_pred * blur_mask))
                else:
                    return 10 * dssim()(y_true * mask, y_pred * mask)
        nnlib.DSSIMMSEMaskLoss = DSSIMMSEMaskLoss

        nnlib.DSSIMMSEMaskLoss = DSSIMMSEMaskLoss

        '''
        def ResNet(output_nc, use_batch_norm, ngf=64, n_blocks=6, use_dropout=False):
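DSSIMMSEMaskLoss multiplies both prediction and target by a mask before scoring, so only the masked (face) region drives the gradient; with is_mse=True it uses the scaled MSE over a blurred mask shown above, otherwise a DSSIM term. A standalone sketch of the masked-MSE branch (the 50x scaling mirrors the line in this hunk; the blur step is omitted):

# Standalone sketch of a masked MSE in the spirit of DSSIMMSEMaskLoss's MSE branch.
# `mask` is a Keras tensor broadcastable against y_true/y_pred; blurring is omitted here.
from keras import backend as K

def masked_mse(mask):
    def loss(y_true, y_pred):
        return K.mean(50 * K.square(y_true * mask - y_pred * mask))
    return loss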
@@ -967,6 +983,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
            return func
        nnlib.NLayerDiscriminator = NLayerDiscriminator
        '''

    @staticmethod
    def finalize_all():
        if nnlib.keras_contrib is not None: