Merge branch 'master' into feature/ms-ssim-loss-2

This commit is contained in:
jh 2021-03-24 13:09:38 -07:00
commit f5c56323a9
2 changed files with 178 additions and 28 deletions

View file

@ -195,3 +195,117 @@ class UNetPatchDiscriminator(nn.ModelBase):
return center_out, self.out_conv(x)
nn.UNetPatchDiscriminator = UNetPatchDiscriminator
class UNetPatchDiscriminatorV2(nn.ModelBase):
    """
    U-Net based patch discriminator, variant 2.

    Inspired by https://arxiv.org/abs/2002.12655
    "A U-Net Based Discriminator for Generative Adversarial Networks".

    forward() returns a pair: a 1-channel "center" (bottleneck) output and a
    1-channel per-pixel output from the decoder head.
    """

    def calc_receptive_field_size(self, layers):
        """
        Return the receptive field size of a stack of conv layers.

        layers: sequence of (kernel_size, stride) pairs, first layer first.
        Result matches https://fomoro.com/research/article/receptive-field-calculator/index.html
        """
        rf = 0
        ts = 1  # cumulative stride of all layers processed so far
        for i, (k, s) in enumerate(layers):
            if i == 0:
                rf = k
            else:
                rf += (k - 1) * ts
            ts *= s
        return rf

    def find_archi(self, target_patch_size, max_layers=6):
        """
        Find the best configuration of layers using only 3x3 convs for target patch size.

        Enumerates every stride pattern (each layer stride 1 or 2, last layer
        fixed at stride 2) for 1..max_layers layers, indexes candidates by
        receptive field, preferring fewer layers and then larger total stride,
        and returns the [kernel, stride] list whose receptive field is closest
        to target_patch_size.
        """
        s = {}  # receptive_field -> (layers_count, sum_of_strides, layers)
        for layers_count in range(1, max_layers + 1):
            # val's bits select stride 2 (bit set) vs stride 1 for each of the
            # first layers_count-1 layers; iterate all bit patterns down to 0.
            val = 1 << (layers_count - 1)
            while True:
                val -= 1

                layers = []
                sum_st = 0
                for i in range(layers_count - 1):
                    st = 1 + (1 if val & (1 << i) != 0 else 0)
                    layers.append([3, st])
                    sum_st += st
                layers.append([3, 2])  # last layer always downsamples
                sum_st += 2

                rf = self.calc_receptive_field_size(layers)

                s_rf = s.get(rf, None)
                if s_rf is None:
                    s[rf] = (layers_count, sum_st, layers)
                else:
                    # Prefer fewer layers; among equal counts, larger total stride.
                    if layers_count < s_rf[0] or \
                       (layers_count == s_rf[0] and sum_st > s_rf[1]):
                        s[rf] = (layers_count, sum_st, layers)

                if val == 0:
                    break

        x = sorted(list(s.keys()))
        # Pick the candidate whose receptive field is closest to the target.
        q = x[np.abs(np.array(x) - target_patch_size).argmin()]
        return s[q][2]

    def on_build(self, patch_size, in_ch):
        # Simple pre-activation-free residual block used on every level of
        # both the encoder and the decoder.
        class ResidualBlock(nn.ModelBase):
            def on_build(self, ch, kernel_size=3):
                self.conv1 = nn.Conv2D(ch, ch, kernel_size=kernel_size, padding='SAME')
                self.conv2 = nn.Conv2D(ch, ch, kernel_size=kernel_size, padding='SAME')

            def forward(self, inp):
                x = self.conv1(inp)
                x = tf.nn.leaky_relu(x, 0.2)
                x = self.conv2(x)
                x = tf.nn.leaky_relu(inp + x, 0.2)
                return x

        self.convs = []
        self.res = []
        self.upconvs = []
        self.upres = []
        layers = self.find_archi(patch_size)

        base_ch = 16

        # Channel count per level, capped at 512; key -1 is the stem level.
        level_chs = {i - 1: v for i, v in enumerate([min(base_ch * (2 ** i), 512) for i in range(len(layers) + 1)])}

        self.in_conv = nn.Conv2D(in_ch, level_chs[-1], kernel_size=1, padding='VALID')

        for i, (kernel_size, strides) in enumerate(layers):
            self.convs.append(nn.Conv2D(level_chs[i - 1], level_chs[i], kernel_size=kernel_size, strides=strides, padding='SAME'))
            self.res.append(ResidualBlock(level_chs[i]))

            # Decoder input is doubled by the skip concat except at the bottleneck.
            self.upconvs.insert(0, nn.Conv2DTranspose(level_chs[i] * (2 if i != len(layers) - 1 else 1), level_chs[i - 1], kernel_size=kernel_size, strides=strides, padding='SAME'))
            self.upres.insert(0, ResidualBlock(level_chs[i - 1] * 2))

        self.out_conv = nn.Conv2D(level_chs[-1] * 2, 1, kernel_size=1, padding='VALID')

        self.center_out = nn.Conv2D(level_chs[len(layers) - 1], 1, kernel_size=1, padding='VALID')
        self.center_conv = nn.Conv2D(level_chs[len(layers) - 1], level_chs[len(layers) - 1], kernel_size=1, padding='VALID')

    def forward(self, x):
        x = tf.nn.leaky_relu(self.in_conv(x), 0.1)

        # Encoder: remember each level's activation for the skip connections.
        encs = []
        for conv, res in zip(self.convs, self.res):
            encs.insert(0, x)
            x = tf.nn.leaky_relu(conv(x), 0.1)
            x = res(x)

        center_out, x = self.center_out(x), self.center_conv(x)

        # Decoder: upsample, concat the matching encoder activation, refine.
        for i, (upconv, enc, upres) in enumerate(zip(self.upconvs, encs, self.upres)):
            x = tf.nn.leaky_relu(upconv(x), 0.1)
            x = tf.concat([enc, x], axis=nn.conv2d_ch_axis)
            x = upres(x)

        return center_out, self.out_conv(x)

nn.UNetPatchDiscriminatorV2 = UNetPatchDiscriminatorV2

View file

@ -140,9 +140,12 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
self.options['uniform_yaw'] = io.input_bool ("Uniform yaw distribution of samples", default_uniform_yaw, help_message='Helps to fix blurry side faces due to small amount of them in the faceset.')
default_gan_version = self.options['gan_version'] = self.load_or_def_option('gan_version', 2)
default_gan_power = self.options['gan_power'] = self.load_or_def_option('gan_power', 0.0)
default_gan_patch_size = self.options['gan_patch_size'] = self.load_or_def_option('gan_patch_size', self.options['resolution'] // 8)
default_gan_dims = self.options['gan_dims'] = self.load_or_def_option('gan_dims', 16)
default_gan_smoothing = self.options['gan_smoothing'] = self.load_or_def_option('gan_smoothing', 0.1)
default_gan_noise = self.options['gan_noise'] = self.load_or_def_option('gan_noise', 0.05)
if self.is_first_run() or ask_override:
self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default model and optimizer weights are placed on GPU to accelerate the process. You can place they on CPU to free up extra VRAM, thus set bigger dimensions.")
@ -155,14 +158,23 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake for less amount of iterations.")
self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 1.0", help_message="Forces the neural network to learn small details of the face. Enable it only when the face is trained enough with lr_dropout(on) and random_warp(off), and don't disable. The higher the value, the higher the chances of artifacts. Typical fine value is 0.1"), 0.0, 1.0 ) self.options['gan_version'] = np.clip (io.input_int("GAN version", default_gan_version, add_info="2 or 3", help_message="Choose GAN version (v2: 7/16/2020, v3: 1/3/2021):"), 2, 3)
if self.options['gan_version'] == 2:
self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 10.0", help_message="Train the network in Generative Adversarial manner. Forces the neural network to learn small details of the face. Enable it only when the face is trained enough and don't disable. Typical value is 0.1"), 0.0, 10.0 )
else:
self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 1.0", help_message="Forces the neural network to learn small details of the face. Enable it only when the face is trained enough with lr_dropout(on) and random_warp(off), and don't disable. The higher the value, the higher the chances of artifacts. Typical fine value is 0.1"), 0.0, 1.0 )
if self.options['gan_power'] != 0.0:
gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher patch size, the higher the quality, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is resolution / 8." ), 3, 640 ) if self.options['gan_version'] == 3:
self.options['gan_patch_size'] = gan_patch_size gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher patch size, the higher the quality, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is resolution / 8." ), 3, 640 )
self.options['gan_patch_size'] = gan_patch_size
gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-64", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 64 )
self.options['gan_dims'] = gan_dims
self.options['gan_smoothing'] = np.clip ( io.input_number("GAN label smoothing", default_gan_smoothing, add_info="0 - 0.5", help_message="Uses soft labels with values slightly off from 0/1 for GAN, has a regularizing effect"), 0, 0.5)
self.options['gan_noise'] = np.clip ( io.input_number("GAN noisy labels", default_gan_noise, add_info="0 - 0.5", help_message="Marks some images with the wrong label, helps prevent collapse"), 0, 0.5)
if 'df' in self.options['archi']:
self.options['true_face_power'] = np.clip ( io.input_number ("'True face' power.", default_true_face_power, add_info="0.0000 .. 1.0", help_message="Experimental option. Discriminates result face to be more like src face. Higher value - stronger discrimination. Typical value is 0.01 . Comparison - https://i.imgur.com/czScS9q.png"), 0.0, 1.0 )
@ -303,8 +315,12 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
if self.is_training:
if gan_power != 0:
self.D_src = nn.UNetPatchDiscriminator(patch_size=self.options['gan_patch_size'], in_ch=input_ch, base_ch=self.options['gan_dims'], name="D_src") if self.options['gan_version'] == 2:
self.model_filename_list += [ [self.D_src, 'GAN.npy'] ] self.D_src = nn.UNetPatchDiscriminatorV2(patch_size=resolution//16, in_ch=input_ch, name="D_src")
self.model_filename_list += [ [self.D_src, 'D_src_v2.npy'] ]
else:
self.D_src = nn.UNetPatchDiscriminator(patch_size=self.options['gan_patch_size'], in_ch=input_ch, base_ch=self.options['gan_dims'], name="D_src")
self.model_filename_list += [ [self.D_src, 'GAN.npy'] ]
# Initialize optimizers
lr=5e-5
@ -329,9 +345,14 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
self.model_filename_list += [ (self.D_code_opt, 'D_code_opt.npy') ]
if gan_power != 0:
self.D_src_dst_opt = OptimizerClass(lr=lr, lr_dropout=lr_dropout, clipnorm=clipnorm, name='GAN_opt') if self.options['gan_version'] == 2:
self.D_src_dst_opt.initialize_variables ( self.D_src.get_weights(), vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu')#+self.D_src_x2.get_weights() self.D_src_dst_opt = OptimizerClass(lr=lr, lr_dropout=lr_dropout, clipnorm=clipnorm, name='D_src_dst_opt')
self.model_filename_list += [ (self.D_src_dst_opt, 'GAN_opt.npy') ] self.D_src_dst_opt.initialize_variables ( self.D_src.get_weights(), vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu')#+self.D_src_x2.get_weights()
self.model_filename_list += [ (self.D_src_dst_opt, 'D_src_v2_opt.npy') ]
else:
self.D_src_dst_opt = OptimizerClass(lr=lr, lr_dropout=lr_dropout, clipnorm=clipnorm, name='GAN_opt')
self.D_src_dst_opt.initialize_variables ( self.D_src.get_weights(), vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu')#+self.D_src_x2.get_weights()
self.model_filename_list += [ (self.D_src_dst_opt, 'GAN_opt.npy') ]
if self.is_training:
# Adjust batch size for multiple GPU
@ -537,22 +558,37 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
gpu_pred_src_src_d, \
gpu_pred_src_src_d2 = self.D_src(gpu_pred_src_src_masked_opt) gpu_pred_src_src_d2 = self.D_src(gpu_pred_src_src_masked_opt)
gpu_pred_src_src_d_ones = tf.ones_like (gpu_pred_src_src_d) def get_smooth_noisy_labels(label, tensor, smoothing=0.1, noise=0.05):
gpu_pred_src_src_d_zeros = tf.zeros_like(gpu_pred_src_src_d) num_labels = self.batch_size
for d in tensor.get_shape().as_list()[1:]:
num_labels *= d
gpu_pred_src_src_d2_ones = tf.ones_like (gpu_pred_src_src_d2) probs = tf.math.log([[noise, 1-noise]]) if label == 1 else tf.math.log([[1-noise, noise]])
gpu_pred_src_src_d2_zeros = tf.zeros_like(gpu_pred_src_src_d2) x = tf.random.categorical(probs, num_labels)
x = tf.cast(x, tf.float32)
x = tf.math.scalar_mul(1-smoothing, x)
# x = x + (smoothing/num_labels)
x = tf.reshape(x, (self.batch_size,) + tensor.shape[1:])
return x
gpu_target_src_d, \ smoothing = self.options['gan_smoothing']
gpu_target_src_d2 = self.D_src(gpu_target_src_masked_opt) noise = self.options['gan_noise']
gpu_target_src_d_ones = tf.ones_like(gpu_target_src_d) gpu_pred_src_src_d_ones = tf.ones_like(gpu_pred_src_src_d)
gpu_target_src_d2_ones = tf.ones_like(gpu_target_src_d2) gpu_pred_src_src_d2_ones = tf.ones_like(gpu_pred_src_src_d2)
gpu_D_src_dst_loss = (DLoss(gpu_target_src_d_ones , gpu_target_src_d) + \ gpu_pred_src_src_d_smooth_zeros = get_smooth_noisy_labels(0, gpu_pred_src_src_d, smoothing=smoothing, noise=noise)
DLoss(gpu_pred_src_src_d_zeros , gpu_pred_src_src_d) ) * 0.5 + \ gpu_pred_src_src_d2_smooth_zeros = get_smooth_noisy_labels(0, gpu_pred_src_src_d2, smoothing=smoothing, noise=noise)
(DLoss(gpu_target_src_d2_ones , gpu_target_src_d2) + \
DLoss(gpu_pred_src_src_d2_zeros , gpu_pred_src_src_d2) ) * 0.5 gpu_target_src_d, gpu_target_src_d2 = self.D_src(gpu_target_src_masked_opt)
gpu_target_src_d_smooth_ones = get_smooth_noisy_labels(1, gpu_target_src_d, smoothing=smoothing, noise=noise)
gpu_target_src_d2_smooth_ones = get_smooth_noisy_labels(1, gpu_target_src_d2, smoothing=smoothing, noise=noise)
gpu_D_src_dst_loss = DLoss(gpu_target_src_d_smooth_ones, gpu_target_src_d) \
+ DLoss(gpu_pred_src_src_d_smooth_zeros, gpu_pred_src_src_d) \
+ DLoss(gpu_target_src_d2_smooth_ones, gpu_target_src_d2) \
+ DLoss(gpu_pred_src_src_d2_smooth_zeros, gpu_pred_src_src_d2)
gpu_D_src_dst_loss_gvs += [ nn.gradients (gpu_D_src_dst_loss, self.D_src.get_weights() ) ]#+self.D_src_x2.get_weights()