mirror of
https://github.com/iperov/DeepFaceLab.git
synced 2025-08-21 22:13:20 -07:00
Merge pull request #128 from faceshiftlabs/feature/gan-updates
Feature/gan updates
This commit is contained in:
commit
9c08a54f75
1 changed file with 38 additions and 17 deletions
@@ -142,6 +142,8 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
         default_gan_power = self.options['gan_power'] = self.load_or_def_option('gan_power', 0.0)
         default_gan_patch_size = self.options['gan_patch_size'] = self.load_or_def_option('gan_patch_size', self.options['resolution'] // 8)
         default_gan_dims = self.options['gan_dims'] = self.load_or_def_option('gan_dims', 16)
+        default_gan_smoothing = self.options['gan_smoothing'] = self.load_or_def_option('gan_smoothing', 0.1)
+        default_gan_noise = self.options['gan_noise'] = self.load_or_def_option('gan_noise', 0.05)

         if self.is_first_run() or ask_override:
             self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default model and optimizer weights are placed on GPU to accelerate the process. You can place they on CPU to free up extra VRAM, thus set bigger dimensions.")
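The two new defaults follow the same load-or-default pattern as the existing GAN options: a value saved with the model is reused, otherwise the hard-coded default (0.1 smoothing, 0.05 label noise) applies. A minimal sketch of that pattern, using a plain dict and a hypothetical load_or_def helper rather than the real ModelBase.load_or_def_option:

# Hypothetical stand-in for the load_or_def_option pattern above.
def load_or_def(saved_options, name, def_value):
    value = saved_options.get(name, None)
    return value if value is not None else def_value

saved = {'gan_power': 0.1}  # e.g. a model trained before this change
gan_smoothing = load_or_def(saved, 'gan_smoothing', 0.1)   # -> 0.1 (default)
gan_noise     = load_or_def(saved, 'gan_noise', 0.05)      # -> 0.05 (default)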
@@ -159,12 +161,16 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
         else:
             self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 1.0", help_message="Forces the neural network to learn small details of the face. Enable it only when the face is trained enough with lr_dropout(on) and random_warp(off), and don't disable. The higher the value, the higher the chances of artifacts. Typical fine value is 0.1"), 0.0, 1.0 )

-            if self.options['gan_power'] != 0.0 and self.options['gan_version'] == 3:
-                gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher patch size, the higher the quality, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is resolution / 8." ), 3, 640 )
-                self.options['gan_patch_size'] = gan_patch_size
+            if self.options['gan_power'] != 0.0:
+                if self.options['gan_version'] == 3:
+                    gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher patch size, the higher the quality, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is resolution / 8." ), 3, 640 )
+                    self.options['gan_patch_size'] = gan_patch_size

-                gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-64", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 64 )
-                self.options['gan_dims'] = gan_dims
+                    gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-64", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 64 )
+                    self.options['gan_dims'] = gan_dims
+
+                self.options['gan_smoothing'] = np.clip ( io.input_number("GAN label smoothing", default_gan_smoothing, add_info="0 - 0.5", help_message="Uses soft labels with values slightly off from 0/1 for GAN, has a regularizing effect"), 0, 0.5)
+                self.options['gan_noise'] = np.clip ( io.input_number("GAN noisy labels", default_gan_noise, add_info="0 - 0.5", help_message="Marks some images with the wrong label, helps prevent collapse"), 0, 0.5)

             if 'df' in self.options['archi']:
                 self.options['true_face_power'] = np.clip ( io.input_number ("'True face' power.", default_true_face_power, add_info="0.0000 .. 1.0", help_message="Experimental option. Discriminates result face to be more like src face. Higher value - stronger discrimination. Typical value is 0.01 . Comparison - https://i.imgur.com/czScS9q.png"), 0.0, 1.0 )
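Both new prompts are clamped to the 0 .. 0.5 range with np.clip, like the existing GAN options. A quick illustration, with plain values standing in for the interactive io.input_number prompt:

import numpy as np

# Out-of-range answers are clamped, not rejected.
gan_smoothing = np.clip(0.7, 0, 0.5)    # -> 0.5
gan_noise     = np.clip(-0.2, 0, 0.5)   # -> 0.0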
@@ -536,22 +542,37 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
                     gpu_pred_src_src_d, \
                     gpu_pred_src_src_d2 = self.D_src(gpu_pred_src_src_masked_opt)

-                    gpu_pred_src_src_d_ones = tf.ones_like (gpu_pred_src_src_d)
-                    gpu_pred_src_src_d_zeros = tf.zeros_like(gpu_pred_src_src_d)
+                    def get_smooth_noisy_labels(label, tensor, smoothing=0.1, noise=0.05):
+                        num_labels = self.batch_size
+                        for d in tensor.get_shape().as_list()[1:]:
+                            num_labels *= d

-                    gpu_pred_src_src_d2_ones = tf.ones_like (gpu_pred_src_src_d2)
-                    gpu_pred_src_src_d2_zeros = tf.zeros_like(gpu_pred_src_src_d2)
+                        probs = tf.math.log([[noise, 1-noise]]) if label == 1 else tf.math.log([[1-noise, noise]])
+                        x = tf.random.categorical(probs, num_labels)
+                        x = tf.cast(x, tf.float32)
+                        x = tf.math.scalar_mul(1-smoothing, x)
+                        # x = x + (smoothing/num_labels)
+                        x = tf.reshape(x, (self.batch_size,) + tensor.shape[1:])
+                        return x

-                    gpu_target_src_d, \
-                    gpu_target_src_d2 = self.D_src(gpu_target_src_masked_opt)
+                    smoothing = self.options['gan_smoothing']
+                    noise = self.options['gan_noise']

-                    gpu_target_src_d_ones = tf.ones_like(gpu_target_src_d)
-                    gpu_target_src_d2_ones = tf.ones_like(gpu_target_src_d2)
+                    gpu_pred_src_src_d_ones = tf.ones_like(gpu_pred_src_src_d)
+                    gpu_pred_src_src_d2_ones = tf.ones_like(gpu_pred_src_src_d2)

-                    gpu_D_src_dst_loss = (DLoss(gpu_target_src_d_ones , gpu_target_src_d) + \
-                                          DLoss(gpu_pred_src_src_d_zeros , gpu_pred_src_src_d) ) * 0.5 + \
-                                         (DLoss(gpu_target_src_d2_ones , gpu_target_src_d2) + \
-                                          DLoss(gpu_pred_src_src_d2_zeros , gpu_pred_src_src_d2) ) * 0.5
+                    gpu_pred_src_src_d_smooth_zeros = get_smooth_noisy_labels(0, gpu_pred_src_src_d, smoothing=smoothing, noise=noise)
+                    gpu_pred_src_src_d2_smooth_zeros = get_smooth_noisy_labels(0, gpu_pred_src_src_d2, smoothing=smoothing, noise=noise)
+
+                    gpu_target_src_d, gpu_target_src_d2 = self.D_src(gpu_target_src_masked_opt)
+
+                    gpu_target_src_d_smooth_ones = get_smooth_noisy_labels(1, gpu_target_src_d, smoothing=smoothing, noise=noise)
+                    gpu_target_src_d2_smooth_ones = get_smooth_noisy_labels(1, gpu_target_src_d2, smoothing=smoothing, noise=noise)
+
+                    gpu_D_src_dst_loss = DLoss(gpu_target_src_d_smooth_ones, gpu_target_src_d) \
+                                         + DLoss(gpu_pred_src_src_d_smooth_zeros, gpu_pred_src_src_d) \
+                                         + DLoss(gpu_target_src_d2_smooth_ones, gpu_target_src_d2) \
+                                         + DLoss(gpu_pred_src_src_d2_smooth_zeros, gpu_pred_src_src_d2)

                     gpu_D_src_dst_loss_gvs += [ nn.gradients (gpu_D_src_dst_loss, self.D_src.get_weights() ) ]#+self.D_src_x2.get_weights()

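For reference, the label scheme used above can be reproduced outside the model graph. The sketch below is a standalone re-implementation for illustration only, assuming TensorFlow 2.x eager mode and a free function with an explicit shape argument instead of the nested closure in the diff:

import tensorflow as tf

def smooth_noisy_labels(label, shape, smoothing=0.1, noise=0.05):
    # Number of scalar labels needed to fill the discriminator output.
    num_labels = 1
    for d in shape:
        num_labels *= d
    # Each element is drawn as 0 or 1; with probability `noise` it gets
    # the wrong class, which is the "noisy labels" part.
    probs = tf.math.log([[noise, 1.0 - noise]]) if label == 1 else tf.math.log([[1.0 - noise, noise]])
    x = tf.random.categorical(probs, num_labels)   # shape (1, num_labels), values in {0, 1}
    x = tf.cast(x, tf.float32)
    # Label smoothing: positives become 1 - smoothing (0.9 by default)
    # instead of a hard 1.0.
    x = tf.math.scalar_mul(1.0 - smoothing, x)
    return tf.reshape(x, shape)

# "Real" targets for a 4x8x8x1 patch-discriminator output: mostly 0.9,
# with roughly 5% of entries flipped to 0.0.
real_labels = smooth_noisy_labels(1, (4, 8, 8, 1))
# "Fake" targets: mostly 0.0, with roughly 5% flipped to 0.9.
fake_labels = smooth_noisy_labels(0, (4, 8, 8, 1))

Softening the positive targets and occasionally flipping labels keeps the discriminator from becoming perfectly confident, which the help messages above describe as a regularizer and a guard against collapse.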