diff --git a/core/leras/models/PatchDiscriminator.py b/core/leras/models/PatchDiscriminator.py
index 343e000..868151d 100644
--- a/core/leras/models/PatchDiscriminator.py
+++ b/core/leras/models/PatchDiscriminator.py
@@ -146,11 +146,7 @@ class UNetPatchDiscriminator(nn.ModelBase):
         prev_ch = in_ch
 
         self.convs = []
-        self.res1 = []
-        self.res2 = []
         self.upconvs = []
-        self.upres1 = []
-        self.upres2 = []
         layers = self.find_archi(patch_size)
 
         level_chs = { i-1:v for i,v in enumerate([ min( base_ch * (2**i), 512 ) for i in range(len(layers)+1)]) }
@@ -160,14 +156,8 @@ class UNetPatchDiscriminator(nn.ModelBase):
 
         for i, (kernel_size, strides) in enumerate(layers):
             self.convs.append ( nn.Conv2D( level_chs[i-1], level_chs[i], kernel_size=kernel_size, strides=strides, padding='SAME') )
-            self.res1.append ( ResidualBlock(level_chs[i]) )
-            self.res2.append ( ResidualBlock(level_chs[i]) )
-
             self.upconvs.insert (0, nn.Conv2DTranspose( level_chs[i]*(2 if i != len(layers)-1 else 1), level_chs[i-1], kernel_size=kernel_size, strides=strides, padding='SAME') )
 
-            self.upres1.insert (0, ResidualBlock(level_chs[i-1]*2) )
-            self.upres2.insert (0, ResidualBlock(level_chs[i-1]*2) )
-
         self.out_conv = nn.Conv2D( level_chs[-1]*2, 1, kernel_size=1, padding='VALID')
 
         self.center_out = nn.Conv2D( level_chs[len(layers)-1], 1, kernel_size=1, padding='VALID')
@@ -178,19 +168,15 @@ class UNetPatchDiscriminator(nn.ModelBase):
         x = tf.nn.leaky_relu( self.in_conv(x), 0.2 )
 
         encs = []
-        for conv, res1,res2 in zip(self.convs, self.res1, self.res2):
+        for conv in self.convs:
             encs.insert(0, x)
             x = tf.nn.leaky_relu( conv(x), 0.2 )
-            x = res1(x)
-            x = res2(x)
 
         center_out, x = self.center_out(x), tf.nn.leaky_relu( self.center_conv(x), 0.2 )
 
-        for i, (upconv, enc, upres1, upres2 ) in enumerate(zip(self.upconvs, encs, self.upres1, self.upres2)):
+        for i, (upconv, enc) in enumerate(zip(self.upconvs, encs)):
            x = tf.nn.leaky_relu( upconv(x), 0.2 )
            x = tf.concat( [enc, x], axis=nn.conv2d_ch_axis)
-            x = upres1(x)
-            x = upres2(x)
 
         return center_out, self.out_conv(x)
 
diff --git a/models/Model_AMP/Model.py b/models/Model_AMP/Model.py
index 62468ae..25149f4 100644
--- a/models/Model_AMP/Model.py
+++ b/models/Model_AMP/Model.py
@@ -123,7 +123,7 @@ class AMPModel(ModelBase):
             gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher patch size, the higher the quality, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is resolution / 8." ), 3, 640 )
             self.options['gan_patch_size'] = gan_patch_size
 
-            gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-64", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 64 )
+            gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-512", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 512 )
             self.options['gan_dims'] = gan_dims
 
         self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change color distribution of src samples close to dst samples. Try all modes to find the best.")
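[Note] With the ResidualBlock pairs removed from UNetPatchDiscriminator, the remaining channel bookkeeping is the level_chs table that sizes convs/upconvs. A minimal sketch of how it lays out; base_ch=16 and n_layers=4 are hypothetical illustration values, not taken from the patch:

    # Sketch: the level_chs mapping built in on_build() above.
    base_ch, n_layers = 16, 4  # hypothetical values for illustration
    level_chs = {i - 1: min(base_ch * (2 ** i), 512) for i in range(n_layers + 1)}
    print(level_chs)  # {-1: 16, 0: 32, 1: 64, 2: 128, 3: 256}
    # convs[i] maps level_chs[i-1] -> level_chs[i]. Each upconv takes
    # level_chs[i]*2 input channels, except at the deepest level (*1),
    # because every other decoder step receives the previous upconv output
    # concatenated with the matching encoder activation; for the same
    # reason out_conv takes level_chs[-1]*2.

The Model_AMP hunk above only widens the accepted gan_dims range from 4-64 to 4-512; Model_SAEHD gets the same widening below, plus a fix to the code-discriminator loss.
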
diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py
index 7e548db..147defe 100644
--- a/models/Model_SAEHD/Model.py
+++ b/models/Model_SAEHD/Model.py
@@ -156,7 +156,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
             gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher patch size, the higher the quality, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is resolution / 8." ), 3, 640 )
             self.options['gan_patch_size'] = gan_patch_size
 
-            gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-64", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 64 )
+            gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-512", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 512 )
             self.options['gan_dims'] = gan_dims
 
         if 'df' in self.options['archi']:
@@ -467,7 +467,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
 
                         gpu_G_loss += self.options['true_face_power']*DLoss(gpu_src_code_d_ones, gpu_src_code_d)
 
-                        gpu_D_code_loss = (DLoss(gpu_src_code_d_ones , gpu_dst_code_d) + \
+                        gpu_D_code_loss = (DLoss(gpu_dst_code_d_ones , gpu_dst_code_d) + \
                                            DLoss(gpu_src_code_d_zeros, gpu_src_code_d) ) * 0.5
 
                         gpu_D_code_loss_gvs += [ nn.gradients (gpu_D_code_loss, self.code_discriminator.get_weights() ) ]
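[Note] The @@ -467 hunk pairs the dst logits with the dst-sided ones tensor instead of the src-sided one. A minimal standalone sketch of the corrected pairing; DLoss as a sigmoid cross-entropy, the ones_like/zeros_like targets, and the tensor shapes are assumptions for illustration, not taken from the patch:

    import tensorflow as tf

    def DLoss(labels, logits):
        # Assumed stand-in for the model's discriminator loss helper.
        return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits))

    gpu_dst_code_d = tf.random.normal([4, 1])             # "real" (dst) code logits
    gpu_src_code_d = tf.random.normal([4, 1])             # "fake" (src) code logits
    gpu_dst_code_d_ones  = tf.ones_like(gpu_dst_code_d)   # target: real
    gpu_src_code_d_zeros = tf.zeros_like(gpu_src_code_d)  # target: fake

    # After the fix: dst logits vs dst-shaped ones, src logits vs src-shaped zeros.
    gpu_D_code_loss = (DLoss(gpu_dst_code_d_ones,  gpu_dst_code_d) +
                       DLoss(gpu_src_code_d_zeros, gpu_src_code_d)) * 0.5

Assuming the src and dst batches share a shape, both *_ones tensors are identical all-ones tensors, so the numeric result should be unchanged; the fix makes the real/fake pairing read correctly.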