From 65a3da435cbec8a410c49c52ce022b1e0eda52d3 Mon Sep 17 00:00:00 2001
From: sinofis
Date: Wed, 14 Oct 2020 18:54:01 +0200
Subject: [PATCH] syntax fixes

---
 core/imagelib/color_transfer.py |  4 +++-
 facelib/LandmarksProcessor.py   |  2 +-
 models/Model_SAEHD/Model.py     | 25 +++++++++++++------------
 samplelib/SampleProcessor.py    | 14 ++++++--------
 4 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/core/imagelib/color_transfer.py b/core/imagelib/color_transfer.py
index 59be668..a1cb29a 100644
--- a/core/imagelib/color_transfer.py
+++ b/core/imagelib/color_transfer.py
@@ -1,6 +1,8 @@
 import cv2
 import numpy as np
 from numpy import linalg as npla
+from random import random, shuffle, choice
+from scipy.stats import special_ortho_group
 import scipy as sp
 
 def color_transfer_sot(src,trg, steps=10, batch_size=5, reg_sigmaXY=16.0, reg_sigmaV=5.0):
@@ -376,7 +378,7 @@ def color_augmentation(img):
     face = random_clahe(face)
     face = random_lab(face)
     img[:, :, :3] = face
-    return img.astype('float32') / 255.0
+    return np.clip(img.astype('float32') / 255.0, 0, 1)
 
 
 def random_lab(image):
diff --git a/facelib/LandmarksProcessor.py b/facelib/LandmarksProcessor.py
index 2181fe1..23430e5 100644
--- a/facelib/LandmarksProcessor.py
+++ b/facelib/LandmarksProcessor.py
@@ -443,7 +443,7 @@ def get_image_mouth_mask (image_shape, image_landmarks):
 
     image_landmarks = image_landmarks.astype(np.int)
 
-    cv2.fillConvexPoly( hull_mask, cv2.convexHull( image_landmarks[48, 60]), (1,) )
+    cv2.fillConvexPoly( hull_mask, cv2.convexHull( image_landmarks[48:60]), (1,) )
 
     dilate = h // 32
     hull_mask = cv2.dilate(hull_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(dilate,dilate)), iterations = 1 )
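
Note on the two hunks above: random_lab / random_clahe can push channel values to the uint8 limits, so clipping the normalized result keeps color_augmentation's output inside [0, 1]. In get_image_mouth_mask, image_landmarks[48, 60] is a single-element index (row 48, column 60) and is out of bounds for a (68, 2) landmark array; the slice [48:60] selects the twelve outer-lip points (indices 48-59 in the 68-point scheme). A minimal self-contained sketch of the fixed mask construction, assuming 68-point landmarks (the helper name is illustrative, not this repo's API):

    import cv2
    import numpy as np

    def mouth_mask_sketch(image_shape, landmarks68):
        # indices 48..59 trace the outer lip in the 68-point layout
        h, w = image_shape[:2]
        mask = np.zeros((h, w, 1), dtype=np.float32)
        pts = landmarks68[48:60].astype(np.int32)  # a slice, not the pair index [48, 60]
        cv2.fillConvexPoly(mask, cv2.convexHull(pts), (1,))
        k = max(h // 32, 1)  # dilation kernel scaled to image size, as in the patched code
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k, k))
        return cv2.dilate(mask, kernel, iterations=1)
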
diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py
index c214b46..d34e7dd 100644
--- a/models/Model_SAEHD/Model.py
+++ b/models/Model_SAEHD/Model.py
@@ -286,7 +286,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
                     self.D_src_x2 = nn.PatchDiscriminator(patch_size=resolution//32, in_ch=input_ch, name="D_src_x2")
                     self.model_filename_list += [ [self.D_src, 'D_src.npy'] ]
                     self.model_filename_list += [ [self.D_src_x2, 'D_src_x2.npy'] ]
-                elif:
+                else:
                     self.D_src = nn.UNetPatchDiscriminator(patch_size=resolution//16, in_ch=input_ch, name="D_src")
                     self.model_filename_list += [ [self.D_src, 'D_src_v2.npy'] ]
 
@@ -315,7 +315,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
                 if gan_old:
                     self.D_src_dst_opt.initialize_variables ( self.D_src.get_weights()+self.D_src_x2.get_weights(), vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu')
                     self.model_filename_list += [ (self.D_src_dst_opt, 'D_src_dst_opt.npy') ]
-                elif:
+                else:
                     self.D_src_dst_opt.initialize_variables ( self.D_src.get_weights(), vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu')#+self.D_src_x2.get_weights()
                     self.model_filename_list += [ (self.D_src_dst_opt, 'D_src_v2_opt.npy') ]
 
@@ -485,16 +485,16 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
                     if gan_power != 0:
                         if gan_old:
                             gpu_pred_src_src_d = self.D_src(gpu_pred_src_src_masked_opt)
-
-                            gpu_pred_src_src_d_ones = tf.ones_like (gpu_pred_src_src_d) gpu_pred_src_src_d_ones = tf.ones_like (gpu_pred_src_src_d)
-                            gpu_pred_src_src_d_zeros = tf.zeros_like(gpu_pred_src_src_d) gpu_pred_src_src_d_zeros = tf.zeros_like(gpu_pred_src_src_d)
+
+                            gpu_pred_src_src_d_ones = tf.ones_like (gpu_pred_src_src_d)
+                            gpu_pred_src_src_d_zeros = tf.zeros_like(gpu_pred_src_src_d)
 
                             gpu_target_src_d = self.D_src(gpu_target_src_masked_opt)
                             gpu_target_src_d_ones = tf.ones_like(gpu_target_src_d)
-                            gpu_pred_src_src_x2_d = self.D_src_x2(gpu_pred_src_src_masked_opt) gpu_pred_src_src_d2_ones = tf.ones_like (gpu_pred_src_src_d2)
-                            gpu_pred_src_src_x2_d_ones = tf.ones_like (gpu_pred_src_src_x2_d) gpu_pred_src_src_d2_zeros = tf.zeros_like(gpu_pred_src_src_d2)
+                            gpu_pred_src_src_x2_d = self.D_src_x2(gpu_pred_src_src_masked_opt)
+                            gpu_pred_src_src_x2_d_ones = tf.ones_like (gpu_pred_src_src_x2_d)
                             gpu_pred_src_src_x2_d_zeros = tf.zeros_like(gpu_pred_src_src_x2_d)
-                            gpu_target_src_x2_d = self.D_src_x2(gpu_target_src_masked_opt) gpu_target_src_d, \
+                            gpu_target_src_x2_d = self.D_src_x2(gpu_target_src_masked_opt)
                             gpu_target_src_x2_d_ones = tf.ones_like(gpu_target_src_x2_d)
 
                             gpu_D_src_dst_loss = (DLoss(gpu_target_src_d_ones , gpu_target_src_d) + \
@@ -507,7 +507,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
                             gpu_G_loss += 0.5*gan_power*( DLoss(gpu_pred_src_src_d_ones, gpu_pred_src_src_d) + DLoss(gpu_pred_src_src_x2_d_ones, gpu_pred_src_src_x2_d))
 
-                        elif:
+                        else:
                             gpu_pred_src_src_d, \
                             gpu_pred_src_src_d2 = self.D_src(gpu_pred_src_src_masked_opt)
 
@@ -648,6 +648,10 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
         if ct_mode is not None:
             src_generators_count = int(src_generators_count * 1.5)
 
+        fs_aug = None
+        if ct_mode == 'fs-aug':
+            fs_aug = 'fs-aug'
+
         self.set_training_data_generators ([
                 SampleGeneratorFace(training_data_src_path, random_ct_samples_path=random_ct_samples_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
                     sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
@@ -658,9 +662,6 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
                     uniform_yaw_distribution=self.options['uniform_yaw'] or self.pretrain,
                     generators_count=src_generators_count ),
 
-        fs_aug = None
-        if ct_mode == 'fs-aug':
-            fs_aug = 'fs-aug'
                 SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
                     sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
                     output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':random_warp, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'ct_mode': fs_aug, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
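
Note on the Model.py hunks: a bare `elif:` without a condition is a SyntaxError in Python, hence the three `else:` replacements. The @@ -485 hunk additionally untangles lines where statements from the single-discriminator (d2) path had been fused onto the two-scale (x2) lines; the restored gan_old path scores both prediction and target through D_src and D_src_x2 and builds symmetric real/fake terms. The last two hunks hoist the fs_aug block above set_training_data_generators(...), where the generators' output_sample_types read it; previously it sat inside the argument list, which cannot parse. A hedged sketch of the two-scale GAN terms, matching the DLoss(ones/zeros, score) ordering used in the hunk (the function name and stand-in tensors are assumptions, not the exact SAEHD code):

    import tensorflow as tf

    def gan_terms_sketch(D_src, D_src_x2, pred_masked, target_masked, DLoss, gan_power):
        # fake and real patch scores at both discriminator scales
        d, d2 = D_src(pred_masked), D_src_x2(pred_masked)
        t, t2 = D_src(target_masked), D_src_x2(target_masked)
        # discriminator loss: real -> ones, fake -> zeros, averaged over scales
        D_loss = (DLoss(tf.ones_like(t),  t)  + DLoss(tf.zeros_like(d),  d)) * 0.5 \
               + (DLoss(tf.ones_like(t2), t2) + DLoss(tf.zeros_like(d2), d2)) * 0.5
        # generator term: push fake scores toward "real" at both scales
        G_term = 0.5 * gan_power * (DLoss(tf.ones_like(d), d) + DLoss(tf.ones_like(d2), d2))
        return D_loss, G_term
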
diff --git a/samplelib/SampleProcessor.py b/samplelib/SampleProcessor.py
index 38e64dd..eb515f9 100644
--- a/samplelib/SampleProcessor.py
+++ b/samplelib/SampleProcessor.py
@@ -72,13 +72,15 @@ class SampleProcessor(object):
                 eyes_mask = LandmarksProcessor.get_image_eye_mask (sample_bgr.shape, sample_landmarks)
                 # set eye masks to 1-2
                 clip = np.clip(eyes_mask, 0, 1)
-                return a[a > 0.1] += 1
+                clip[clip > 0.1] += 1
+                return clip
 
             def get_mouth_mask():
                 mouth_mask = LandmarksProcessor.get_image_mouth_mask (sample_bgr.shape, sample_landmarks)
                 # set eye masks to 2-3
                 clip = np.clip(mouth_mask, 0, 1)
-                return a[a > 0.1] += 2
+                clip[clip > 0.1] += 2
+                return clip
 
         is_face_sample = sample_landmarks is not None
@@ -200,18 +202,14 @@ class SampleProcessor(object):
                         if ct_mode is not None and ct_sample is not None or ct_mode == 'fs-aug':
                             if 'fs-aug':
                                 img = imagelib.color_augmentation(img)
-                            elif:
+                            else:
                                 if ct_sample_bgr is None:
                                     ct_sample_bgr = ct_sample.load_bgr()
                                 img = imagelib.color_transfer (ct_mode, img, cv2.resize( ct_sample_bgr, (resolution,resolution), interpolation=cv2.INTER_LINEAR ) )
 
                         img = imagelib.warp_by_params (params_per_resolution[resolution], img, warp, transform, can_flip=True, border_replicate=border_replicate)
-
-                        img = np.clip(img.astype(np.float32), 0, 1)
-
-
-
+                        img = np.clip(img.astype(np.float32), 0, 1)
 
                         if motion_blur is not None:
                             random_mask = sd.random_circle_faded ([resolution,resolution], rnd_state=np.random.RandomState (sample_rnd_seed+2)) if random_circle_mask else None
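
Note on the SampleProcessor.py hunks: `return a[a > 0.1] += 1` was doubly broken; an augmented assignment is a statement, not an expression that can be returned, and `a` was never defined. The fix mutates the clipped mask in place and returns it, so eye pixels land near 2, mouth pixels near 3, and background stays 0, matching the "1-2" / "2-3" band comments. A minimal sketch of the shared pattern (the helper name is illustrative):

    import numpy as np

    def banded_mask_sketch(raw_mask, offset):
        # offset=1 for the eye mask (values ~2), offset=2 for the mouth mask (values ~3)
        m = np.clip(raw_mask, 0, 1)
        m[m > 0.1] += offset  # augmented assignment must be its own statement
        return m

One leftover in the second hunk: `if 'fs-aug':` tests a non-empty string literal and is therefore always true, so the restored `else:` branch (the ct_sample color transfer) is unreachable; `if ct_mode == 'fs-aug':` looks like the intended condition.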