diff --git a/models/ModelBase.py b/models/ModelBase.py
index 5c3dbae..0a8bbba 100644
--- a/models/ModelBase.py
+++ b/models/ModelBase.py
@@ -226,8 +226,14 @@ class ModelBase(object):
                     io.destroy_window(wnd_name)
             else:
                 self.sample_for_preview = self.generate_next_sample()
-            self.last_sample = self.sample_for_preview
-
+
+            try:
+                self.get_static_preview()
+            except:
+                self.sample_for_preview = self.generate_next_sample()
+
+            self.last_sample = self.sample_for_preview
+
         ###Generate text summary of model hyperparameters
         #Find the longest key name and value string. Used as column widths.
         width_name = max([len(k) for k in self.options.keys()] + [17]) + 1 # Single space buffer to left edge. Minimum of 17, the length of the longest static string used "Current iteration"
diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py
index f7c6934..5bdbd09 100644
--- a/models/Model_SAEHD/Model.py
+++ b/models/Model_SAEHD/Model.py
@@ -63,6 +63,9 @@ class SAEv2Model(ModelBase):
         default_face_style_power = 0.0
         default_bg_style_power = 0.0
         if is_first_run or ask_override:
+            default_random_warp = self.options.get('random_warp', True)
+            self.options['random_warp'] = io.input_bool (f"Enable random warp of samples? ( y/n, ?:help skip:{yn_str[default_random_warp]}) : ", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness for less amount of iterations.")
+
             default_face_style_power = default_face_style_power if is_first_run else self.options.get('face_style_power', default_face_style_power)
             self.options['face_style_power'] = np.clip ( io.input_number("Face style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " % (default_face_style_power), default_face_style_power, help_message="Learn to transfer face style details such as light and color conditions. Warning: Enable it only after 10k iters, when predicted face is clear enough to start learn style. Start from 0.1 value and check history changes. Enabling this option increases the chance of model collapse."), 0.0, 100.0 )
 
@@ -84,6 +87,7 @@ class SAEv2Model(ModelBase):
             self.options['clipgrad'] = False
 
         else:
+            self.options['random_warp'] = self.options.get('random_warp', True)
             self.options['face_style_power'] = self.options.get('face_style_power', default_face_style_power)
             self.options['bg_style_power'] = self.options.get('bg_style_power', default_bg_style_power)
             self.options['apply_random_ct'] = self.options.get('apply_random_ct', False)
@@ -452,7 +456,7 @@ class SAEv2Model(ModelBase):
             self.src_dst_opt = RMSprop(lr=5e-5, clipnorm=1.0 if self.options['clipgrad'] else 0.0, tf_cpu_mode=self.options['optimizer_mode']-1)
             self.src_dst_mask_opt = RMSprop(lr=5e-5, clipnorm=1.0 if self.options['clipgrad'] else 0.0, tf_cpu_mode=self.options['optimizer_mode']-1)
             self.D_opt = RMSprop(lr=5e-5, clipnorm=1.0 if self.options['clipgrad'] else 0.0, tf_cpu_mode=self.options['optimizer_mode']-1)
-
+
             src_loss = K.mean ( 10*dssim(kernel_size=int(resolution/11.6),max_value=1.0)( target_src_masked_opt, pred_src_src_masked_opt) )
             src_loss += K.mean ( 10*K.square( target_src_masked_opt - pred_src_src_masked_opt ) )
 
@@ -517,7 +521,7 @@ class SAEv2Model(ModelBase):
                 face_type = t.FACE_TYPE_MID_FULL
             elif self.options['face_type'] == 'f':
                 face_type = t.FACE_TYPE_FULL
-
+
             t_mode_bgr = t.MODE_BGR if not self.pretrain else t.MODE_BGR_SHUFFLE
 
             training_data_src_path = self.training_data_src_path
@@ -528,20 +532,22 @@ class SAEv2Model(ModelBase):
                 training_data_src_path = self.pretraining_data_path
                 training_data_dst_path = self.pretraining_data_path
                 sort_by_yaw = False
+
+            t_img_warped = t.IMG_WARPED_TRANSFORMED if self.options['random_warp'] else t.IMG_TRANSFORMED
 
             self.set_training_data_generators ([
                     SampleGeneratorFace(training_data_src_path, sort_by_yaw_target_samples_path=training_data_dst_path if sort_by_yaw else None,
                                                                 random_ct_samples_path=training_data_dst_path if apply_random_ct else None,
                                                                 debug=self.is_debug(), batch_size=self.batch_size,
                         sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, scale_range=np.array([-0.05, 0.05])+self.src_scale_mod / 100.0 ),
-                        output_sample_types = [ {'types' : (t.IMG_WARPED_TRANSFORMED, face_type, t_mode_bgr), 'resolution':resolution, 'apply_ct': apply_random_ct},
+                        output_sample_types = [ {'types' : (t_img_warped, face_type, t_mode_bgr), 'resolution':resolution, 'apply_ct': apply_random_ct},
                                                 {'types' : (t.IMG_TRANSFORMED, face_type, t_mode_bgr), 'resolution': resolution, 'apply_ct': apply_random_ct },
                                                 {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_M), 'resolution': resolution } ]
-                        ),
+                        ),
                     SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                         sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, ),
-                        output_sample_types = [ {'types' : (t.IMG_WARPED_TRANSFORMED, face_type, t_mode_bgr), 'resolution':resolution},
+                        output_sample_types = [ {'types' : (t_img_warped, face_type, t_mode_bgr), 'resolution':resolution},
                                                 {'types' : (t.IMG_TRANSFORMED, face_type, t_mode_bgr), 'resolution': resolution},
                                                 {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_M), 'resolution': resolution} ])
                 ])
 
diff --git a/nnlib/nnlib.py b/nnlib/nnlib.py
index 5f5a4db..5d5e17f 100644
--- a/nnlib/nnlib.py
+++ b/nnlib/nnlib.py
@@ -631,26 +631,8 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                     reduction_axes = list(range(len(input_shape)))
                     del reduction_axes[self.axis]
-
-                    #broadcast_shape = [1] * len(input_shape)
-                    #broadcast_shape[self.axis] = input_shape[self.axis]
-                    #normed = x# (x - K.reshape(self.moving_mean,broadcast_shape) ) / ( K.sqrt( K.reshape(self.moving_variance,broadcast_shape)) +self.epsilon)
-                    #normed *= K.reshape(gamma,[-1]+broadcast_shape[1:] )
-                    #normed += K.reshape(beta, [-1]+broadcast_shape[1:] )
-                    #mean = K.mean(x, axis=reduction_axes)
-                    #self.moving_mean = self.add_weight(shape=(units,), name='moving_mean', initializer='zeros',trainable=False)
-                    #self.moving_variance = self.add_weight(shape=(units,), name='moving_variance',initializer='ones', trainable=False)
-
-                    #variance = K.var(x, axis=reduction_axes)
-                    #sample_size = K.prod([ K.shape(x)[axis] for axis in reduction_axes ])
-                    #sample_size = K.cast(sample_size, dtype=K.dtype(x))
-                    #variance *= sample_size / (sample_size - (1.0 + self.epsilon))
-
-                    #self.add_update([K.moving_average_update(self.moving_mean, mean, self.momentum),
-                    #                 K.moving_average_update(self.moving_variance, variance, self.momentum)], None)
-                    #return normed
-
                     del reduction_axes[0]
+
                     broadcast_shape = [1] * len(input_shape)
                     broadcast_shape[self.axis] = input_shape[self.axis]
                     mean = K.mean(x, reduction_axes, keepdims=True)
diff --git a/samplelib/SampleProcessor.py b/samplelib/SampleProcessor.py
index 3cdad0f..a8cc851 100644
--- a/samplelib/SampleProcessor.py
+++ b/samplelib/SampleProcessor.py
@@ -72,6 +72,7 @@ class SampleProcessor(object):
         MODE_GGG = 42                   #3xGrayscale
         MODE_M = 43                     #mask only
         MODE_BGR_SHUFFLE = 44           #BGR shuffle
+        MODE_BGR_RANDOM_HUE_SHIFT = 45  #BGR with random hue shift
         MODE_END = 50
 
     class Options(object):
@@ -180,7 +181,7 @@ class SampleProcessor(object):
                         return img
 
                 img = sample_bgr
-
+
                 ### Prepare a mask
                 mask = None
                 if is_face_sample:
@@ -195,8 +196,8 @@ class SampleProcessor(object):
                         if sample.ie_polys is not None:
                             sample.ie_polys.overlay_mask(mask)
                 ##################
-
-
+
+
                 if motion_blur is not None:
                     chance, mb_max_size = motion_blur
                     chance = np.clip(chance, 0, 100)
@@ -257,6 +258,15 @@ class SampleProcessor(object):
                     elif mode_type == SPTF.MODE_BGR_SHUFFLE:
                         rnd_state = np.random.RandomState (sample_rnd_seed)
                         img = np.take (img_bgr, rnd_state.permutation(img_bgr.shape[-1]), axis=-1)
+                    elif mode_type == SPTF.MODE_BGR_RANDOM_HUE_SHIFT:
+                        rnd_state = np.random.RandomState (sample_rnd_seed)
+                        hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
+                        h, s, v = cv2.split(hsv)
+
+                        h = (h + rnd_state.randint(360) ) % 360
+                        hsv = cv2.merge([h, s, v])
+
+                        img = np.clip( cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) , 0, 1 )
                     elif mode_type == SPTF.MODE_G:
                         img = np.concatenate ( (np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),-1),img_mask) , -1 )
                     elif mode_type == SPTF.MODE_GGG:
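
The new random_warp option in models/Model_SAEHD/Model.py switches the warped input sample between t.IMG_WARPED_TRANSFORMED and t.IMG_TRANSFORMED: training on warped samples forces the autoencoder to generalize facial expressions, while switching to unwarped samples late in training recovers extra sharpness. For intuition only, a minimal grid-warp sketch of the kind of distortion the flag toggles; this is not DFL's warp implementation, and random_grid_warp, grid, and strength are illustrative names:

    import cv2
    import numpy as np

    # Illustrative only: jitter a coarse control grid, upsample it to a
    # dense sampling field, and remap the image through it.
    def random_grid_warp(img, grid=5, strength=0.05, rng=None):
        rng = rng if rng is not None else np.random.default_rng()
        h, w = img.shape[:2]
        # identity sampling grid in pixel coordinates, plus random jitter
        xs = np.linspace(0, w - 1, grid, dtype=np.float32)
        ys = np.linspace(0, h - 1, grid, dtype=np.float32)
        map_x, map_y = np.meshgrid(xs, ys)
        map_x = map_x + rng.normal(0, strength * w, map_x.shape).astype(np.float32)
        map_y = map_y + rng.normal(0, strength * h, map_y.shape).astype(np.float32)
        # densify the coarse maps to one sampling coordinate per output pixel
        map_x = cv2.resize(map_x, (w, h), interpolation=cv2.INTER_CUBIC)
        map_y = cv2.resize(map_y, (w, h), interpolation=cv2.INTER_CUBIC)
        return cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_REPLICATE)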
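
The nnlib.py hunk only deletes the dead moving-average experiment from the normalization layer's call; the surviving code computes per-sample, per-channel statistics by reducing over every axis except batch (axis 0) and channels (self.axis), with keepdims=True so they broadcast back over the input. A NumPy sketch of that reduction, assuming NHWC input with channel axis -1; the final (x - mean) / std step is the usual instance-norm pattern and is assumed here, not shown in the hunk:

    import numpy as np

    def instance_norm(x, channel_axis=-1, epsilon=1e-5):
        reduction_axes = list(range(x.ndim))
        del reduction_axes[channel_axis]  # keep per-channel statistics
        del reduction_axes[0]             # keep per-sample statistics
        mean = x.mean(axis=tuple(reduction_axes), keepdims=True)
        std = x.std(axis=tuple(reduction_axes), keepdims=True) + epsilon
        return (x - mean) / std           # assumed normalization step

    x = np.random.rand(2, 64, 64, 3).astype(np.float32)
    print(instance_norm(x).shape)  # (2, 64, 64, 3)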
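
The new MODE_BGR_RANDOM_HUE_SHIFT branch in SampleProcessor.py rotates the hue channel by a random offset in HSV space, leaving saturation and value untouched. The same transform as a standalone function, assuming a float32 BGR image in [0, 1]: for float input OpenCV maps hue to [0, 360), so randint(360) plus a modulo wrap covers the full hue circle:

    import cv2
    import numpy as np

    def random_hue_shift(img_bgr, sample_rnd_seed=None):
        rnd_state = np.random.RandomState(sample_rnd_seed)
        hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(hsv)
        h = (h + rnd_state.randint(360)) % 360  # rotate hue, wrap at 360
        return np.clip(cv2.cvtColor(cv2.merge([h, s, v]), cv2.COLOR_HSV2BGR), 0, 1)

    img = np.random.rand(128, 128, 3).astype(np.float32)
    out = random_hue_shift(img, sample_rnd_seed=42)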