From 0c10249bb2d5e820f840f99d33105a543167a75b Mon Sep 17 00:00:00 2001 From: Jan Date: Thu, 9 Dec 2021 16:00:21 +0100 Subject: [PATCH 01/13] modified schema --- models/Model_AMP/config_schema.json | 10 +++++++--- models/Model_XSeg/config_schema.json | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/models/Model_AMP/config_schema.json b/models/Model_AMP/config_schema.json index 9630537..2b3079f 100644 --- a/models/Model_AMP/config_schema.json +++ b/models/Model_AMP/config_schema.json @@ -4,11 +4,16 @@ "definitions": { "dfl_config": { "type": "object", - "additionalProperties": false, + "additionalProperties": true, "properties": { "use_fp16": { "type": "boolean" }, + "cpu_cap": { + "type": "integer", + "minimum": 1, + "maximum": 256 + }, "morph_factor": { "type": "number", "minimum":0.0, @@ -205,6 +210,7 @@ } }, "required": [ + "cpu_cap", "adabelief", "ae_dims", "autobackup_hour", @@ -232,12 +238,10 @@ "maximum_n_backups", "models_opt_on_gpu", "mouth_prio", - "pretrain", "random_blur", "random_color", "random_downsample", "random_dst_flip", - "random_hsv_power", "random_jpeg", "random_noise", "random_src_flip", diff --git a/models/Model_XSeg/config_schema.json b/models/Model_XSeg/config_schema.json index e53fc8b..927b5a5 100644 --- a/models/Model_XSeg/config_schema.json +++ b/models/Model_XSeg/config_schema.json @@ -31,7 +31,7 @@ "required": [ "batch_size", "face_type", - "pretrain", + "pretrain" ], "title": "dfl_config" } From 8ed0e9a72fa95e75378701d7d4d260c5b1c95ee7 Mon Sep 17 00:00:00 2001 From: Jan Date: Thu, 9 Dec 2021 16:01:40 +0100 Subject: [PATCH 02/13] cpu_cap, preview, learning rate options --- models/Model_AMP/Model.py | 12 +++++++---- models/Model_SAEHD/Model.py | 31 ++++++++++++++++++++------- models/Model_SAEHD/config_schema.json | 9 ++++++-- 3 files changed, 38 insertions(+), 14 deletions(-) diff --git a/models/Model_AMP/Model.py b/models/Model_AMP/Model.py index 4ab859f..8bbcfaf 100644 --- a/models/Model_AMP/Model.py +++ 
b/models/Model_AMP/Model.py @@ -58,6 +58,7 @@ class AMPModel(ModelBase): default_random_color = self.options['random_color'] = self.load_or_def_option('random_color', False) default_clipgrad = self.options['clipgrad'] = self.load_or_def_option('clipgrad', False) default_usefp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False) + default_cpu_cap = self.options['cpu_cap'] = self.load_or_def_option('default_cpu_cap', 8) ask_override = False if self.read_from_conf else self.ask_override() if self.is_first_run() or ask_override: @@ -72,6 +73,7 @@ self.ask_random_dst_flip() self.ask_batch_size(8) self.options['use_fp16'] = io.input_bool ("Use fp16", default_usefp16, help_message='Increases training/inference speed, reduces model size. Model may crash. Enable it after 1-5k iters.') + self.options['cpu_cap'] = np.clip ( io.input_int ("Max cpu cores to use.", default_cpu_cap, add_info="1 - 256", help_message="Typical fine value is 8"), 1, 256 ) @@ -106,6 +108,8 @@ if self.is_first_run() or ask_override: if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf: + + morph_factor = np.clip ( io.input_number ("Morph factor.", default_morph_factor, add_info="0.1 .. 0.5", help_message="Typical fine value is 0.5"), 0.1, 0.5 ) self.options['morph_factor'] = morph_factor @@ -142,7 +146,7 @@ self.options['random_blur'] = io.input_bool("Enable random blur of samples", default_random_blur, help_message="") self.options['random_jpeg'] = io.input_bool("Enable random jpeg compression of samples", default_random_jpeg, help_message="") - self.options['random_hsv_power'] = np.clip ( io.input_number ("Random hue/saturation/light intensity", default_random_hsv_power, add_info="0.0 .. 0.3", help_message="Random hue/saturation/light intensity applied to the src face set only at the input of the neural network. Stabilizes color perturbations during face swapping. 
Reduces the quality of the color transfer by selecting the closest one in the src faceset. Thus the src faceset must be diverse enough. Typical fine value is 0.05"), 0.0, 0.3 ) + #self.options['random_hsv_power'] = np.clip ( io.input_number ("Random hue/saturation/light intensity", default_random_hsv_power, add_info="0.0 .. 0.3", help_message="Random hue/saturation/light intensity applied to the src face set only at the input of the neural network. Stabilizes color perturbations during face swapping. Reduces the quality of the color transfer by selecting the closest one in the src faceset. Thus the src faceset must be diverse enough. Typical fine value is 0.05"), 0.0, 0.3 ) self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 5.0", help_message="Forces the neural network to learn small details of the face. Enable it only when the face is trained enough with random_warp(off), and don't disable. The higher the value, the higher the chances of artifacts. 
Typical fine value is 0.1"), 0.0, 5.0 ) @@ -196,7 +200,7 @@ class AMPModel(ModelBase): morph_factor = self.options['morph_factor'] gan_power = self.gan_power = self.options['gan_power'] random_warp = self.options['random_warp'] - random_hsv_power = self.options['random_hsv_power'] + #random_hsv_power = self.options['random_hsv_power'] if 'eyes_mouth_prio' in self.options: self.options.pop('eyes_mouth_prio') @@ -741,7 +745,7 @@ class AMPModel(ModelBase): random_ct_samples_path=training_data_dst_path if ct_mode is not None else None #and not self.pretrain - cpu_count = multiprocessing.cpu_count() + cpu_count = min(multiprocessing.cpu_count(), self.options['cpu_cap']) src_generators_count = cpu_count // 2 dst_generators_count = cpu_count // 2 if ct_mode is not None: @@ -762,7 +766,7 @@ class AMPModel(ModelBase): 'random_blur': self.options['random_blur'], 'random_jpeg': self.options['random_jpeg'], 'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, - 'random_hsv_shift_amount' : random_hsv_power, + #'random_hsv_shift_amount' : random_hsv_power, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False, 'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py index 51569ed..26c9766 100644 --- a/models/Model_SAEHD/Model.py +++ b/models/Model_SAEHD/Model.py @@ -72,7 +72,9 @@ class SAEHDModel(ModelBase): default_random_color = self.options['random_color'] = self.load_or_def_option('random_color', False) default_clipgrad = self.options['clipgrad'] = self.load_or_def_option('clipgrad', False) default_pretrain = self.options['pretrain'] = self.load_or_def_option('pretrain', False) - #default_use_fp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False) + default_cpu_cap = self.options['cpu_cap'] = self.load_or_def_option('cpu_cap', 8) + default_preview_samples = 
self.options['default_preview_samples'] = self.load_or_def_option('preview_samples', 4) + default_lr_modifier = self.options['lr_modifier'] = self.load_or_def_option('lr_modifier', 0) ask_override = False if self.read_from_conf else self.ask_override() if self.is_first_run() or ask_override: @@ -81,12 +83,15 @@ self.ask_autobackup_hour() self.ask_maximum_n_backups() self.ask_write_preview_history() + self.options['preview_samples'] = np.clip ( io.input_int ("Number of samples to preview", default_cpu_cap, add_info="1 - 16", help_message="Typical fine value is 4"), 1, 16 ) self.ask_target_iter() self.ask_retraining_samples() self.ask_random_src_flip() self.ask_random_dst_flip() self.ask_batch_size(suggest_batch_size) self.options['use_fp16'] = io.input_bool ("Use fp16", default_usefp16, help_message='Increases training/inference speed, reduces model size. Model may crash. Enable it after 1-5k iters.') + self.options['cpu_cap'] = np.clip ( io.input_int ("Max cpu cores to use.", default_cpu_cap, add_info="1 - 256", help_message="Typical fine value is 8"), 1, 256 ) + if self.is_first_run(): if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf: @@ -176,10 +181,12 @@ self.options['loss_function'] = io.input_str(f"Loss function", default_loss_function, ['SSIM', 'MS-SSIM', 'MS-SSIM+L1'], help_message="Change loss function used for image quality assessment.") + + self.options['lr_modifier'] = np.clip (io.input_int("Learning rate factor", default_lr_modifier, add_info="-100 .. 100", help_message="Modify the Learning rate: 100 == multiply by 4, -100 == divide by 4"), -100, 100) self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. 
When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake for less amount of iterations.") - self.options['random_hsv_power'] = np.clip ( io.input_number ("Random hue/saturation/light intensity", default_random_hsv_power, add_info="0.0 .. 0.3", help_message="Random hue/saturation/light intensity applied to the src face set only at the input of the neural network. Stabilizes color perturbations during face swapping. Reduces the quality of the color transfer by selecting the closest one in the src faceset. Thus the src faceset must be diverse enough. Typical fine value is 0.05"), 0.0, 0.3 ) + #self.options['random_hsv_power'] = np.clip ( io.input_number ("Random hue/saturation/light intensity", default_random_hsv_power, add_info="0.0 .. 0.3", help_message="Random hue/saturation/light intensity applied to the src face set only at the input of the neural network. Stabilizes color perturbations during face swapping. Reduces the quality of the color transfer by selecting the closest one in the src faceset. Thus the src faceset must be diverse enough. 
Typical fine value is 0.05"), 0.0, 0.3 ) self.options['random_downsample'] = io.input_bool("Enable random downsample of samples", default_random_downsample, help_message="") self.options['random_noise'] = io.input_bool("Enable random noise added to samples", default_random_noise, help_message="") @@ -270,14 +277,14 @@ class SAEHDModel(ModelBase): random_warp = False if self.pretrain else self.options['random_warp'] random_src_flip = self.random_src_flip if not self.pretrain else True random_dst_flip = self.random_dst_flip if not self.pretrain else True - random_hsv_power = self.options['random_hsv_power'] if not self.pretrain else 0.0 + #random_hsv_power = self.options['random_hsv_power'] if not self.pretrain else 0.0 blur_out_mask = self.options['blur_out_mask'] if self.pretrain: self.options_show_override['lr_dropout'] = 'n' self.options_show_override['random_warp'] = False self.options_show_override['gan_power'] = 0.0 - self.options_show_override['random_hsv_power'] = 0.0 + #self.options_show_override['random_hsv_power'] = 0.0 self.options_show_override['face_style_power'] = 0.0 self.options_show_override['bg_style_power'] = 0.0 self.options_show_override['uniform_yaw'] = True @@ -360,7 +367,14 @@ class SAEHDModel(ModelBase): self.model_filename_list += [ [self.D_src, 'GAN.npy'] ] # Initialize optimizers - lr=5e-5 + lr_modifier = self.options['lr_modifier'] + if lr_modifier == 0: + lr = 5e-5 + elif lr_modifier > 0: + lr = 5e-5 / abs( lr_modifier * 4/100 ) + else: + lr = 5e-5 * abs( lr_modifier * 4/100 ) + if self.options['lr_dropout'] in ['y','cpu'] and not self.pretrain: lr_cos = 500 lr_dropout = 0.3 @@ -795,7 +809,7 @@ class SAEHDModel(ModelBase): random_ct_samples_path=training_data_dst_path if ct_mode is not None and not self.pretrain else None - cpu_count = multiprocessing.cpu_count() + cpu_count = min(multiprocessing.cpu_count(), self.options['cpu_cap']) src_generators_count = cpu_count // 2 dst_generators_count = cpu_count // 2 if ct_mode is not None: @@ 
-815,7 +829,7 @@ class SAEHDModel(ModelBase): 'random_noise': self.options['random_noise'], 'random_blur': self.options['random_blur'], 'random_jpeg': self.options['random_jpeg'], - 'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, 'random_hsv_shift_amount' : random_hsv_power, + 'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, #'random_hsv_shift_amount' : random_hsv_power, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, @@ -965,7 +979,8 @@ class SAEHDModel(ModelBase): target_srcm, target_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format) for x in ([target_srcm, target_dstm] )] - n_samples = min(4, self.get_batch_size(), 800 // self.resolution ) + #n_samples = min(4, self.get_batch_size(), 800 // self.resolution ) + n_samples = min(self.get_batch_size(), self.options['preview_samples']) if filenames is not None and len(filenames) > 0: for i in range(n_samples): diff --git a/models/Model_SAEHD/config_schema.json b/models/Model_SAEHD/config_schema.json index 57e4095..d9587cc 100644 --- a/models/Model_SAEHD/config_schema.json +++ b/models/Model_SAEHD/config_schema.json @@ -4,7 +4,7 @@ "definitions": { "dfl_config": { "type": "object", - "additionalProperties": false, + "additionalProperties": true, "properties": { "use_fp16": { "type": "boolean" @@ -13,6 +13,11 @@ "type": "string", "pattern": "^(df|liae)-(\\b(?!\\w*(\\w)\\w*\\1)[udtc]+\\b)+|^(df|liae)$" }, + "cpu_cap": { + "type": "integer", 
+ "minimum": 1, + "maximum": 256 + }, "resolution": { "type": "integer", "minimum": 64, @@ -213,6 +218,7 @@ } }, "required": [ + "cpu_cap", "adabelief", "ae_dims", "archi", @@ -246,7 +252,6 @@ "random_color", "random_downsample", "random_dst_flip", - "random_hsv_power", "random_jpeg", "random_noise", "random_src_flip", From 795bf394954e61b30621a5524e1be67b69db506b Mon Sep 17 00:00:00 2001 From: Jan Date: Thu, 9 Dec 2021 16:01:57 +0100 Subject: [PATCH 03/13] cap cpu during filtering --- samplelib/SampleGeneratorFaceXSeg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samplelib/SampleGeneratorFaceXSeg.py b/samplelib/SampleGeneratorFaceXSeg.py index 334213d..240f721 100644 --- a/samplelib/SampleGeneratorFaceXSeg.py +++ b/samplelib/SampleGeneratorFaceXSeg.py @@ -209,7 +209,7 @@ class SegmentedSampleFilterSubprocessor(Subprocessor): #override def process_info_generator(self): - for i in range(multiprocessing.cpu_count()): + for i in range(min(multiprocessing.cpu_count(),8)): yield 'CPU%d' % (i), {}, {'samples':self.samples, 'count_xseg_mask':self.count_xseg_mask} #override From 447ac28b4db14a3dd39e4919871f2cda01d12882 Mon Sep 17 00:00:00 2001 From: Jan Date: Thu, 9 Dec 2021 16:54:26 +0100 Subject: [PATCH 04/13] bug fix modifier --- models/Model_SAEHD/Model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py index 26c9766..764e88d 100644 --- a/models/Model_SAEHD/Model.py +++ b/models/Model_SAEHD/Model.py @@ -370,7 +370,7 @@ class SAEHDModel(ModelBase): lr_modifier = self.options['lr_modifier'] if lr_modifier == 0: lr = 5e-5 - elif lr_modifier > 0: + elif lr_modifier < 0: lr = 5e-5 / abs( lr_modifier * 4/100 ) else: lr = 5e-5 * abs( lr_modifier * 4/100 ) From f0b018ceb1ac11669c411a5ee1df970276ca74c6 Mon Sep 17 00:00:00 2001 From: Jan Date: Thu, 9 Dec 2021 17:39:42 +0100 Subject: [PATCH 05/13] added preview_samples, learning_rate and cpu cores as option --- 
models/Model_AMP/Model.py | 50 ++++++++++++++++++--------- models/Model_AMP/config_schema.json | 6 ++++ models/Model_SAEHD/Model.py | 2 +- models/Model_SAEHD/config_schema.json | 12 +++++++ 4 files changed, 53 insertions(+), 17 deletions(-) diff --git a/models/Model_AMP/Model.py b/models/Model_AMP/Model.py index 8bbcfaf..c54b57c 100644 --- a/models/Model_AMP/Model.py +++ b/models/Model_AMP/Model.py @@ -59,6 +59,8 @@ class AMPModel(ModelBase): default_clipgrad = self.options['clipgrad'] = self.load_or_def_option('clipgrad', False) default_usefp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False) default_cpu_cap = self.options['cpu_cap'] = self.load_or_def_option('default_cpu_cap', 8) + default_preview_samples = self.options['preview_samples'] = self.load_or_def_option('preview_samples', 4) + default_lr_modifier = self.options['lr_modifier'] = self.load_or_def_option('lr_modifier', 0) ask_override = False if self.read_from_conf else self.ask_override() if self.is_first_run() or ask_override: @@ -67,6 +69,7 @@ class AMPModel(ModelBase): self.ask_session_name() self.ask_maximum_n_backups() self.ask_write_preview_history() + self.options['preview_samples'] = np.clip ( io.input_int ("Number of samples to preview", default_cpu_cap, add_info="1 - 16", help_message="Typical fine value is 4"), 1, 16 ) self.ask_target_iter() self.ask_retraining_samples() self.ask_random_src_flip() @@ -125,6 +128,8 @@ class AMPModel(ModelBase): self.options['blur_out_mask'] = io.input_bool ("Blur out mask", default_blur_out_mask, help_message='Blurs nearby area outside of applied face mask of training samples. The result is the background near the face is smoothed and less noticeable on swapped face. 
The exact xseg mask in src and dst faceset is required.') self.options['loss_function'] = io.input_str(f"Loss function", default_loss_function, ['SSIM', 'MS-SSIM', 'MS-SSIM+L1'], help_message="Change loss function used for image quality assessment.") + self.options['lr_modifier'] = np.clip (io.input_int("Learning rate factor", default_lr_modifier, add_info="-100 .. 100", help_message="Modify the Learning rate: 100 == multiply by 4, -100 == divide by 4"), -100, 100) + self.options['lr_dropout'] = io.input_str (f"Use learning rate dropout", default_lr_dropout, ['n','y','cpu'], help_message="When the face is trained enough, you can enable this option to get extra sharpness and reduce subpixel shake for less amount of iterations. Enabled it before `disable random warp` and before GAN. \nn - disabled.\ny - enabled\ncpu - enabled on CPU. This allows not to use extra VRAM, sacrificing 20% time of iteration.") default_gan_power = self.options['gan_power'] = self.load_or_def_option('gan_power', 0.0) @@ -146,6 +151,7 @@ self.options['random_blur'] = io.input_bool("Enable random blur of samples", default_random_blur, help_message="") self.options['random_jpeg'] = io.input_bool("Enable random jpeg compression of samples", default_random_jpeg, help_message="") + #self.options['random_hsv_power'] = np.clip ( io.input_number ("Random hue/saturation/light intensity", default_random_hsv_power, add_info="0.0 .. 0.3", help_message="Random hue/saturation/light intensity applied to the src face set only at the input of the neural network. Stabilizes color perturbations during face swapping. Reduces the quality of the color transfer by selecting the closest one in the src faceset. Thus the src faceset must be diverse enough. Typical fine value is 0.05"), 0.0, 0.3 ) self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 5.0", help_message="Forces the neural network to learn small details of the face. 
Enable it only when the face is trained enough with random_warp(off), and don't disable. The higher the value, the higher the chances of artifacts. Typical fine value is 0.1"), 0.0, 5.0 ) @@ -388,6 +394,13 @@ class AMPModel(ModelBase): self.model_filename_list += [ [self.GAN, 'GAN.npy'] ] # Initialize optimizers + lr_modifier = self.options['lr_modifier'] + if lr_modifier == 0: + lr = 5e-5 + elif lr_modifier < 0: + lr = 5e-5 / abs( lr_modifier * 4/100 ) + else: + lr = 5e-5 * abs( lr_modifier * 4/100 ) clipnorm = 1.0 if self.options['clipgrad'] else 0.0 if self.options['lr_dropout'] in ['y','cpu']: lr_cos = 500 @@ -398,17 +411,17 @@ class AMPModel(ModelBase): self.G_weights = self.encoder.get_weights() + self.decoder.get_weights() OptimizerClass = nn.AdaBelief if adabelief else nn.RMSprop - self.src_dst_opt = OptimizerClass(lr=5e-5, lr_dropout=lr_dropout, clipnorm=clipnorm, name='src_dst_opt') + self.src_dst_opt = OptimizerClass(lr=lr, lr_dropout=lr_dropout, clipnorm=clipnorm, name='src_dst_opt') self.src_dst_opt.initialize_variables (self.G_weights, vars_on_cpu=optimizer_vars_on_cpu) self.model_filename_list += [ (self.src_dst_opt, 'src_dst_opt.npy') ] if gan_power != 0: if self.options['gan_version'] == 2: - self.GAN_opt = OptimizerClass(lr=5e-5, lr_dropout=lr_dropout, lr_cos=lr_cos, clipnorm=clipnorm, name='D_src_dst_opt') + self.GAN_opt = OptimizerClass(lr=lr, lr_dropout=lr_dropout, lr_cos=lr_cos, clipnorm=clipnorm, name='D_src_dst_opt') self.GAN_opt.initialize_variables ( self.GAN.get_weights(), vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu')#+self.D_src_x2.get_weights() self.model_filename_list += [ (self.GAN_opt, 'D_src_v2_opt.npy') ] else: - self.GAN_opt = OptimizerClass(lr=5e-5, lr_dropout=lr_dropout, lr_cos=lr_cos, clipnorm=clipnorm, name='GAN_opt') + self.GAN_opt = OptimizerClass(lr=lr, lr_dropout=lr_dropout, lr_cos=lr_cos, clipnorm=clipnorm, name='GAN_opt') self.GAN_opt.initialize_variables ( 
self.GAN.get_weights(), vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu')#+self.D_src_x2.get_weights() self.model_filename_list += [ (self.GAN_opt, 'GAN_opt.npy') ] @@ -920,27 +933,32 @@ class AMPModel(ModelBase): target_srcm, target_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format) for x in ([target_srcm, target_dstm] )] - n_samples = min(4, self.get_batch_size(), 800 // self.resolution ) + #n_samples = min(4, self.get_batch_size(), 800 // self.resolution ) + n_samples = min(self.get_batch_size(), self.options['preview_samples']) result = [] i = np.random.randint(n_samples) if not for_history else 0 - if filenames is not None and len(filenames) > 0: - S[i] = label_face_filename(S[i], filenames[0][i]) - D[i] = label_face_filename(D[i], filenames[1][i]) - - st = [ np.concatenate ((S[i], D[i], DD[i]*DDM_000[i]), axis=1) ] - st += [ np.concatenate ((SS[i], DD[i], SD_100[i] ), axis=1) ] + for i in range(n_samples if not for_history else 1): + if filenames is not None and len(filenames) > 0: + S[i] = label_face_filename(S[i], filenames[0][i]) + D[i] = label_face_filename(D[i], filenames[1][i]) + st = [] + for i in range(n_samples): + st += [ np.concatenate ((S[i], D[i], DD[i]*DDM_000[i]), axis=1) ] + st += [ np.concatenate ((SS[i], DD[i], SD_100[i] ), axis=1) ] result += [ ('AMP morph 1.0', np.concatenate (st, axis=0 )), ] - - st = [ np.concatenate ((DD[i], SD_025[i], SD_050[i]), axis=1) ] - st += [ np.concatenate ((SD_065[i], SD_075[i], SD_100[i]), axis=1) ] + st = [] + for i in range(n_samples): + st += [ np.concatenate ((DD[i], SD_025[i], SD_050[i]), axis=1) ] + st += [ np.concatenate ((SD_065[i], SD_075[i], SD_100[i]), axis=1) ] result += [ ('AMP morph list', np.concatenate (st, axis=0 )), ] - - st = [ np.concatenate ((DD[i], SD_025[i]*DDM_025[i]*SDM_025[i], SD_050[i]*DDM_050[i]*SDM_050[i]), axis=1) ] - st += [ np.concatenate ((SD_065[i]*DDM_065[i]*SDM_065[i], SD_075[i]*DDM_075[i]*SDM_075[i], 
SD_100[i]*DDM_100[i]*SDM_100[i]), axis=1) ] + st = [] + for i in range(n_samples): + st += [ np.concatenate ((DD[i], SD_025[i]*DDM_025[i]*SDM_025[i], SD_050[i]*DDM_050[i]*SDM_050[i]), axis=1) ] + st += [ np.concatenate ((SD_065[i]*DDM_065[i]*SDM_065[i], SD_075[i]*DDM_075[i]*SDM_075[i], SD_100[i]*DDM_100[i]*SDM_100[i]), axis=1) ] result += [ ('AMP morph list masked', np.concatenate (st, axis=0 )), ] return result diff --git a/models/Model_AMP/config_schema.json b/models/Model_AMP/config_schema.json index 2b3079f..96ec062 100644 --- a/models/Model_AMP/config_schema.json +++ b/models/Model_AMP/config_schema.json @@ -102,6 +102,11 @@ "MS-SSIM+L1" ] }, + "lr_modifier": { + "type": "integer", + "minimum": -100, + "maximum": 100 + }, "random_warp": { "type": "boolean" }, @@ -234,6 +239,7 @@ "gan_version", "loss_function", "lr_dropout", + "lr_modifier", "masked_training", "maximum_n_backups", "models_opt_on_gpu", diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py index 764e88d..17a5de7 100644 --- a/models/Model_SAEHD/Model.py +++ b/models/Model_SAEHD/Model.py @@ -73,7 +73,7 @@ class SAEHDModel(ModelBase): default_clipgrad = self.options['clipgrad'] = self.load_or_def_option('clipgrad', False) default_pretrain = self.options['pretrain'] = self.load_or_def_option('pretrain', False) default_cpu_cap = self.options['cpu_cap'] = self.load_or_def_option('cpu_cap', 8) - default_preview_samples = self.options['default_preview_samples'] = self.load_or_def_option('preview_samples', 4) + default_preview_samples = self.options['preview_samples'] = self.load_or_def_option('preview_samples', 4) default_lr_modifier = self.options['lr_modifier'] = self.load_or_def_option('lr_modifier', 0) ask_override = False if self.read_from_conf else self.ask_override() diff --git a/models/Model_SAEHD/config_schema.json b/models/Model_SAEHD/config_schema.json index d9587cc..9533f0c 100644 --- a/models/Model_SAEHD/config_schema.json +++ b/models/Model_SAEHD/config_schema.json @@ -95,6 
+95,11 @@ "MS-SSIM+L1" ] }, + "lr_modifier": { + "type": "integer", + "minimum": -100, + "maximum": 100 + }, "random_warp": { "type": "boolean" }, @@ -155,6 +160,11 @@ "pretrain": { "type": "boolean" }, + "preview_samples": { + "type": "integer", + "minimum": 1, + "maximum": 16 + }, "session_name": { "type": "string" }, @@ -243,11 +253,13 @@ "gan_version", "loss_function", "lr_dropout", + "lr_modifier", "masked_training", "maximum_n_backups", "models_opt_on_gpu", "mouth_prio", "pretrain", + "preview_samples", "random_blur", "random_color", "random_downsample", From 84cb803d2aea403ced19a263b1216614f05beec3 Mon Sep 17 00:00:00 2001 From: Jan Date: Thu, 9 Dec 2021 18:05:19 +0100 Subject: [PATCH 06/13] added alternative preview --- models/Model_AMP/Model.py | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/models/Model_AMP/Model.py b/models/Model_AMP/Model.py index c54b57c..04cb63a 100644 --- a/models/Model_AMP/Model.py +++ b/models/Model_AMP/Model.py @@ -938,28 +938,36 @@ class AMPModel(ModelBase): result = [] - i = np.random.randint(n_samples) if not for_history else 0 + #i = np.random.randint(n_samples) if not for_history else 0 for i in range(n_samples if not for_history else 1): if filenames is not None and len(filenames) > 0: S[i] = label_face_filename(S[i], filenames[0][i]) D[i] = label_face_filename(D[i], filenames[1][i]) - st = [] - for i in range(n_samples): - st += [ np.concatenate ((S[i], D[i], DD[i]*DDM_000[i]), axis=1) ] - st += [ np.concatenate ((SS[i], DD[i], SD_100[i] ), axis=1) ] - - result += [ ('AMP morph 1.0', np.concatenate (st, axis=0 )), ] - st = [] - for i in range(n_samples): - st += [ np.concatenate ((DD[i], SD_025[i], SD_050[i]), axis=1) ] + st = [] + temp_r = [] + for i in range(n_samples if not for_history else 1): + st = [ np.concatenate ((S[i], SS[i], D[i]), axis=1) ] + st += [ np.concatenate ((DD[i], DD[i]*DDM_000[i], SD_100[i] ), axis=1) ] + temp_r += [ np.concatenate (st, 
axis=1) ] + result += [ ('AMP morph 1.0', np.concatenate (temp_r, axis=0 )), ] + # result += [ ('AMP morph 1.0', np.concatenate (st, axis=0 )), ] + st = [] + temp_r = [] + for i in range(n_samples if not for_history else 1): + st = [ np.concatenate ((DD[i], SD_025[i], SD_050[i]), axis=1) ] st += [ np.concatenate ((SD_065[i], SD_075[i], SD_100[i]), axis=1) ] - result += [ ('AMP morph list', np.concatenate (st, axis=0 )), ] - st = [] - for i in range(n_samples): - st += [ np.concatenate ((DD[i], SD_025[i]*DDM_025[i]*SDM_025[i], SD_050[i]*DDM_050[i]*SDM_050[i]), axis=1) ] + temp_r += [ np.concatenate (st, axis=1) ] + result += [ ('AMP morph list', np.concatenate (temp_r, axis=0 )), ] + #result += [ ('AMP morph list', np.concatenate (st, axis=0 )), ] + st = [] + temp_r = [] + for i in range(n_samples if not for_history else 1): + st = [ np.concatenate ((DD[i], SD_025[i]*DDM_025[i]*SDM_025[i], SD_050[i]*DDM_050[i]*SDM_050[i]), axis=1) ] st += [ np.concatenate ((SD_065[i]*DDM_065[i]*SDM_065[i], SD_075[i]*DDM_075[i]*SDM_075[i], SD_100[i]*DDM_100[i]*SDM_100[i]), axis=1) ] - result += [ ('AMP morph list masked', np.concatenate (st, axis=0 )), ] + temp_r += [ np.concatenate (st, axis=1) ] + result += [ ('AMP morph list masked', np.concatenate (temp_r, axis=0 )), ] + #result += [ ('AMP morph list masked', np.concatenate (st, axis=0 )), ] return result From e6b55e972c45db4a6568c60816f25ca23b58419d Mon Sep 17 00:00:00 2001 From: seranus Date: Thu, 9 Dec 2021 18:11:19 +0100 Subject: [PATCH 07/13] preview images update --- models/Model_SAEHD/Model.py | 6 ++++-- models/Model_SAEHD/config_schema.json | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py index 17a5de7..be6a9bb 100644 --- a/models/Model_SAEHD/Model.py +++ b/models/Model_SAEHD/Model.py @@ -979,8 +979,10 @@ class SAEHDModel(ModelBase): target_srcm, target_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format) for x in ([target_srcm, 
target_dstm] )] - #n_samples = min(4, self.get_batch_size(), 800 // self.resolution ) - n_samples = min(self.get_batch_size(), self.options['preview_samples']) + if self.options['preview_samples'] is None: + n_samples = min(4, self.get_batch_size(), 800 // self.resolution ) + else: + n_samples = min(self.get_batch_size(), self.options['preview_samples']) if filenames is not None and len(filenames) > 0: for i in range(n_samples): diff --git a/models/Model_SAEHD/config_schema.json b/models/Model_SAEHD/config_schema.json index 9533f0c..f42cb93 100644 --- a/models/Model_SAEHD/config_schema.json +++ b/models/Model_SAEHD/config_schema.json @@ -163,7 +163,7 @@ "preview_samples": { "type": "integer", "minimum": 1, - "maximum": 16 + "maximum": 64 }, "session_name": { "type": "string" From cb677f57824ab27ce0f41eb045e0b4e653cebd92 Mon Sep 17 00:00:00 2001 From: seranus Date: Thu, 9 Dec 2021 18:12:55 +0100 Subject: [PATCH 08/13] force complete preview --- models/Model_SAEHD/Model.py | 2 +- models/Model_SAEHD/config_schema.json | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py index be6a9bb..375eee7 100644 --- a/models/Model_SAEHD/Model.py +++ b/models/Model_SAEHD/Model.py @@ -989,7 +989,7 @@ class SAEHDModel(ModelBase): S[i] = label_face_filename(S[i], filenames[0][i]) D[i] = label_face_filename(D[i], filenames[1][i]) - if self.resolution <= 256: + if self.resolution <= 256 or self.options['preview_samples'] == True: result = [] st = [] diff --git a/models/Model_SAEHD/config_schema.json b/models/Model_SAEHD/config_schema.json index f42cb93..6f8d262 100644 --- a/models/Model_SAEHD/config_schema.json +++ b/models/Model_SAEHD/config_schema.json @@ -165,6 +165,9 @@ "minimum": 1, "maximum": 64 }, + "force_complete_preview": { + "type": "boolean" + }, "session_name": { "type": "string" }, From 790c888c5e0efc74f0e9e96d92a2f42dfab13b03 Mon Sep 17 00:00:00 2001 From: seranus Date: Thu, 9 Dec 2021 18:17:46 
+0100 Subject: [PATCH 09/13] amp force preview sample and prop change --- models/Model_AMP/Model.py | 6 ++++-- models/Model_AMP/config_schema.json | 5 +++++ models/Model_SAEHD/Model.py | 4 ++-- models/Model_SAEHD/config_schema.json | 2 +- 4 files changed, 12 insertions(+), 5 deletions(-) diff --git a/models/Model_AMP/Model.py b/models/Model_AMP/Model.py index 04cb63a..151da7b 100644 --- a/models/Model_AMP/Model.py +++ b/models/Model_AMP/Model.py @@ -933,8 +933,10 @@ class AMPModel(ModelBase): target_srcm, target_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format) for x in ([target_srcm, target_dstm] )] - #n_samples = min(4, self.get_batch_size(), 800 // self.resolution ) - n_samples = min(self.get_batch_size(), self.options['preview_samples']) + if self.options['force_preview_samples_num'] is None: + n_samples = min(4, self.get_batch_size(), 800 // self.resolution ) + else: + n_samples = min(self.get_batch_size(), self.options['force_preview_samples_num']) result = [] diff --git a/models/Model_AMP/config_schema.json b/models/Model_AMP/config_schema.json index 96ec062..08e8330 100644 --- a/models/Model_AMP/config_schema.json +++ b/models/Model_AMP/config_schema.json @@ -179,6 +179,11 @@ "random_dst_flip": { "type": "boolean" }, + "force_preview_samples_num": { + "type": "integer", + "minimum": 1, + "maximum": 64 + }, "batch_size": { "type": "integer", "minimum": 1 diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py index 375eee7..a464483 100644 --- a/models/Model_SAEHD/Model.py +++ b/models/Model_SAEHD/Model.py @@ -979,10 +979,10 @@ class SAEHDModel(ModelBase): target_srcm, target_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format) for x in ([target_srcm, target_dstm] )] - if self.options['preview_samples'] is None: + if self.options['force_preview_samples_num'] is None: n_samples = min(4, self.get_batch_size(), 800 // self.resolution ) else: - n_samples = min(self.get_batch_size(), self.options['preview_samples']) + n_samples = 
min(self.get_batch_size(), self.options['force_preview_samples_num']) if filenames is not None and len(filenames) > 0: for i in range(n_samples): diff --git a/models/Model_SAEHD/config_schema.json b/models/Model_SAEHD/config_schema.json index 6f8d262..6af3c28 100644 --- a/models/Model_SAEHD/config_schema.json +++ b/models/Model_SAEHD/config_schema.json @@ -160,7 +160,7 @@ "pretrain": { "type": "boolean" }, - "preview_samples": { + "force_preview_samples_num": { "type": "integer", "minimum": 1, "maximum": 64 From 1b521674c5c760824c80d8bb93e4d36503e6d295 Mon Sep 17 00:00:00 2001 From: seranus Date: Thu, 9 Dec 2021 18:27:47 +0100 Subject: [PATCH 10/13] amp - preview sample fix --- models/Model_AMP/Model.py | 7 ++----- models/Model_AMP/config_schema.json | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/models/Model_AMP/Model.py b/models/Model_AMP/Model.py index 151da7b..34bc9bd 100644 --- a/models/Model_AMP/Model.py +++ b/models/Model_AMP/Model.py @@ -69,7 +69,7 @@ class AMPModel(ModelBase): self.ask_session_name() self.ask_maximum_n_backups() self.ask_write_preview_history() - self.options['preview_samples'] = np.clip ( io.input_int ("Number of samples to preview", default_cpu_cap, add_info="1 - 16", help_message="Typical fine value is 4"), 1, 16 ) + self.options['preview_samples'] = np.clip ( io.input_int ("Number of samples to preview", default_preview_samples, add_info="1 - 16", help_message="Typical fine value is 4"), 1, 16 ) self.ask_target_iter() self.ask_retraining_samples() self.ask_random_src_flip() @@ -933,10 +933,7 @@ class AMPModel(ModelBase): target_srcm, target_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format) for x in ([target_srcm, target_dstm] )] - if self.options['force_preview_samples_num'] is None: - n_samples = min(4, self.get_batch_size(), 800 // self.resolution ) - else: - n_samples = min(self.get_batch_size(), self.options['force_preview_samples_num']) + n_samples = min(self.get_batch_size(), 
self.options['preview_samples']) result = [] diff --git a/models/Model_AMP/config_schema.json b/models/Model_AMP/config_schema.json index 08e8330..223e197 100644 --- a/models/Model_AMP/config_schema.json +++ b/models/Model_AMP/config_schema.json @@ -179,7 +179,7 @@ "random_dst_flip": { "type": "boolean" }, - "force_preview_samples_num": { + "preview_samples": { "type": "integer", "minimum": 1, "maximum": 64 From f669591b96a02a2e2d20763c877c52dddc270e19 Mon Sep 17 00:00:00 2001 From: seranus Date: Thu, 9 Dec 2021 18:32:01 +0100 Subject: [PATCH 11/13] saehd - preview samples fix --- models/Model_SAEHD/Model.py | 7 ++----- models/Model_SAEHD/config_schema.json | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py index a464483..3f7d4dd 100644 --- a/models/Model_SAEHD/Model.py +++ b/models/Model_SAEHD/Model.py @@ -83,7 +83,7 @@ class SAEHDModel(ModelBase): self.ask_autobackup_hour() self.ask_maximum_n_backups() self.ask_write_preview_history() - self.options['preview_samples'] = np.clip ( io.input_int ("Number of samples to preview", default_cpu_cap, add_info="1 - 16", help_message="Typical fine value is 4"), 1, 16 ) + self.options['preview_samples'] = np.clip ( io.input_int ("Number of samples to preview", default_preview_samples, add_info="1 - 16", help_message="Typical fine value is 4"), 1, 16 ) self.ask_target_iter() self.ask_retraining_samples() self.ask_random_src_flip() @@ -979,10 +979,7 @@ class SAEHDModel(ModelBase): target_srcm, target_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format) for x in ([target_srcm, target_dstm] )] - if self.options['force_preview_samples_num'] is None: - n_samples = min(4, self.get_batch_size(), 800 // self.resolution ) - else: - n_samples = min(self.get_batch_size(), self.options['force_preview_samples_num']) + n_samples = min(self.get_batch_size(), self.options['preview_samples']) if filenames is not None and len(filenames) > 0: for i in 
range(n_samples): diff --git a/models/Model_SAEHD/config_schema.json b/models/Model_SAEHD/config_schema.json index 6af3c28..6f8d262 100644 --- a/models/Model_SAEHD/config_schema.json +++ b/models/Model_SAEHD/config_schema.json @@ -160,7 +160,7 @@ "pretrain": { "type": "boolean" }, - "force_preview_samples_num": { + "preview_samples": { "type": "integer", "minimum": 1, "maximum": 64 From bf783a43eb7803b1ab1039abaed80d63164a11cb Mon Sep 17 00:00:00 2001 From: seranus Date: Thu, 9 Dec 2021 18:36:09 +0100 Subject: [PATCH 12/13] saehd - force full preview --- models/Model_SAEHD/Model.py | 3 ++- models/Model_SAEHD/config_schema.json | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py index 3f7d4dd..5c3e3b1 100644 --- a/models/Model_SAEHD/Model.py +++ b/models/Model_SAEHD/Model.py @@ -74,6 +74,7 @@ class SAEHDModel(ModelBase): default_pretrain = self.options['pretrain'] = self.load_or_def_option('pretrain', False) default_cpu_cap = self.options['cpu_cap'] = self.load_or_def_option('cpu_cap', 8) default_preview_samples = self.options['preview_samples'] = self.load_or_def_option('preview_samples', 4) + default_full_preview = self.options['force_full_preview'] = self.load_or_def_option('force_full_preview', False) default_lr_modifier = self.options['lr_modifier'] = self.load_or_def_option('lr_modifier', 0) ask_override = False if self.read_from_conf else self.ask_override() @@ -986,7 +987,7 @@ class SAEHDModel(ModelBase): S[i] = label_face_filename(S[i], filenames[0][i]) D[i] = label_face_filename(D[i], filenames[1][i]) - if self.resolution <= 256 or self.options['preview_samples'] == True: + if self.resolution <= 256 or self.options['force_full_preview'] == True: result = [] st = [] diff --git a/models/Model_SAEHD/config_schema.json b/models/Model_SAEHD/config_schema.json index 6f8d262..ddd987a 100644 --- a/models/Model_SAEHD/config_schema.json +++ b/models/Model_SAEHD/config_schema.json @@ -165,7 
+165,7 @@ "minimum": 1, "maximum": 64 }, - "force_complete_preview": { + "force_full_preview": { "type": "boolean" }, "session_name": { From 1e84097d68ba64bf4a58449b941a3d19d9e09658 Mon Sep 17 00:00:00 2001 From: seranus Date: Thu, 9 Dec 2021 18:55:44 +0100 Subject: [PATCH 13/13] removed hsv power from amp and saehd --- models/Model_AMP/Model.py | 6 ------ models/Model_AMP/config_schema.json | 5 ----- models/Model_SAEHD/Model.py | 7 +------ models/Model_SAEHD/config_schema.json | 5 ----- 4 files changed, 1 insertion(+), 22 deletions(-) diff --git a/models/Model_AMP/Model.py b/models/Model_AMP/Model.py index 34bc9bd..501db23 100644 --- a/models/Model_AMP/Model.py +++ b/models/Model_AMP/Model.py @@ -46,7 +46,6 @@ class AMPModel(ModelBase): default_lr_dropout = self.options['lr_dropout'] = self.load_or_def_option('lr_dropout', 'n') default_random_warp = self.options['random_warp'] = self.load_or_def_option('random_warp', True) - default_random_hsv_power = self.options['random_hsv_power'] = self.load_or_def_option('random_hsv_power', 0.0) default_random_downsample = self.options['random_downsample'] = self.load_or_def_option('random_downsample', False) default_random_noise = self.options['random_noise'] = self.load_or_def_option('random_noise', False) default_random_blur = self.options['random_blur'] = self.load_or_def_option('random_blur', False) @@ -151,9 +150,6 @@ class AMPModel(ModelBase): self.options['random_blur'] = io.input_bool("Enable random blur of samples", default_random_blur, help_message="") self.options['random_jpeg'] = io.input_bool("Enable random jpeg compression of samples", default_random_jpeg, help_message="") - - #self.options['random_hsv_power'] = np.clip ( io.input_number ("Random hue/saturation/light intensity", default_random_hsv_power, add_info="0.0 .. 0.3", help_message="Random hue/saturation/light intensity applied to the src face set only at the input of the neural network. Stabilizes color perturbations during face swapping. 
Reduces the quality of the color transfer by selecting the closest one in the src faceset. Thus the src faceset must be diverse enough. Typical fine value is 0.05"), 0.0, 0.3 ) - self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 5.0", help_message="Forces the neural network to learn small details of the face. Enable it only when the face is trained enough with random_warp(off), and don't disable. The higher the value, the higher the chances of artifacts. Typical fine value is 0.1"), 0.0, 5.0 ) @@ -206,7 +202,6 @@ class AMPModel(ModelBase): morph_factor = self.options['morph_factor'] gan_power = self.gan_power = self.options['gan_power'] random_warp = self.options['random_warp'] - #random_hsv_power = self.options['random_hsv_power'] if 'eyes_mouth_prio' in self.options: self.options.pop('eyes_mouth_prio') @@ -779,7 +774,6 @@ class AMPModel(ModelBase): 'random_blur': self.options['random_blur'], 'random_jpeg': self.options['random_jpeg'], 'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, - #'random_hsv_shift_amount' : random_hsv_power, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False, 'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, diff --git a/models/Model_AMP/config_schema.json b/models/Model_AMP/config_schema.json index 223e197..4bbfba3 100644 --- a/models/Model_AMP/config_schema.json +++ b/models/Model_AMP/config_schema.json @@ -110,11 +110,6 @@ "random_warp": { "type": "boolean" }, - "random_hsv_power": { - "type": "number", - "minimum": 0.0, - "maximum": 0.3 - }, "random_downsample": { "type": "boolean" }, diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py index 5c3e3b1..d79e92a 100644 --- a/models/Model_SAEHD/Model.py +++ b/models/Model_SAEHD/Model.py @@ -58,7 +58,6 @@ class SAEHDModel(ModelBase): default_loss_function = 
self.options['loss_function'] = self.load_or_def_option('loss_function', 'SSIM') default_random_warp = self.options['random_warp'] = self.load_or_def_option('random_warp', True) - default_random_hsv_power = self.options['random_hsv_power'] = self.load_or_def_option('random_hsv_power', 0.0) default_random_downsample = self.options['random_downsample'] = self.load_or_def_option('random_downsample', False) default_random_noise = self.options['random_noise'] = self.load_or_def_option('random_noise', False) default_random_blur = self.options['random_blur'] = self.load_or_def_option('random_blur', False) @@ -187,8 +186,6 @@ class SAEHDModel(ModelBase): self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake for less amount of iterations.") - #self.options['random_hsv_power'] = np.clip ( io.input_number ("Random hue/saturation/light intensity", default_random_hsv_power, add_info="0.0 .. 0.3", help_message="Random hue/saturation/light intensity applied to the src face set only at the input of the neural network. Stabilizes color perturbations during face swapping. Reduces the quality of the color transfer by selecting the closest one in the src faceset. Thus the src faceset must be diverse enough. 
Typical fine value is 0.05"), 0.0, 0.3 ) - self.options['random_downsample'] = io.input_bool("Enable random downsample of samples", default_random_downsample, help_message="") self.options['random_noise'] = io.input_bool("Enable random noise added to samples", default_random_noise, help_message="") self.options['random_blur'] = io.input_bool("Enable random blur of samples", default_random_blur, help_message="") @@ -278,14 +275,12 @@ class SAEHDModel(ModelBase): random_warp = False if self.pretrain else self.options['random_warp'] random_src_flip = self.random_src_flip if not self.pretrain else True random_dst_flip = self.random_dst_flip if not self.pretrain else True - #random_hsv_power = self.options['random_hsv_power'] if not self.pretrain else 0.0 blur_out_mask = self.options['blur_out_mask'] if self.pretrain: self.options_show_override['lr_dropout'] = 'n' self.options_show_override['random_warp'] = False self.options_show_override['gan_power'] = 0.0 - #self.options_show_override['random_hsv_power'] = 0.0 self.options_show_override['face_style_power'] = 0.0 self.options_show_override['bg_style_power'] = 0.0 self.options_show_override['uniform_yaw'] = True @@ -830,7 +825,7 @@ class SAEHDModel(ModelBase): 'random_noise': self.options['random_noise'], 'random_blur': self.options['random_blur'], 'random_jpeg': self.options['random_jpeg'], - 'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, #'random_hsv_shift_amount' : random_hsv_power, + 'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 
'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, diff --git a/models/Model_SAEHD/config_schema.json b/models/Model_SAEHD/config_schema.json index ddd987a..bc48b01 100644 --- a/models/Model_SAEHD/config_schema.json +++ b/models/Model_SAEHD/config_schema.json @@ -103,11 +103,6 @@ "random_warp": { "type": "boolean" }, - "random_hsv_power": { - "type": "number", - "minimum": 0.0, - "maximum": 0.3 - }, "random_downsample": { "type": "boolean" },