mirror of https://github.com/iperov/DeepFaceLab.git
synced 2025-08-22 06:23:20 -07:00
cpu_cap, preview, learning rate options

This commit is contained in:
parent 0c10249bb2
commit 8ed0e9a72f

3 changed files with 38 additions and 14 deletions
@@ -58,6 +58,7 @@ class AMPModel(ModelBase):
         default_random_color = self.options['random_color'] = self.load_or_def_option('random_color', False)
         default_clipgrad = self.options['clipgrad'] = self.load_or_def_option('clipgrad', False)
         default_usefp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False)
+        default_cpu_cap = self.options['cpu_cap'] = self.load_or_def_option('cpu_cap', 8)

         ask_override = False if self.read_from_conf else self.ask_override()

         if self.is_first_run() or ask_override:
@@ -72,6 +73,7 @@ class AMPModel(ModelBase):
             self.ask_random_dst_flip()
             self.ask_batch_size(8)
             self.options['use_fp16'] = io.input_bool ("Use fp16", default_usefp16, help_message='Increases training/inference speed, reduces model size. Model may crash. Enable it after 1-5k iters.')
+            self.options['cpu_cap'] = np.clip ( io.input_int ("Max cpu cores to use.", default_cpu_cap, add_info="1 - 256", help_message="Typical fine value is 8"), 1, 256 )
@@ -107,6 +109,8 @@ class AMPModel(ModelBase):
         if self.is_first_run() or ask_override:
             if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:

                 morph_factor = np.clip ( io.input_number ("Morph factor.", default_morph_factor, add_info="0.1 .. 0.5", help_message="Typical fine value is 0.5"), 0.1, 0.5 )
                 self.options['morph_factor'] = morph_factor
@@ -142,7 +146,7 @@ class AMPModel(ModelBase):
                 self.options['random_blur'] = io.input_bool("Enable random blur of samples", default_random_blur, help_message="")
                 self.options['random_jpeg'] = io.input_bool("Enable random jpeg compression of samples", default_random_jpeg, help_message="")

-                self.options['random_hsv_power'] = np.clip ( io.input_number ("Random hue/saturation/light intensity", default_random_hsv_power, add_info="0.0 .. 0.3", help_message="Random hue/saturation/light intensity applied to the src face set only at the input of the neural network. Stabilizes color perturbations during face swapping. Reduces the quality of the color transfer by selecting the closest one in the src faceset. Thus the src faceset must be diverse enough. Typical fine value is 0.05"), 0.0, 0.3 )
+                #self.options['random_hsv_power'] = np.clip ( io.input_number ("Random hue/saturation/light intensity", default_random_hsv_power, add_info="0.0 .. 0.3", help_message="Random hue/saturation/light intensity applied to the src face set only at the input of the neural network. Stabilizes color perturbations during face swapping. Reduces the quality of the color transfer by selecting the closest one in the src faceset. Thus the src faceset must be diverse enough. Typical fine value is 0.05"), 0.0, 0.3 )

                 self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 5.0", help_message="Forces the neural network to learn small details of the face. Enable it only when the face is trained enough with random_warp(off), and don't disable. The higher the value, the higher the chances of artifacts. Typical fine value is 0.1"), 0.0, 5.0 )
@@ -196,7 +200,7 @@ class AMPModel(ModelBase):
         morph_factor = self.options['morph_factor']
         gan_power = self.gan_power = self.options['gan_power']
         random_warp = self.options['random_warp']
-        random_hsv_power = self.options['random_hsv_power']
+        #random_hsv_power = self.options['random_hsv_power']

         if 'eyes_mouth_prio' in self.options:
             self.options.pop('eyes_mouth_prio')
@@ -741,7 +745,7 @@ class AMPModel(ModelBase):
             random_ct_samples_path=training_data_dst_path if ct_mode is not None else None #and not self.pretrain

-            cpu_count = multiprocessing.cpu_count()
+            cpu_count = min(multiprocessing.cpu_count(), self.options['cpu_cap'])
             src_generators_count = cpu_count // 2
             dst_generators_count = cpu_count // 2
             if ct_mode is not None:
@@ -762,7 +766,7 @@ class AMPModel(ModelBase):
                         'random_blur': self.options['random_blur'],
                         'random_jpeg': self.options['random_jpeg'],
                         'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode,
-                        'random_hsv_shift_amount' : random_hsv_power,
+                        #'random_hsv_shift_amount' : random_hsv_power,
                         'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                       {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False,
                         'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode,
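Note: a minimal standalone sketch of how the new cap propagates to the sample-generator worker counts in both models. The function name generator_counts is hypothetical, and the 1.5x color-transfer branch is an assumption taken from upstream DeepFaceLab (the hunk above is cut off at that line):

    import multiprocessing

    def generator_counts(cpu_cap, ct_mode=None):
        # New behaviour: never spawn more sample-generator workers than the cap.
        cpu_count = min(multiprocessing.cpu_count(), cpu_cap)
        src_generators_count = cpu_count // 2
        dst_generators_count = cpu_count // 2
        if ct_mode is not None:
            # Assumption from upstream: color transfer gives the src side 1.5x workers.
            src_generators_count = int(src_generators_count * 1.5)
        return src_generators_count, dst_generators_count

    print(generator_counts(cpu_cap=8))   # (4, 4) on any machine with >= 8 cores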
@@ -72,7 +72,9 @@ class SAEHDModel(ModelBase):
         default_random_color = self.options['random_color'] = self.load_or_def_option('random_color', False)
         default_clipgrad = self.options['clipgrad'] = self.load_or_def_option('clipgrad', False)
         default_pretrain = self.options['pretrain'] = self.load_or_def_option('pretrain', False)
         #default_use_fp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False)
+        default_cpu_cap = self.options['cpu_cap'] = self.load_or_def_option('cpu_cap', 8)
+        default_preview_samples = self.options['preview_samples'] = self.load_or_def_option('preview_samples', 4)
+        default_lr_modifier = self.options['lr_modifier'] = self.load_or_def_option('lr_modifier', 0)

         ask_override = False if self.read_from_conf else self.ask_override()
         if self.is_first_run() or ask_override:
@@ -81,12 +83,15 @@ class SAEHDModel(ModelBase):
             self.ask_autobackup_hour()
             self.ask_maximum_n_backups()
             self.ask_write_preview_history()
+            self.options['preview_samples'] = np.clip ( io.input_int ("Number of samples to preview", default_preview_samples, add_info="1 - 16", help_message="Typical fine value is 4"), 1, 16 )
             self.ask_target_iter()
             self.ask_retraining_samples()
             self.ask_random_src_flip()
             self.ask_random_dst_flip()
             self.ask_batch_size(suggest_batch_size)
+            self.options['use_fp16'] = io.input_bool ("Use fp16", default_usefp16, help_message='Increases training/inference speed, reduces model size. Model may crash. Enable it after 1-5k iters.')
+            self.options['cpu_cap'] = np.clip ( io.input_int ("Max cpu cores to use.", default_cpu_cap, add_info="1 - 256", help_message="Typical fine value is 8"), 1, 256 )

         if self.is_first_run():
             if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
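Side note: the np.clip wrapper around io.input_int means an out-of-range answer is silently clamped rather than rejected. A minimal illustration, with plain integers standing in for the interactive prompt:

    import numpy as np

    for answer in (0, 8, 999):                  # simulated prompt answers
        capped = int(np.clip(answer, 1, 256))   # same bounds as cpu_cap
        print(answer, "->", capped)             # 0 -> 1, 8 -> 8, 999 -> 256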
@@ -177,9 +182,11 @@ class SAEHDModel(ModelBase):
                 self.options['loss_function'] = io.input_str(f"Loss function", default_loss_function, ['SSIM', 'MS-SSIM', 'MS-SSIM+L1'],
                                                              help_message="Change loss function used for image quality assessment.")

+                self.options['lr_modifier'] = np.clip (io.input_int("Learning rate factor", default_lr_modifier, add_info="-100 .. 100", help_message="Modify the learning rate: 100 == multiply by 4, -100 == divide by 4"), -100, 100)

                 self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake for less amount of iterations.")

-                self.options['random_hsv_power'] = np.clip ( io.input_number ("Random hue/saturation/light intensity", default_random_hsv_power, add_info="0.0 .. 0.3", help_message="Random hue/saturation/light intensity applied to the src face set only at the input of the neural network. Stabilizes color perturbations during face swapping. Reduces the quality of the color transfer by selecting the closest one in the src faceset. Thus the src faceset must be diverse enough. Typical fine value is 0.05"), 0.0, 0.3 )
+                #self.options['random_hsv_power'] = np.clip ( io.input_number ("Random hue/saturation/light intensity", default_random_hsv_power, add_info="0.0 .. 0.3", help_message="Random hue/saturation/light intensity applied to the src face set only at the input of the neural network. Stabilizes color perturbations during face swapping. Reduces the quality of the color transfer by selecting the closest one in the src faceset. Thus the src faceset must be diverse enough. Typical fine value is 0.05"), 0.0, 0.3 )

                 self.options['random_downsample'] = io.input_bool("Enable random downsample of samples", default_random_downsample, help_message="")
                 self.options['random_noise'] = io.input_bool("Enable random noise added to samples", default_random_noise, help_message="")
@@ -270,14 +277,14 @@ class SAEHDModel(ModelBase):
         random_warp = False if self.pretrain else self.options['random_warp']
         random_src_flip = self.random_src_flip if not self.pretrain else True
         random_dst_flip = self.random_dst_flip if not self.pretrain else True
-        random_hsv_power = self.options['random_hsv_power'] if not self.pretrain else 0.0
+        #random_hsv_power = self.options['random_hsv_power'] if not self.pretrain else 0.0
         blur_out_mask = self.options['blur_out_mask']

         if self.pretrain:
             self.options_show_override['lr_dropout'] = 'n'
             self.options_show_override['random_warp'] = False
             self.options_show_override['gan_power'] = 0.0
-            self.options_show_override['random_hsv_power'] = 0.0
+            #self.options_show_override['random_hsv_power'] = 0.0
             self.options_show_override['face_style_power'] = 0.0
             self.options_show_override['bg_style_power'] = 0.0
             self.options_show_override['uniform_yaw'] = True
@@ -360,7 +367,14 @@ class SAEHDModel(ModelBase):
                 self.model_filename_list += [ [self.D_src, 'GAN.npy'] ]

             # Initialize optimizers
-            lr=5e-5
+            lr_modifier = self.options['lr_modifier']
+            if lr_modifier == 0:
+                lr = 5e-5
+            elif lr_modifier > 0:
+                lr = 5e-5 * abs( lr_modifier * 4/100 )
+            else:
+                lr = 5e-5 / abs( lr_modifier * 4/100 )
+
             if self.options['lr_dropout'] in ['y','cpu'] and not self.pretrain:
                 lr_cos = 500
                 lr_dropout = 0.3
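Note: the linear factor abs(lr_modifier * 4/100) passes through 1 only at |lr_modifier| == 25, so settings below 25 move the rate the opposite way from what the help text suggests. A smoother mapping that honors "100 == multiply by 4, -100 == divide by 4" at every point would be exponential; a sketch (modified_lr is a hypothetical name, not what this commit ships):

    BASE_LR = 5e-5

    def modified_lr(lr_modifier, base=BASE_LR):
        # 4 ** (m/100) equals 1 at m == 0, 4 at m == 100, 1/4 at m == -100,
        # and changes monotonically in between.
        return base * 4 ** (lr_modifier / 100)

    for m in (-100, -50, 0, 50, 100):
        print(m, modified_lr(m))   # 1.25e-05, 2.5e-05, 5e-05, 1e-04, 2e-04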
@@ -795,7 +809,7 @@ class SAEHDModel(ModelBase):
             random_ct_samples_path=training_data_dst_path if ct_mode is not None and not self.pretrain else None

-            cpu_count = multiprocessing.cpu_count()
+            cpu_count = min(multiprocessing.cpu_count(), self.options['cpu_cap'])
             src_generators_count = cpu_count // 2
             dst_generators_count = cpu_count // 2
             if ct_mode is not None:
@@ -815,7 +829,7 @@ class SAEHDModel(ModelBase):
                         'random_noise': self.options['random_noise'],
                         'random_blur': self.options['random_blur'],
                         'random_jpeg': self.options['random_jpeg'],
-                        'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, 'random_hsv_shift_amount' : random_hsv_power,
+                        'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, #'random_hsv_shift_amount' : random_hsv_power,
                         'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                       {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                       {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
@@ -965,7 +979,8 @@ class SAEHDModel(ModelBase):
             target_srcm, target_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format) for x in ([target_srcm, target_dstm] )]

-            n_samples = min(4, self.get_batch_size(), 800 // self.resolution )
+            #n_samples = min(4, self.get_batch_size(), 800 // self.resolution )
+            n_samples = min(self.get_batch_size(), self.options['preview_samples'])

             if filenames is not None and len(filenames) > 0:
                 for i in range(n_samples):
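The effect of this change in one line: the preview grid is no longer hard-limited to 4 samples (or by resolution) but follows the user's preview_samples, still bounded by the batch size. A quick sketch with hypothetical values (preview_count is an illustrative name):

    def preview_count(batch_size, preview_samples):
        # Replaces the old hard limit: min(4, batch_size, 800 // resolution).
        return min(batch_size, preview_samples)

    print(preview_count(batch_size=8, preview_samples=16))   # 8  (batch limits)
    print(preview_count(batch_size=8, preview_samples=4))    # 4  (option limits)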
@@ -4,7 +4,7 @@
     "definitions": {
         "dfl_config": {
             "type": "object",
-            "additionalProperties": false,
+            "additionalProperties": true,
             "properties": {
                 "use_fp16": {
                     "type": "boolean"
@@ -13,6 +13,11 @@
                     "type": "string",
                     "pattern": "^(df|liae)-(\\b(?!\\w*(\\w)\\w*\\1)[udtc]+\\b)+|^(df|liae)$"
                 },
+                "cpu_cap": {
+                    "type": "integer",
+                    "minimum": 1,
+                    "maximum": 256
+                },
                 "resolution": {
                     "type": "integer",
                     "minimum": 64,
@@ -213,6 +218,7 @@
                 }
             },
             "required": [
+                "cpu_cap",
                 "adabelief",
                 "ae_dims",
                 "archi",
@@ -246,7 +252,6 @@
                 "random_color",
                 "random_downsample",
                 "random_dst_flip",
-                "random_hsv_power",
                 "random_jpeg",
                 "random_noise",
                 "random_src_flip",
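A standalone way to exercise the schema changes; the inline schema_fragment and config dict mirror the diff but are illustrative assumptions, and jsonschema is a third-party package rather than part of the repository:

    import jsonschema   # pip install jsonschema

    schema_fragment = {
        "type": "object",
        "additionalProperties": True,   # was false before this commit
        "properties": {
            "cpu_cap": {"type": "integer", "minimum": 1, "maximum": 256},
        },
        "required": ["cpu_cap"],
    }

    config = {
        "cpu_cap": 8,
        # not declared in the schema, but tolerated now that
        # additionalProperties is true:
        "preview_samples": 4,
        "lr_modifier": 0,
    }

    jsonschema.validate(config, schema_fragment)   # raises ValidationError on a bad config
    print("config ok")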