removing fp16 for sae

iperov 2021-07-15 13:22:32 +04:00
parent 7b8991a3cc
commit c9e0ba1779


@@ -30,7 +30,7 @@ class SAEHDModel(ModelBase):
min_res = 64
max_res = 640
-default_usefp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False)
+#default_usefp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False)
default_resolution = self.options['resolution'] = self.load_or_def_option('resolution', 128)
default_face_type = self.options['face_type'] = self.load_or_def_option('face_type', 'f')
default_models_opt_on_gpu = self.options['models_opt_on_gpu'] = self.load_or_def_option('models_opt_on_gpu', True)
@@ -69,7 +69,7 @@ class SAEHDModel(ModelBase):
self.ask_random_src_flip()
self.ask_random_dst_flip()
self.ask_batch_size(suggest_batch_size)
-self.options['use_fp16'] = io.input_bool ("Use fp16", default_usefp16, help_message='Increases training/inference speed, reduces model size. Model may crash. Enable it after 1-5k iters.')
+#self.options['use_fp16'] = io.input_bool ("Use fp16", default_usefp16, help_message='Increases training/inference speed, reduces model size. Model may crash. Enable it after 1-5k iters.')
if self.is_first_run():
resolution = io.input_int("Resolution", default_resolution, add_info="64-640", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16 and 32 for -d archi.")
@@ -219,7 +219,8 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
self.set_iter(0)
adabelief = self.options['adabelief']
+use_fp16 = False#self.options['use_fp16']
self.gan_power = gan_power = 0.0 if self.pretrain else self.options['gan_power']
random_warp = False if self.pretrain else self.options['random_warp']
random_src_flip = self.random_src_flip if not self.pretrain else True
@@ -262,7 +263,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
self.target_dstm_em = tf.placeholder (nn.floatx, mask_shape, name='target_dstm_em')
# Initializing model classes
-model_archi = nn.DeepFakeArchi(resolution, use_fp16=self.options['use_fp16'], opts=archi_opts)
+model_archi = nn.DeepFakeArchi(resolution, use_fp16=use_fp16, opts=archi_opts)
with tf.device (models_opt_device):
if 'df' in archi_type:
@@ -303,7 +304,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
if self.is_training:
if gan_power != 0:
-self.D_src = nn.UNetPatchDiscriminator(patch_size=self.options['gan_patch_size'], in_ch=input_ch, base_ch=self.options['gan_dims'], use_fp16=self.options['use_fp16'], name="D_src")
+self.D_src = nn.UNetPatchDiscriminator(patch_size=self.options['gan_patch_size'], in_ch=input_ch, base_ch=self.options['gan_dims'], name="D_src")
self.model_filename_list += [ [self.D_src, 'GAN.npy'] ]
# Initialize optimizers
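
For context, the net effect of this commit is that the SAEHD trainer no longer asks for or reads the stored 'use_fp16' option: use_fp16 is hard-coded to False, the architecture is built with that constant, and the GAN patch discriminator is constructed without an fp16 argument. A minimal, hypothetical sketch of that behavior change follows; the helper resolve_use_fp16 is illustrative only and does not exist in DeepFaceLab.

def resolve_use_fp16(options, fp16_removed=True):
    # With fp16_removed=True (mirroring this commit), the stored
    # 'use_fp16' option is ignored and the graph is built in fp32.
    if fp16_removed:
        return False
    # Pre-commit behavior: honor the saved option, defaulting to False.
    return bool(options.get('use_fp16', False))

print(resolve_use_fp16({'use_fp16': True}))                      # False (post-commit)
print(resolve_use_fp16({'use_fp16': True}, fp16_removed=False))  # True  (pre-commit)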