'models_opt_on_gpu' option is now available for multiple GPUs (previously it was limited to a single GPU)
maximum resolution is now 512
Colombo 2020-02-27 16:21:32 +04:00
parent 810dc5ada5
commit 74999ce7ee


@@ -58,8 +58,8 @@ class SAEHDModel(ModelBase):
self.ask_batch_size(suggest_batch_size)
if self.is_first_run():
- resolution = io.input_int("Resolution", default_resolution, add_info="64-256", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16.")
- resolution = np.clip ( (resolution // 16) * 16, 64, 256)
+ resolution = io.input_int("Resolution", default_resolution, add_info="64-512", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16.")
+ resolution = np.clip ( (resolution // 16) * 16, 64, 512)
self.options['resolution'] = resolution
self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf'], help_message="Half / mid face / full face / whole face. Half face has better resolution, but covers less area of cheeks. Mid face is 30% wider than half face. 'Whole face' covers full area of face include forehead, but requires manual merge in Adobe After Effects.").lower()
self.options['archi'] = io.input_str ("AE architecture", default_archi, ['dfhd','liaehd','df','liae'], help_message="'df' keeps faces more natural. 'liae' can fix overly different face shapes. 'hd' is heavyweight version for the best quality.").lower()
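The new upper bound only widens the clip range; the rounding rule is unchanged. A minimal standalone sketch of how a user-entered value is normalized, using a hypothetical normalize_resolution helper name (not part of the commit):

import numpy as np

def normalize_resolution(resolution: int) -> int:
    # Round down to the nearest multiple of 16, then clamp to the new 64-512 range.
    return int(np.clip((resolution // 16) * 16, 64, 512))

# Examples: 100 -> 96, 300 -> 288, 700 -> 512, 20 -> 64
assert normalize_resolution(100) == 96
assert normalize_resolution(300) == 288
assert normalize_resolution(700) == 512
assert normalize_resolution(20) == 64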
@@ -67,7 +67,6 @@ class SAEHDModel(ModelBase):
default_d_dims = 48 if self.options['archi'] == 'dfhd' else 64
default_d_dims = self.options['d_dims'] = self.load_or_def_option('d_dims', default_d_dims)
default_d_mask_dims = default_d_dims // 3
default_d_mask_dims += default_d_mask_dims % 2
default_d_mask_dims = self.options['d_mask_dims'] = self.load_or_def_option('d_mask_dims', default_d_mask_dims)
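For reference, the default mask-decoder dimensions in the context lines above are derived from d_dims: one third of the value, bumped up to an even number. A small sketch of that arithmetic, assuming the d_dims defaults of 48 ('dfhd') and 64 (other architectures) shown above:

def default_mask_dims(d_dims: int) -> int:
    # One third of the decoder dims, rounded up to an even number.
    d_mask_dims = d_dims // 3
    d_mask_dims += d_mask_dims % 2
    return d_mask_dims

# 48 // 3 = 16 (already even) -> 16; 64 // 3 = 21 (odd) -> 22
assert default_mask_dims(48) == 16
assert default_mask_dims(64) == 22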
@@ -93,7 +92,6 @@ class SAEHDModel(ModelBase):
self.options['eyes_prio'] = io.input_bool ("Eyes priority", default_eyes_prio, help_message='Helps to fix eye problems during training like "alien eyes" and wrong eyes direction ( especially on HD architectures ) by forcing the neural network to train eyes with higher priority. before/after https://i.imgur.com/YQHOuSR.jpg ')
if self.is_first_run() or ask_override:
- if len(device_config.devices) == 1:
self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default model and optimizer weights are placed on GPU to accelerate the process. You can place they on CPU to free up extra VRAM, thus set bigger dimensions.")
self.options['lr_dropout'] = io.input_bool ("Use learning rate dropout", default_lr_dropout, help_message="When the face is trained enough, you can enable this option to get extra sharpness for less amount of iterations.")
@@ -361,7 +359,7 @@ class SAEHDModel(ModelBase):
masked_training = self.options['masked_training']
- models_opt_on_gpu = False if len(devices) == 0 else True if len(devices) > 1 else self.options['models_opt_on_gpu']
+ models_opt_on_gpu = False if len(devices) == 0 else self.options['models_opt_on_gpu']
models_opt_device = '/GPU:0' if models_opt_on_gpu and self.is_training else '/CPU:0'
optimizer_vars_on_cpu = models_opt_device=='/CPU:0'
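The core of the multi-GPU change is in the placement expression above: before, having more than one device forced GPU placement, while now the user's choice is honored whenever at least one device is present. A minimal standalone sketch contrasting the two expressions, where devices and opt are stand-ins for device_config.devices and self.options['models_opt_on_gpu']:

def placement_old(devices, opt):
    # Old behavior: any multi-GPU setup ignored the option and always used the GPU.
    return False if len(devices) == 0 else True if len(devices) > 1 else opt

def placement_new(devices, opt):
    # New behavior: the option is respected for both single- and multi-GPU setups.
    return False if len(devices) == 0 else opt

# With two GPUs and the option set to False, only the new logic keeps models on CPU.
assert placement_old(['GPU:0', 'GPU:1'], False) is True
assert placement_new(['GPU:0', 'GPU:1'], False) is False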
@@ -426,8 +424,8 @@ class SAEHDModel(ModelBase):
if self.is_training:
if gan_power != 0:
- self.D_src = nn.PatchDiscriminator(patch_size=resolution//16, in_ch=output_ch, base_ch=512, name="D_src")
- self.D_dst = nn.PatchDiscriminator(patch_size=resolution//16, in_ch=output_ch, base_ch=512, name="D_dst")
+ self.D_src = nn.PatchDiscriminator(patch_size=resolution//16, in_ch=output_ch, name="D_src")
+ self.D_dst = nn.PatchDiscriminator(patch_size=resolution//16, in_ch=output_ch, name="D_dst")
self.model_filename_list += [ [self.D_src, 'D_src.npy'] ]
self.model_filename_list += [ [self.D_dst, 'D_dst.npy'] ]
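Since the discriminator patch size above is derived as resolution // 16, raising the maximum resolution also raises the largest possible patch size. A quick sketch of that relationship (plain arithmetic, not part of the commit):

# patch_size = resolution // 16, as in the PatchDiscriminator construction above.
for resolution in (128, 256, 512):
    print(resolution, '->', resolution // 16)
# 128 -> 8, 256 -> 16, 512 -> 32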
@@ -833,13 +831,13 @@ class SAEHDModel(ModelBase):
n_samples = min(4, self.get_batch_size(), 800 // self.resolution )
+ if self.resolution <= 256:
result = []
st = []
for i in range(n_samples):
ar = S[i], SS[i], D[i], DD[i], SD[i]
st.append ( np.concatenate ( ar, axis=1) )
result += [ ('SAEHD', np.concatenate (st, axis=0 )), ]
if self.options['learn_mask']:
@@ -849,6 +847,45 @@ class SAEHDModel(ModelBase):
st_m.append ( np.concatenate ( ar, axis=1) )
result += [ ('SAEHD masked', np.concatenate (st_m, axis=0 )), ]
+ else:
+ result = []
+ st = []
+ for i in range(n_samples):
+ ar = S[i], SS[i]
+ st.append ( np.concatenate ( ar, axis=1) )
+ result += [ ('SAEHD src-src', np.concatenate (st, axis=0 )), ]
+ st = []
+ for i in range(n_samples):
+ ar = D[i], DD[i]
+ st.append ( np.concatenate ( ar, axis=1) )
+ result += [ ('SAEHD dst-dst', np.concatenate (st, axis=0 )), ]
+ st = []
+ for i in range(n_samples):
+ ar = D[i], SD[i]
+ st.append ( np.concatenate ( ar, axis=1) )
+ result += [ ('SAEHD pred', np.concatenate (st, axis=0 )), ]
+ if self.options['learn_mask']:
+ st_m = []
+ for i in range(n_samples):
+ ar = S[i]*target_srcm[i], SS[i]
+ st_m.append ( np.concatenate ( ar, axis=1) )
+ result += [ ('SAEHD masked src-src', np.concatenate (st_m, axis=0 )), ]
+ st_m = []
+ for i in range(n_samples):
+ ar = D[i]*target_dstm[i], DD[i]*DDM[i]
+ st_m.append ( np.concatenate ( ar, axis=1) )
+ result += [ ('SAEHD masked dst-dst', np.concatenate (st_m, axis=0 )), ]
+ st_m = []
+ for i in range(n_samples):
+ ar = D[i]*target_dstm[i], SD[i]*(DDM[i]*SDM[i])
+ st_m.append ( np.concatenate ( ar, axis=1) )
+ result += [ ('SAEHD masked pred', np.concatenate (st_m, axis=0 )), ]
return result
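The new high-resolution branch above builds several smaller previews instead of one wide strip, but each preview is assembled the same way: images are concatenated along axis=1 to form a row, and rows are stacked along axis=0 into one grid. A minimal NumPy sketch of that stacking pattern, assuming dummy HxWx3 arrays as stand-ins for S, SS, etc.:

import numpy as np

n_samples, h, w = 4, 64, 64
# Dummy stand-ins for the sample batches used in the real previews.
S  = np.random.rand(n_samples, h, w, 3)
SS = np.random.rand(n_samples, h, w, 3)

rows = []
for i in range(n_samples):
    # Place source and reconstruction side by side (axis=1 concatenates columns).
    rows.append(np.concatenate((S[i], SS[i]), axis=1))

# Stack the rows vertically (axis=0) into one preview image, as in 'SAEHD src-src'.
preview = np.concatenate(rows, axis=0)
assert preview.shape == (n_samples * h, 2 * w, 3)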