From c3b633028467f716cd0c802bf3228203b7356eac Mon Sep 17 00:00:00 2001
From: iperov
Date: Tue, 5 Feb 2019 15:00:27 +0400
Subject: [PATCH] SAE: now asking face_type after resolution

---
 models/Model_SAE/Model.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/models/Model_SAE/Model.py b/models/Model_SAE/Model.py
index 1ef5ac3..5fefb93 100644
--- a/models/Model_SAE/Model.py
+++ b/models/Model_SAE/Model.py
@@ -28,11 +28,13 @@ class SAEModel(ModelBase):
 
         if is_first_run:
             self.options['resolution'] = input_int("Resolution (64,128 ?:help skip:128) : ", default_resolution, [64,128], help_message="More resolution requires more VRAM.")
+            self.options['face_type'] = input_str ("Half or Full face? (h/f, ?:help skip:f) : ", default_face_type, ['h','f'], help_message="Half face has better resolution, but covers less area of cheeks.").lower()
             self.options['archi'] = input_str ("AE architecture (df, liae, ?:help skip:%s) : " % (default_archi) , default_archi, ['df','liae'], help_message="DF keeps faces more natural, while LIAE can fix overly different face shapes.").lower()
             self.options['lighter_encoder'] = input_bool ("Use lightweight encoder? (y/n, ?:help skip:n) : ", False, help_message="Lightweight encoder is 35% faster, requires less VRAM, sacrificing overall quality.")
             self.options['learn_mask'] = input_bool ("Learn mask? (y/n, ?:help skip:y) : ", True, help_message="Choose NO to reduce model size. In this case converter forced to use 'not predicted mask' that is not smooth as predicted. Styled SAE can learn without mask and produce same quality fake if you choose high blur value in converter.")
         else:
             self.options['resolution'] = self.options.get('resolution', default_resolution)
+            self.options['face_type'] = self.options.get('face_type', default_face_type)
             self.options['archi'] = self.options.get('archi', default_archi)
             self.options['lighter_encoder'] = self.options.get('lighter_encoder', False)
             self.options['learn_mask'] = self.options.get('learn_mask', True)
@@ -62,12 +64,10 @@ class SAEModel(ModelBase):
 
         if is_first_run:
             self.options['ae_dims'] = np.clip ( input_int("AutoEncoder dims (32-1024 ?:help skip:%d) : " % (default_ae_dims) , default_ae_dims, help_message="More dims are better, but requires more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )
             self.options['ed_ch_dims'] = np.clip ( input_int("Encoder/Decoder dims per channel (21-85 ?:help skip:%d) : " % (default_ed_ch_dims) , default_ed_ch_dims, help_message="More dims are better, but requires more VRAM. You can fine-tune model size to fit your GPU." ), 21, 85 )
-            self.options['face_type'] = input_str ("Half or Full face? (h/f, ?:help skip:f) : ", default_face_type, ['h','f'], help_message="Half face has better resolution, but covers less area of cheeks.").lower()
         else:
             self.options['ae_dims'] = self.options.get('ae_dims', default_ae_dims)
             self.options['ed_ch_dims'] = self.options.get('ed_ch_dims', default_ed_ch_dims)
-            self.options['face_type'] = self.options.get('face_type', default_face_type)
-
+            
     #override
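For reference, a minimal, self-contained sketch (not code from this patch) of the option flow the change produces: on a first run the face type question is now asked immediately after resolution, while loads of an existing model keep falling back to stored values via options.get(), so models saved before this commit still pick up the default 'f'. ask_int and ask_str below are hypothetical, simplified stand-ins for the input_int / input_str helpers used in Model.py.

# Sketch only: simplified stand-ins for the repo's interactive helpers.
def ask_int(prompt, default, valid):
    s = input(prompt).strip()
    try:
        v = int(s)
    except ValueError:
        return default
    return v if v in valid else default

def ask_str(prompt, default, valid):
    s = input(prompt).strip().lower()
    return s if s in valid else default

def init_options(options, is_first_run):
    if is_first_run:
        # New prompt order: resolution first, then face_type, then architecture.
        options['resolution'] = ask_int("Resolution (64,128 skip:128) : ", 128, [64, 128])
        options['face_type'] = ask_str("Half or Full face? (h/f skip:f) : ", 'f', ['h', 'f'])
        options['archi'] = ask_str("AE architecture (df, liae skip:df) : ", 'df', ['df', 'liae'])
    else:
        # Existing model: reuse stored values; pre-change models get the defaults.
        options['resolution'] = options.get('resolution', 128)
        options['face_type'] = options.get('face_type', 'f')
        options['archi'] = options.get('archi', 'df')
    return options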