diff --git a/models/Model_SAE/Model.py b/models/Model_SAE/Model.py
index a24e4df..785ee88 100644
--- a/models/Model_SAE/Model.py
+++ b/models/Model_SAE/Model.py
@@ -35,9 +35,9 @@ class SAEModel(ModelBase):
         default_face_type = 'f'
 
         if is_first_run:
-            resolution = io.input_int("Resolution ( 64-256 ?:help skip:128) : ", default_resolution,
+            resolution = io.input_int("Resolution ( 16-1024 ?:help skip:128) : ", default_resolution,
                                       help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16.")
-            resolution = np.clip(resolution, 64, 256)
+            resolution = np.clip(resolution, 16, 1024)
             while np.modf(resolution / 16)[0] != 0.0:
                 resolution -= 1
             self.options['resolution'] = resolution
@@ -74,20 +74,20 @@ class SAEModel(ModelBase):
 
         if is_first_run:
             self.options['ae_dims'] = np.clip(
-                io.input_int("AutoEncoder dims (32-1024 ?:help skip:%d) : " % (default_ae_dims), default_ae_dims,
+                io.input_int("AutoEncoder dims (1-2048 ?:help skip:%d) : " % (default_ae_dims), default_ae_dims,
                              help_message="All face information will packed to AE dims. If amount of AE dims are not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU."),
-                32, 1024)
+                1, 2048)
             self.options['e_ch_dims'] = np.clip(
-                io.input_int("Encoder dims per channel (21-85 ?:help skip:%d) : " % (default_e_ch_dims),
+                io.input_int("Encoder dims per channel (1-128 ?:help skip:%d) : " % (default_e_ch_dims),
                              default_e_ch_dims,
                              help_message="More encoder dims help to recognize more facial features, but require more VRAM. You can fine-tune model size to fit your GPU."),
-                21, 85)
+                1, 128)
             default_d_ch_dims = self.options['e_ch_dims'] // 2
             self.options['d_ch_dims'] = np.clip(
-                io.input_int("Decoder dims per channel (10-85 ?:help skip:%d) : " % (default_d_ch_dims),
+                io.input_int("Decoder dims per channel (1-128 ?:help skip:%d) : " % (default_d_ch_dims),
                              default_d_ch_dims,
                              help_message="More decoder dims help to get better details, but require more VRAM. You can fine-tune model size to fit your GPU."),
-                10, 85)
+                1, 128)
             self.options['multiscale_decoder'] = io.input_bool("Use multiscale decoder? (y/n, ?:help skip:n) : ", False,
                                                                help_message="Multiscale decoder helps to get better details.")
             self.options['ca_weights'] = io.input_bool(
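
Note on the resolution change: after clamping to the widened 16-1024 range, the existing np.modf loop still rounds the value down to the nearest multiple of 16 (the help message's "adjusted to multiple of 16"). A minimal standalone sketch of the resulting behavior — the `adjust_resolution` helper name is hypothetical, not part of the patch:

```python
import numpy as np

def adjust_resolution(resolution):
    # Clamp to the widened supported range, then round down to a
    # multiple of 16, mirroring the np.clip + np.modf loop in the patch.
    resolution = int(np.clip(resolution, 16, 1024))
    while np.modf(resolution / 16)[0] != 0.0:
        resolution -= 1
    return resolution

assert adjust_resolution(2000) == 1024  # clamped to the new upper bound
assert adjust_resolution(100) == 96     # rounded down to a multiple of 16
assert adjust_resolution(8) == 16       # raised to the new lower bound
```

For positive integers the loop is equivalent to `(resolution // 16) * 16`, and because the clamp runs first, the result can never drop below 16.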