Merge pull request #47 from faceshiftlabs/feat/no-training-wheels

Fewer restrictions on resolution and dimensions
Jeremy Hummel, 2019-08-27 12:12:17 -07:00 (committed by GitHub)
commit 9d5a5e5c98


@@ -35,9 +35,9 @@ class SAEModel(ModelBase):
         default_face_type = 'f'
         if is_first_run:
-            resolution = io.input_int("Resolution ( 64-256 ?:help skip:128) : ", default_resolution,
+            resolution = io.input_int("Resolution ( 16-1024 ?:help skip:128) : ", default_resolution,
                                       help_message="More resolution requires more VRAM and time to train. Value will be adjusted to a multiple of 16.")
-            resolution = np.clip(resolution, 64, 256)
+            resolution = np.clip(resolution, 16, 1024)
             while np.modf(resolution / 16)[0] != 0.0:
                 resolution -= 1
             self.options['resolution'] = resolution
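In effect, the new resolution handling clamps the input to [16, 1024] and then rounds down to the nearest multiple of 16. A minimal standalone sketch of that behavior (the `normalize_resolution` helper name is hypothetical, not part of the codebase; the loop mirrors the code above):

```python
import numpy as np

def normalize_resolution(resolution):
    # Hypothetical helper mirroring the logic above: clamp to the new
    # [16, 1024] range, then round down to the nearest multiple of 16.
    resolution = int(np.clip(resolution, 16, 1024))
    while np.modf(resolution / 16)[0] != 0.0:
        resolution -= 1
    return resolution

print(normalize_resolution(2000))  # 1024 (clamped to the upper bound)
print(normalize_resolution(100))   # 96   (rounded down to a multiple of 16)
print(normalize_resolution(8))     # 16   (clamped up from below)
```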
@@ -74,20 +74,20 @@ class SAEModel(ModelBase):
         if is_first_run:
             self.options['ae_dims'] = np.clip(
-                io.input_int("AutoEncoder dims (32-1024 ?:help skip:%d) : " % (default_ae_dims), default_ae_dims,
+                io.input_int("AutoEncoder dims (1-2048 ?:help skip:%d) : " % (default_ae_dims), default_ae_dims,
                              help_message="All face information will be packed into the AE dims. If there are not enough AE dims, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune the model size to fit your GPU."),
-                32, 1024)
+                1, 2048)
             self.options['e_ch_dims'] = np.clip(
-                io.input_int("Encoder dims per channel (21-85 ?:help skip:%d) : " % (default_e_ch_dims),
+                io.input_int("Encoder dims per channel (1-128 ?:help skip:%d) : " % (default_e_ch_dims),
                              default_e_ch_dims,
                              help_message="More encoder dims help to recognize more facial features, but require more VRAM. You can fine-tune the model size to fit your GPU."),
-                21, 85)
+                1, 128)
             default_d_ch_dims = self.options['e_ch_dims'] // 2
             self.options['d_ch_dims'] = np.clip(
-                io.input_int("Decoder dims per channel (10-85 ?:help skip:%d) : " % (default_d_ch_dims),
+                io.input_int("Decoder dims per channel (1-128 ?:help skip:%d) : " % (default_d_ch_dims),
                              default_d_ch_dims,
                              help_message="More decoder dims help to get better details, but require more VRAM. You can fine-tune the model size to fit your GPU."),
-                10, 85)
+                1, 128)
             self.options['multiscale_decoder'] = io.input_bool("Use multiscale decoder? (y/n, ?:help skip:n) : ", False,
                                                                help_message="Multiscale decoder helps to get better details.")
             self.options['ca_weights'] = io.input_bool(
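All three dims prompts follow the same pattern: read an integer, fall back to the default, clamp into the widened range. A minimal sketch of that pattern with the new bounds (`ask_clipped_int` is a hypothetical stand-in for the repo's `io.input_int` followed by `np.clip`; the defaults shown are illustrative, not taken from this diff):

```python
import numpy as np

def ask_clipped_int(prompt, default, lo, hi):
    # Hypothetical stand-in for io.input_int + np.clip: read an integer,
    # fall back to the default on empty/bad input, then clamp to [lo, hi].
    try:
        return int(np.clip(int(input(prompt)), lo, hi))
    except ValueError:
        return int(np.clip(default, lo, hi))

# The widened bounds introduced by this commit:
ae_dims   = ask_clipped_int("AutoEncoder dims (1-2048) : ", 512, 1, 2048)
e_ch_dims = ask_clipped_int("Encoder dims per channel (1-128) : ", 42, 1, 128)
d_ch_dims = ask_clipped_int("Decoder dims per channel (1-128) : ", e_ch_dims // 2, 1, 128)
```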