diff --git a/facelib/FANSegmentator.py b/facelib/FANSegmentator.py
index e8ff70f..fb7f4d5 100644
--- a/facelib/FANSegmentator.py
+++ b/facelib/FANSegmentator.py
@@ -10,7 +10,7 @@ from interact import interact as io
 from nnlib import nnlib
 
 """
-FANSegmentator is designed to segment faces aligned by 2DFAN-4 landmarks extractor.
+FANSegmentator is designed to exclude obstructions from faces such as hair, fingers, etc.
 
 Dataset used to train located in official DFL mega.nz folder
 https://mega.nz/#F!b9MzCK4B!zEAG9txu7uaRUjXz9PtBqg
diff --git a/models/Model_SAE/Model.py b/models/Model_SAE/Model.py
index 66c5b45..392002c 100644
--- a/models/Model_SAE/Model.py
+++ b/models/Model_SAE/Model.py
@@ -64,7 +64,7 @@ class SAEModel(ModelBase):
             self.options['e_ch_dims'] = np.clip ( io.input_int("Encoder dims per channel (21-85 ?:help skip:%d) : " % (default_e_ch_dims) , default_e_ch_dims, help_message="More encoder dims help to recognize more facial features, but require more VRAM. You can fine-tune model size to fit your GPU." ), 21, 85 )
             default_d_ch_dims = self.options['e_ch_dims'] // 2
             self.options['d_ch_dims'] = np.clip ( io.input_int("Decoder dims per channel (10-85 ?:help skip:%d) : " % (default_d_ch_dims) , default_d_ch_dims, help_message="More decoder dims help to get better details, but require more VRAM. You can fine-tune model size to fit your GPU." ), 10, 85 )
-            self.options['remove_gray_border'] = io.input_bool ("Remove gray border? (y/n, ?:help skip:n) : ", False, help_message="Removes gray border of predicted face, but requires more computing resources.")
+            #self.options['remove_gray_border'] = io.input_bool ("Remove gray border? (y/n, ?:help skip:n) : ", False, help_message="Removes gray border of predicted face, but requires more computing resources.")
         else:
             self.options['ae_dims'] = self.options.get('ae_dims', default_ae_dims)
             self.options['e_ch_dims'] = self.options.get('e_ch_dims', default_e_ch_dims)
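
With the prompt commented out, 'remove_gray_border' is never stored in self.options for newly created models. A minimal consumer-side sketch, assuming some reader of the option remains elsewhere in the codebase; the helper name is hypothetical, and the .get fallback mirrors the self.options.get(...) pattern used in the else branch of this hunk:

    # Hypothetical helper, not part of this diff: with the prompt disabled,
    # the key may be absent from options, so default to False rather than
    # raising KeyError on models created after this change.
    def should_remove_gray_border(options: dict) -> bool:
        return options.get('remove_gray_border', False)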