mirror of
https://github.com/iperov/DeepFaceLab.git
synced 2025-07-06 21:12:07 -07:00
Export AMP/SAEHD: added "Export quantized" option
This commit is contained in:
parent
f5cc54177f
commit
83b1412da7
2 changed files with 8 additions and 2 deletions
|
@@ -125,7 +125,10 @@ class AMPModel(ModelBase):
|
||||||
if ct_mode == 'none':
|
if ct_mode == 'none':
|
||||||
ct_mode = None
|
ct_mode = None
|
||||||
|
|
||||||
use_fp16 = self.is_exporting
|
use_fp16 = False
|
||||||
|
if self.is_exporting:
|
||||||
|
use_fp16 = io.input_bool ("Export quantized?", False, help_message='Makes the exported model faster. If you have problems, disable this option.')
|
||||||
|
|
||||||
conv_dtype = tf.float16 if use_fp16 else tf.float32
|
conv_dtype = tf.float16 if use_fp16 else tf.float32
|
||||||
|
|
||||||
class Downscale(nn.ModelBase):
|
class Downscale(nn.ModelBase):
|
||||||
|
|
|
@@ -219,7 +219,10 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
|
||||||
self.set_iter(0)
|
self.set_iter(0)
|
||||||
|
|
||||||
adabelief = self.options['adabelief']
|
adabelief = self.options['adabelief']
|
||||||
use_fp16 = False#self.options['use_fp16']
|
|
||||||
|
use_fp16 = False
|
||||||
|
if self.is_exporting:
|
||||||
|
use_fp16 = io.input_bool ("Export quantized?", False, help_message='Makes the exported model faster. If you have problems, disable this option.')
|
||||||
|
|
||||||
self.gan_power = gan_power = 0.0 if self.pretrain else self.options['gan_power']
|
self.gan_power = gan_power = 0.0 if self.pretrain else self.options['gan_power']
|
||||||
random_warp = False if self.pretrain else self.options['random_warp']
|
random_warp = False if self.pretrain else self.options['random_warp']
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue