Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-07-07 05:22:06 -07:00)
refactoring
commit 5a9498543d
parent d2011f8c32
2 changed files with 7 additions and 8 deletions
@@ -185,17 +185,17 @@ class RecycleGANModel(ModelBase):

         if self.is_training_mode:
             t = SampleProcessor.Types
-            output_sample_types=[ { 'types': (t.IMG_SOURCE, t.MODE_BGR), 'resolution':resolution} ]
+            output_sample_types=[ { 'types': (t.IMG_SOURCE, t.MODE_BGR), 'resolution':resolution, 'normalize_tanh' : True} ]

             self.set_training_data_generators ([
                     SampleGeneratorImageTemporal(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
                         temporal_image_count=3,
-                        sample_process_options=SampleProcessor.Options(random_flip = False, normalize_tanh = True),
+                        sample_process_options=SampleProcessor.Options(random_flip = False),
                         output_sample_types=output_sample_types ),

                     SampleGeneratorImageTemporal(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                         temporal_image_count=3,
-                        sample_process_options=SampleProcessor.Options(random_flip = False, normalize_tanh = True),
+                        sample_process_options=SampleProcessor.Options(random_flip = False),
                         output_sample_types=output_sample_types ),
                 ])
         else:
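For reference, a minimal sketch of the call pattern after this hunk, in which the tanh range is requested per output sample type while SampleProcessor.Options keeps only augmentation settings. The import path, resolution, batch size, and data path below are illustrative assumptions, not taken from the commit.

# Sketch (assumed import path, illustrative values): tanh-range output is requested
# through output_sample_types instead of SampleProcessor.Options.
from pathlib import Path
from samplelib import SampleGeneratorImageTemporal, SampleProcessor

training_data_src_path = Path('workspace/data_src/aligned')   # illustrative path

t = SampleProcessor.Types
output_sample_types = [ { 'types': (t.IMG_SOURCE, t.MODE_BGR),
                          'resolution': 128,                   # illustrative resolution
                          'normalize_tanh': True } ]           # replaces Options(normalize_tanh=True)

generator = SampleGeneratorImageTemporal(training_data_src_path,
                                         debug=False, batch_size=4,
                                         temporal_image_count=3,
                                         sample_process_options=SampleProcessor.Options(random_flip=False),
                                         output_sample_types=output_sample_types)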
@@ -36,10 +36,9 @@ opts:
             'MODE_BGR_SHUFFLE' #BGR shuffle

     'resolution' : N

     'motion_blur' : (chance_int, range) - chance 0..100 to apply to face (not mask), and range [1..3] where 3 is highest power of motion blur

     'apply_ct' : bool
+    'normalize_tanh' : bool

 """

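A sketch of a single opts entry that exercises the keys documented in this docstring; the values are examples only, not defaults from the code.

# Illustrative opts entry; keys follow the opts format documented above, values are examples only.
t = SampleProcessor.Types
opts = { 'types'          : (t.IMG_SOURCE, t.MODE_BGR_SHUFFLE),
         'resolution'     : 128,       # N
         'motion_blur'    : (25, 3),   # 25% chance, strongest blur range
         'apply_ct'       : False,
         'normalize_tanh' : True }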
@@ -74,9 +73,8 @@ class SampleProcessor(object):

     class Options(object):

-        def __init__(self, random_flip = True, normalize_tanh = False, rotation_range=[-10,10], scale_range=[-0.05, 0.05], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05] ):
+        def __init__(self, random_flip = True, rotation_range=[-10,10], scale_range=[-0.05, 0.05], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05] ):
             self.random_flip = random_flip
-            self.normalize_tanh = normalize_tanh
             self.rotation_range = rotation_range
             self.scale_range = scale_range
             self.tx_range = tx_range
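With the flag gone from the constructor, Options carries only flip and affine-augmentation settings; a sketch spelling out the remaining arguments with the default values from the signature above.

# Sketch: Options now holds only geometric-augmentation settings (defaults from the signature above).
sample_process_options = SampleProcessor.Options(random_flip    = False,
                                                 rotation_range = [-10, 10],
                                                 scale_range    = [-0.05, 0.05],
                                                 tx_range       = [-0.05, 0.05],
                                                 ty_range       = [-0.05, 0.05])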
@@ -118,6 +116,7 @@ class SampleProcessor(object):
            normalize_vgg = opts.get('normalize_vgg', False)
            motion_blur = opts.get('motion_blur', None)
            apply_ct = opts.get('apply_ct', False)
+           normalize_tanh = opts.get('normalize_tanh', False)

            img_type = SPTF.NONE
            target_face_type = SPTF.NONE
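The lookup uses the plain dict.get pattern, so a sample type that omits the key keeps the previous [0, 1] behaviour; a standalone illustration using only built-in types:

# Standalone illustration of the defaulting behaviour (plain dict, no DeepFaceLab code).
opts = { 'resolution': 128 }                          # no 'normalize_tanh' key
normalize_tanh = opts.get('normalize_tanh', False)    # falls back to False
assert normalize_tanh is False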
@@ -246,7 +245,7 @@ class SampleProcessor(object):
                    img = img_mask

                if not debug:
-                   if sample_process_options.normalize_tanh:
+                   if normalize_tanh:
                        img = np.clip (img * 2.0 - 1.0, -1.0, 1.0)
                    else:
                        img = np.clip (img, 0.0, 1.0)
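The two branches either remap a sample to the tanh output range [-1, 1] or keep it in [0, 1]; a standalone numpy sketch of the same arithmetic:

import numpy as np

img = np.random.rand(64, 64, 3).astype(np.float32)     # sample image in [0, 1]

# tanh branch: rescale [0, 1] -> [-1, 1] and clip, matching a tanh-activated generator output
img_tanh = np.clip(img * 2.0 - 1.0, -1.0, 1.0)

# default branch: keep the [0, 1] range
img_unit = np.clip(img, 0.0, 1.0)

assert -1.0 <= img_tanh.min() and img_tanh.max() <= 1.0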