refactorings: renamed SampleProcessor.TypeFlags FACE_ALIGN_* to FACE_TYPE_*; added motion blur option to SampleProcessor for the FANSegmentator trainer

commit 58d7e990f4
Author: iperov
Date:   2019-04-07 23:08:00 +04:00

9 changed files with 238 additions and 72 deletions

View file

@@ -49,15 +49,15 @@ class Model(ModelBase):
         SampleGeneratorFace(self.training_data_src_path, sort_by_yaw_target_samples_path=self.training_data_dst_path if self.sort_by_yaw else None,
                             debug=self.is_debug(), batch_size=self.batch_size,
                             sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, scale_range=np.array([-0.05, 0.05])+self.src_scale_mod / 100.0 ),
-                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
+                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
         SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                             sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
-                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
+                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
         ])
     #override
     def onSave(self):
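
Note on the flag scheme above: each entry in output_sample_types ORs together a transform kind, a face type, and a channel mode, which only works if SampleProcessor.TypeFlags is a set of disjoint bit masks. A minimal sketch of such a layout follows; the names come from this diff, but the concrete bit positions and comments are assumptions, not the repository's actual values.

    # Hypothetical layout of SampleProcessor.TypeFlags as disjoint bit masks.
    # The names mirror the diff; the bit positions are assumed for illustration.
    class TypeFlags:
        TRANSFORMED        = 1 << 0   # same-space augmentations only
        WARPED_TRANSFORMED = 1 << 1   # random warp, then augmentations
        FACE_TYPE_HALF     = 1 << 2   # crop aligned to half-face coverage
        FACE_TYPE_FULL     = 1 << 3   # crop aligned to full-face coverage
        MODE_BGR           = 1 << 4   # return a 3-channel BGR image
        MODE_M             = 1 << 5   # return a 1-channel mask
        FACE_MASK_FULL     = 1 << 6   # mask covers the full face hull

    # An output spec then reads as "warped, full-face crop, BGR, at 128 px":
    spec = [TypeFlags.WARPED_TRANSFORMED | TypeFlags.FACE_TYPE_FULL | TypeFlags.MODE_BGR, 128]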

View file

@@ -33,12 +33,12 @@ class Model(ModelBase):
     if self.is_training_mode:
         f = SampleProcessor.TypeFlags
-        f_type = f.FACE_ALIGN_FULL
+        f_type = f.FACE_TYPE_FULL
         self.set_training_data_generators ([
             SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
-                                sample_process_options=SampleProcessor.Options(random_flip=True, normalize_tanh = True ),
-                                output_sample_types=[ [f.TRANSFORMED | f_type | f.MODE_BGR_SHUFFLE, self.resolution],
+                                sample_process_options=SampleProcessor.Options(random_flip=True, motion_blur = [25, 1], normalize_tanh = True ),
+                                output_sample_types=[ [f.TRANSFORMED | f_type | f.MODE_BGR_SHUFFLE | f.OPT_APPLY_MOTION_BLUR, self.resolution],
                                                       [f.TRANSFORMED | f_type | f.MODE_M | f.FACE_MASK_FULL, self.resolution]
                                                     ]),
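
The new motion_blur = [25, 1] option and the f.OPT_APPLY_MOTION_BLUR flag are the substance of this commit for the FANSegmentator trainer, but this hunk does not show how SampleProcessor consumes them. A plausible reading of the pair is (chance in percent, strength bound); under that assumption, the augmentation could be sketched with OpenCV roughly as below. The function name, parameter meanings, and kernel construction are all assumptions, not the repository's implementation.

    import numpy as np
    import cv2

    def apply_motion_blur(img, chance=25, max_size=1, rnd=np.random):
        # Hypothetical reading of motion_blur=[25, 1]: blur 'chance' percent
        # of samples, with 'max_size' bounding the kernel strength.
        if rnd.randint(100) >= chance:
            return img
        size = rnd.randint(max_size) * 2 + 3               # odd kernel size >= 3
        kernel = np.zeros((size, size), np.float32)
        kernel[size // 2, :] = 1.0 / size                  # horizontal streak
        center = (size / 2 - 0.5, size / 2 - 0.5)
        rot = cv2.getRotationMatrix2D(center, rnd.uniform(0, 360), 1.0)
        kernel = cv2.warpAffine(kernel, rot, (size, size)) # random blur direction
        kernel /= kernel.sum()                             # preserve brightness
        return cv2.filter2D(img, -1, kernel)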

View file

@@ -59,15 +59,15 @@ class Model(ModelBase):
         SampleGeneratorFace(self.training_data_src_path, sort_by_yaw_target_samples_path=self.training_data_dst_path if self.sort_by_yaw else None,
                             debug=self.is_debug(), batch_size=self.batch_size,
                             sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, scale_range=np.array([-0.05, 0.05])+self.src_scale_mod / 100.0 ),
-                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 128],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 128],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
+                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 128],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 128],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
         SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                             sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
-                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 128],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 128],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
+                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 128],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 128],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
         ])
     #override

View file

@@ -60,15 +60,15 @@ class Model(ModelBase):
         SampleGeneratorFace(self.training_data_src_path, sort_by_yaw_target_samples_path=self.training_data_dst_path if self.sort_by_yaw else None,
                             debug=self.is_debug(), batch_size=self.batch_size,
                             sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, scale_range=np.array([-0.05, 0.05])+self.src_scale_mod / 100.0 ),
-                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 64],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 64],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_M | f.FACE_MASK_FULL, 64] ] ),
+                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 64],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 64],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_M | f.FACE_MASK_FULL, 64] ] ),
         SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                             sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
-                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 64],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 64],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_M | f.FACE_MASK_FULL, 64] ] )
+                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 64],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 64],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_M | f.FACE_MASK_FULL, 64] ] )
         ])
     #override

View file

@@ -56,15 +56,15 @@ class Model(ModelBase):
         SampleGeneratorFace(self.training_data_src_path, sort_by_yaw_target_samples_path=self.training_data_dst_path if self.sort_by_yaw else None,
                             debug=self.is_debug(), batch_size=self.batch_size,
                             sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, scale_range=np.array([-0.05, 0.05])+self.src_scale_mod / 100.0 ),
-                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
+                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
         SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                             sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
-                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                                                  [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
+                            output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                                                  [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
         ])
     #override

View file

@@ -320,7 +320,7 @@ class SAEModel(ModelBase):
             self.dst_sample_losses = []
             f = SampleProcessor.TypeFlags
-            face_type = f.FACE_ALIGN_FULL if self.options['face_type'] == 'f' else f.FACE_ALIGN_HALF
+            face_type = f.FACE_TYPE_FULL if self.options['face_type'] == 'f' else f.FACE_TYPE_HALF
             output_sample_types=[ [f.WARPED_TRANSFORMED | face_type | f.MODE_BGR, resolution] ]
             output_sample_types += [ [f.TRANSFORMED | face_type | f.MODE_BGR, resolution // (2**i) ] for i in range(ms_count)]
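
The last two lines of this hunk build SAE's multi-scale targets: one warped sample at full resolution, then ms_count transformed samples at successively halved resolutions. A quick worked example, with resolution and ms_count values chosen purely for illustration:

    resolution, ms_count = 128, 3            # illustrative values, not from this hunk
    sizes = [resolution // (2 ** i) for i in range(ms_count)]
    print(sizes)                             # [128, 64, 32]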