Converter:

Session is now saved to the model folder.

blur and erode ranges are increased to -400..+400

hist-match-bw is now replaced with seamless2 mode.

Added 'ebs' color transfer mode (works only on Windows).

FANSEG model (used in FAN-x mask modes) is retrained with a new model configuration
and now produces better precision and less jitter.
This commit is contained in:
Colombo 2019-09-07 13:57:42 +04:00
parent 70dada42ea
commit 7ed38a8097
29 changed files with 768 additions and 314 deletions

View file

@ -117,9 +117,9 @@ class ModelBase(object):
if ask_batch_size and (self.iter == 0 or ask_override):
default_batch_size = 0 if self.iter == 0 else self.options.get('batch_size',0)
self.options['batch_size'] = max(0, io.input_int("Batch_size (?:help skip:%d) : " % (default_batch_size), default_batch_size, help_message="Larger batch size is better for NN's generalization, but it can cause Out of Memory error. Tune this value for your videocard manually."))
self.batch_size = max(0, io.input_int("Batch_size (?:help skip:%d) : " % (default_batch_size), default_batch_size, help_message="Larger batch size is better for NN's generalization, but it can cause Out of Memory error. Tune this value for your videocard manually."))
else:
self.options['batch_size'] = self.options.get('batch_size', 0)
self.batch_size = self.options.get('batch_size', 0)
if ask_sort_by_yaw:
if (self.iter == 0 or ask_override):
@ -152,7 +152,7 @@ class ModelBase(object):
if self.target_iter == 0 and 'target_iter' in self.options:
self.options.pop('target_iter')
self.batch_size = self.options.get('batch_size',0)
#self.batch_size = self.options.get('batch_size',0)
self.sort_by_yaw = self.options.get('sort_by_yaw',False)
self.random_flip = self.options.get('random_flip',True)
@ -325,14 +325,9 @@ class ModelBase(object):
#overridable
def get_ConverterConfig(self):
#return ConverterConfig() for the model
#return predictor_func, predictor_input_shape, ConverterConfig() for the model
raise NotImplementedError
#overridable
def get_converter(self):
raise NotImplementedError
#return existing or your own converter which derived from base
def get_target_iter(self):
return self.target_iter

View file

@ -353,10 +353,7 @@ class AVATARModel(ModelBase):
#override
def get_ConverterConfig(self):
import converters
return converters.ConverterConfigFaceAvatar(predictor_func=self.predictor_func,
predictor_input_shape=(self.df_res, self.df_res, 3),
temporal_face_count=1
)
return self.predictor_func, (self.df_res, self.df_res, 3), converters.ConverterConfigFaceAvatar(temporal_face_count=1)
@staticmethod
def NLayerDiscriminator(ndf=64, n_layers=3):

View file

@ -48,7 +48,7 @@ class Model(ModelBase):
self.set_training_data_generators ([
SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
sample_process_options=SampleProcessor.Options(random_flip=True),
output_sample_types=[ { 'types': (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR_SHUFFLE), 'resolution' : self.resolution, 'motion_blur':(25, 1) },
output_sample_types=[ { 'types': (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR_SHUFFLE), 'resolution' : self.resolution, 'motion_blur':(25, 5), 'border_replicate':False },
{ 'types': (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_M), 'resolution': self.resolution },
]),
@ -66,7 +66,7 @@ class Model(ModelBase):
def onTrainOneIter(self, generators_samples, generators_list):
target_src, target_src_mask = generators_samples[0]
loss = self.fan_seg.train_on_batch( [target_src], [target_src_mask] )
loss = self.fan_seg.train( target_src, target_src_mask )
return ( ('loss', loss), )

View file

@ -121,16 +121,7 @@ class Model(ModelBase):
#override
def get_ConverterConfig(self):
import converters
return converters.ConverterConfigMasked(predictor_func=self.predictor_func,
predictor_input_shape=(128,128,3),
predictor_masked=True,
face_type=FaceType.FULL,
default_mode=4,
base_erode_mask_modifier=30,
base_blur_mask_modifier=0,
default_erode_mask_modifier=0,
default_blur_mask_modifier=0,
)
return self.predictor_func, (128,128,3), converters.ConverterConfigMasked(face_type=FaceType.FULL, default_mode=4)
def Build(self, input_layer):
exec(nnlib.code_import_all, locals(), globals())

View file

@ -129,16 +129,7 @@ class Model(ModelBase):
#override
def get_ConverterConfig(self):
import converters
return converters.ConverterConfigMasked(predictor_func=self.predictor_func,
predictor_input_shape=(128,128,3),
predictor_masked=True,
face_type=FaceType.HALF,
default_mode=4,
base_erode_mask_modifier=100,
base_blur_mask_modifier=100,
default_erode_mask_modifier=0,
default_blur_mask_modifier=0,
)
return self.predictor_func, (128,128,3), converters.ConverterConfigMasked(face_type=FaceType.HALF, default_mode=4)
def Build(self, lighter_ae):
exec(nnlib.code_import_all, locals(), globals())

View file

@ -130,16 +130,7 @@ class Model(ModelBase):
#override
def get_ConverterConfig(self):
import converters
return converters.ConverterConfigMasked(predictor_func=self.predictor_func,
predictor_input_shape=(64,64,3),
predictor_masked=True,
face_type=FaceType.HALF,
default_mode=4,
base_erode_mask_modifier=100,
base_blur_mask_modifier=100,
default_erode_mask_modifier=0,
default_blur_mask_modifier=0,
)
return self.predictor_func, (64,64,3), converters.ConverterConfigMasked(face_type=FaceType.HALF, default_mode=4)
def Build(self, lighter_ae):
exec(nnlib.code_import_all, locals(), globals())

View file

@ -127,16 +127,7 @@ class Model(ModelBase):
#override
def get_ConverterConfig(self):
import converters
return converters.ConverterConfigMasked(predictor_func=self.predictor_func,
predictor_input_shape=(128,128,3),
predictor_masked=True,
face_type=FaceType.FULL,
default_mode=4,
base_erode_mask_modifier=30,
base_blur_mask_modifier=0,
default_erode_mask_modifier=0,
default_blur_mask_modifier=0,
)
return self.predictor_func, (128,128,3), converters.ConverterConfigMasked(face_type=FaceType.FULL, default_mode=4)
def Build(self, input_layer):
exec(nnlib.code_import_all, locals(), globals())

View file

@ -483,25 +483,11 @@ class SAEModel(ModelBase):
#override
def get_ConverterConfig(self):
base_erode_mask_modifier = 30 if self.options['face_type'] == 'f' else 100
base_blur_mask_modifier = 0 if self.options['face_type'] == 'f' else 100
default_erode_mask_modifier = 0
default_blur_mask_modifier = 100 if (self.options['face_style_power'] or self.options['bg_style_power']) and \
self.options['face_type'] == 'f' else 0
face_type = FaceType.FULL if self.options['face_type'] == 'f' else FaceType.HALF
import converters
return converters.ConverterConfigMasked(predictor_func=self.predictor_func,
predictor_input_shape=(self.options['resolution'], self.options['resolution'], 3),
predictor_masked=self.options['learn_mask'],
face_type=face_type,
return self.predictor_func, (self.options['resolution'], self.options['resolution'], 3), converters.ConverterConfigMasked(face_type=face_type,
default_mode = 1 if self.options['apply_random_ct'] or self.options['face_style_power'] or self.options['bg_style_power'] else 4,
base_erode_mask_modifier=base_erode_mask_modifier,
base_blur_mask_modifier=base_blur_mask_modifier,
default_erode_mask_modifier=default_erode_mask_modifier,
default_blur_mask_modifier=default_blur_mask_modifier,
clip_hborder_mask_per=0.0625 if (self.options['face_type'] == 'f') else 0,
)