Converter: added "Apply super resolution? (y/n skip:n) : " option, which enhances details by applying the DCSCN network.

refactorings
iperov 2019-03-28 21:50:27 +04:00
commit 85c01e3b4a
12 changed files with 271 additions and 77 deletions
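
For context: the commit message refers to a new interactive converter prompt; the DCSCN super-resolution pass itself is not in the hunks shown below, which only cover the model refactorings. A rough sketch of what the option does, assuming a hypothetical dcscn_upscale helper standing in for the DCSCN network:

import numpy as np

def dcscn_upscale(bgr):
    # Hypothetical stand-in for the DCSCN detail-enhancement network added by
    # this commit; returns the input unchanged so the sketch runs as-is.
    return bgr

def postprocess_face(bgr, apply_super_resolution=False):
    # Mirrors the new "Apply super resolution? (y/n skip:n)" prompt: when the
    # user answers "y", the predicted face is passed through DCSCN before
    # blending; otherwise it is used directly.
    if apply_super_resolution:
        bgr = dcscn_upscale(bgr)
    return np.clip(bgr, 0.0, 1.0)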

models/Model_DF/Model.py

@@ -33,12 +33,16 @@ class Model(ModelBase):
                            ]
         self.load_weights_safe(weights_to_load)
-        self.autoencoder_src = Model([ae_input_layer,mask_layer], self.decoder_src(self.encoder(ae_input_layer)))
-        self.autoencoder_dst = Model([ae_input_layer,mask_layer], self.decoder_dst(self.encoder(ae_input_layer)))
+        rec_src = self.decoder_src(self.encoder(ae_input_layer))
+        rec_dst = self.decoder_dst(self.encoder(ae_input_layer))
+        self.autoencoder_src = Model([ae_input_layer,mask_layer], rec_src)
+        self.autoencoder_dst = Model([ae_input_layer,mask_layer], rec_dst)
         self.autoencoder_src.compile(optimizer=Adam(lr=5e-5, beta_1=0.5, beta_2=0.999), loss=[DSSIMMSEMaskLoss(mask_layer, is_mse=self.options['pixel_loss']), 'mse'] )
         self.autoencoder_dst.compile(optimizer=Adam(lr=5e-5, beta_1=0.5, beta_2=0.999), loss=[DSSIMMSEMaskLoss(mask_layer, is_mse=self.options['pixel_loss']), 'mse'] )
+        self.convert = K.function([ae_input_layer], rec_src)

         if self.is_training_mode:
             f = SampleProcessor.TypeFlags
             self.set_training_data_generators ([
@@ -103,21 +107,14 @@ class Model(ModelBase):
         return [ ('DF', np.concatenate ( st, axis=0 ) ) ]

     def predictor_func (self, face):
-        face_128_bgr = face[...,0:3]
-        face_128_mask = np.expand_dims(face[...,3],-1)
-        x, mx = self.autoencoder_src.predict ( [ np.expand_dims(face_128_bgr,0), np.expand_dims(face_128_mask,0) ] )
-        x, mx = x[0], mx[0]
-        return np.concatenate ( (x,mx), -1 )
+        x, mx = self.convert ( [ face[np.newaxis,...] ] )
+        return x[0], mx[0][...,0]

     #override
     def get_converter(self):
         from converters import ConverterMasked
         return ConverterMasked(self.predictor_func,
                                predictor_input_size=128,
-                               output_size=128,
                                face_type=FaceType.FULL,
                                base_erode_mask_modifier=30,
                                base_blur_mask_modifier=0)
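
The same refactoring pattern recurs in the model files below: instead of calling autoencoder_src.predict, which requires feeding the training-only mask input, the reconstruction tensors are named (rec_src, rec_dst) and a K.function is compiled over them, so inference feeds only the face image and gets the BGR and mask outputs back as a list. A minimal, self-contained sketch of the pattern, using plain Keras and toy single-layer stand-ins rather than the repo's nnlib wrapper and real networks:

import numpy as np
from keras import backend as K
from keras.layers import Input, Conv2D

inp = Input((128, 128, 3))
bgr = Conv2D(3, 3, padding='same')(inp)   # stand-in for the decoder's BGR output
msk = Conv2D(1, 3, padding='same')(inp)   # stand-in for the decoder's mask output

# Compile a backend function over the output tensors: unlike Model.predict on
# a multi-input model, it takes only the inputs the outputs actually depend on.
convert = K.function([inp], [bgr, msk])

face = np.zeros((128, 128, 3), dtype=np.float32)
x, mx = convert([face[np.newaxis, ...]])
print(x[0].shape, mx[0][..., 0].shape)    # (128, 128, 3) (128, 128)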

models/Model_H128/Model.py

@@ -116,20 +116,14 @@ class Model(ModelBase):
         return [ ('H128', np.concatenate ( st, axis=0 ) ) ]

     def predictor_func (self, face):
-        face_128_bgr = face[...,0:3]
-        face_128_mask = np.expand_dims(face[...,3],-1)
-        x, mx = self.src_view ( [ np.expand_dims(face_128_bgr,0) ] )
-        x, mx = x[0], mx[0]
-        return np.concatenate ( (x,mx), -1 )
+        x, mx = self.src_view ( [ face[np.newaxis,...] ] )
+        return x[0], mx[0][...,0]

     #override
     def get_converter(self):
         from converters import ConverterMasked
         return ConverterMasked(self.predictor_func,
                                predictor_input_size=128,
-                               output_size=128,
                                face_type=FaceType.HALF,
                                base_erode_mask_modifier=100,
                                base_blur_mask_modifier=100)

models/Model_H64/Model.py

@@ -117,21 +117,14 @@ class Model(ModelBase):
         return [ ('H64', np.concatenate ( st, axis=0 ) ) ]

     def predictor_func (self, face):
-        face_64_bgr = face[...,0:3]
-        face_64_mask = np.expand_dims(face[...,3],-1)
-        x, mx = self.src_view ( [ np.expand_dims(face_64_bgr,0) ] )
-        x, mx = x[0], mx[0]
-        return np.concatenate ( (x,mx), -1 )
+        x, mx = self.src_view ( [ face[np.newaxis,...] ] )
+        return x[0], mx[0][...,0]

     #override
     def get_converter(self):
         from converters import ConverterMasked
         return ConverterMasked(self.predictor_func,
                                predictor_input_size=64,
-                               output_size=64,
                                face_type=FaceType.HALF,
                                base_erode_mask_modifier=100,
                                base_blur_mask_modifier=100)

models/Model_LIAEF128/Model.py

@@ -37,12 +37,17 @@ class Model(ModelBase):
         code = self.encoder(ae_input_layer)
         AB = self.inter_AB(code)
         B = self.inter_B(code)
-        self.autoencoder_src = Model([ae_input_layer,mask_layer], self.decoder(Concatenate()([AB, AB])) )
-        self.autoencoder_dst = Model([ae_input_layer,mask_layer], self.decoder(Concatenate()([B, AB])) )
+        rec_src = self.decoder(Concatenate()([AB, AB]))
+        rec_dst = self.decoder(Concatenate()([B, AB]))
+        self.autoencoder_src = Model([ae_input_layer,mask_layer], rec_src )
+        self.autoencoder_dst = Model([ae_input_layer,mask_layer], rec_dst )
         self.autoencoder_src.compile(optimizer=Adam(lr=5e-5, beta_1=0.5, beta_2=0.999), loss=[DSSIMMSEMaskLoss(mask_layer, is_mse=self.options['pixel_loss']), 'mse'] )
         self.autoencoder_dst.compile(optimizer=Adam(lr=5e-5, beta_1=0.5, beta_2=0.999), loss=[DSSIMMSEMaskLoss(mask_layer, is_mse=self.options['pixel_loss']), 'mse'] )
+        self.convert = K.function([ae_input_layer],rec_src)

         if self.is_training_mode:
             f = SampleProcessor.TypeFlags
             self.set_training_data_generators ([
@@ -111,21 +116,14 @@ class Model(ModelBase):
         return [ ('LIAEF128', np.concatenate ( st, axis=0 ) ) ]

     def predictor_func (self, face):
-        face_128_bgr = face[...,0:3]
-        face_128_mask = np.expand_dims(face[...,3],-1)
-        x, mx = self.autoencoder_src.predict ( [ np.expand_dims(face_128_bgr,0), np.expand_dims(face_128_mask,0) ] )
-        x, mx = x[0], mx[0]
-        return np.concatenate ( (x,mx), -1 )
+        x, mx = self.convert ( [ face[np.newaxis,...] ] )
+        return x[0], mx[0][...,0]

     #override
     def get_converter(self):
         from converters import ConverterMasked
         return ConverterMasked(self.predictor_func,
                                predictor_input_size=128,
-                               output_size=128,
                                face_type=FaceType.FULL,
                                base_erode_mask_modifier=30,
                                base_blur_mask_modifier=0)
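
In LIAEF128 the two reconstructions share a single decoder: the src path concatenates the inter_AB code with itself, while the dst path mixes inter_B with inter_AB; at conversion time the src path (rec_src) is run on dst faces, which is what swaps the identity. Naming the tensors lets the same rec_src back both the training Model and the inference K.function. A toy sketch of that wiring (Dense stand-ins, not the real networks):

from keras import backend as K
from keras.layers import Concatenate, Dense, Input
from keras.models import Model

inp = Input((16,))
encoder = Dense(8)
inter_AB = Dense(4)   # intermediate trained on both src and dst samples
inter_B = Dense(4)    # intermediate trained on dst samples only

code = encoder(inp)
AB = inter_AB(code)
B = inter_B(code)

decoder = Dense(3)                        # one decoder shared by both paths
rec_src = decoder(Concatenate()([AB, AB]))
rec_dst = decoder(Concatenate()([B, AB]))

autoencoder_src = Model(inp, rec_src)     # used for training
convert = K.function([inp], [rec_src])    # used for conversion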

models/Model_SAE/Model.py

@@ -406,13 +406,12 @@ class SAEModel(ModelBase):
         return [ ('SAE', np.concatenate (st, axis=0 )), ]

     def predictor_func (self, face):
-        prd = [ x[0] for x in self.AE_convert ( [ face[np.newaxis,:,:,0:3] ] ) ]
-        if not self.options['learn_mask']:
-            prd += [ face[...,3:4] ]
-        return np.concatenate ( prd, -1 )
+        if self.options['learn_mask']:
+            bgr, mask = self.AE_convert ([face[np.newaxis,...]])
+            return bgr[0], mask[0][...,0]
+        else:
+            bgr, = self.AE_convert ([face[np.newaxis,...]])
+            return bgr[0]

     #override
     def get_converter(self):
@@ -428,7 +427,7 @@ class SAEModel(ModelBase):
         from converters import ConverterMasked
         return ConverterMasked(self.predictor_func,
                                predictor_input_size=self.options['resolution'],
-                               output_size=self.options['resolution'],
+                               predictor_masked=self.options['learn_mask'],
                                face_type=face_type,
                                default_mode = 1 if self.options['face_style_power'] or self.options['bg_style_power'] else 4,
                                base_erode_mask_modifier=base_erode_mask_modifier,
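
The net effect across these files is a new predictor_func contract: instead of one array with the mask concatenated as a fourth channel, it returns a (bgr, mask) tuple when the model learns a mask, or bare bgr otherwise, and ConverterMasked is told which case applies via the new predictor_masked argument (the output_size argument disappears from these call sites). A sketch of how a consumer could handle both cases; the names are illustrative, not the real ConverterMasked internals:

import numpy as np

def run_predictor(predictor_func, face, predictor_masked=True):
    # face is HxWx4 (BGR + mask channel). With predictor_masked=True the
    # predictor returns (bgr, mask); otherwise it returns bare bgr and the
    # mask falls back to the input face's fourth channel.
    if predictor_masked:
        prd_bgr, prd_mask = predictor_func(face)
    else:
        prd_bgr = predictor_func(face)
        prd_mask = face[..., 3]
    return prd_bgr, prd_mask

dummy_predictor = lambda f: (f[..., :3], f[..., 3])   # stand-in for a model's predictor_func
face = np.zeros((128, 128, 4), dtype=np.float32)
bgr, mask = run_predictor(dummy_predictor, face, predictor_masked=True)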