AVATAR fixes

iperov 2018-06-06 18:43:11 +04:00
parent 612ef5155e
commit 4aaac5e42e


@@ -12,7 +12,7 @@ class Model(ModelBase):
     decoder64_srcH5 = 'decoder64_src.h5'
     decoder64_dstH5 = 'decoder64_dst.h5'
     encoder256H5 = 'encoder256.h5'
-    decoder256_srcH5 = 'decoder256_src.h5'
+    decoder256H5 = 'decoder256.h5'
 
     #override
     def onInitialize(self, **in_options):
@@ -20,71 +20,79 @@ class Model(ModelBase):
         keras = self.keras
         K = keras.backend
 
-        self.set_vram_batch_requirements( {4:8,5:16,6:20,7:24,8:32,9:48} )
+        self.set_vram_batch_requirements( {3.5:8,4:8,5:12,6:16,7:24,8:32,9:48} )
+        if self.batch_size < 4:
+            self.batch_size = 4
 
-        self.encoder64, self.decoder64_src, self.decoder64_dst, self.encoder256, self.decoder256_src = self.BuildAE()
-        img_shape64 = (64,64,1)
-        img_shape256 = (256,256,3)
+        img_shape64, img_shape256, self.encoder64, self.decoder64_src, self.decoder64_dst, self.encoder256, self.decoder256 = self.Build()
 
         if not self.is_first_run():
             self.encoder64.load_weights (self.get_strpath_storage_for_file(self.encoder64H5))
             self.decoder64_src.load_weights (self.get_strpath_storage_for_file(self.decoder64_srcH5))
             self.decoder64_dst.load_weights (self.get_strpath_storage_for_file(self.decoder64_dstH5))
             self.encoder256.load_weights (self.get_strpath_storage_for_file(self.encoder256H5))
-            self.decoder256_src.load_weights (self.get_strpath_storage_for_file(self.decoder256_srcH5))
+            self.decoder256.load_weights (self.get_strpath_storage_for_file(self.decoder256H5))
 
         if self.is_training_mode:
-            self.encoder64, self.decoder64_src, self.decoder64_dst, self.encoder256, self.decoder256_src = self.to_multi_gpu_model_if_possible ( [self.encoder64, self.decoder64_src, self.decoder64_dst, self.encoder256, self.decoder256_src] )
+            self.encoder64, self.decoder64_src, self.decoder64_dst, self.encoder256, self.decoder256 = self.to_multi_gpu_model_if_possible ( [self.encoder64, self.decoder64_src, self.decoder64_dst, self.encoder256, self.decoder256] )
 
-        input_src_64 = keras.layers.Input(img_shape64)
-        input_src_target64 = keras.layers.Input(img_shape64)
-        input_src_target256 = keras.layers.Input(img_shape256)
-        input_dst_64 = keras.layers.Input(img_shape64)
-        input_dst_target64 = keras.layers.Input(img_shape64)
+        input_A_warped64 = keras.layers.Input(img_shape64)
+        input_A_target64 = keras.layers.Input(img_shape64)
+        input_B_warped64 = keras.layers.Input(img_shape64)
+        input_B_target64 = keras.layers.Input(img_shape64)
 
-        src_code64 = self.encoder64(input_src_64)
-        dst_code64 = self.encoder64(input_dst_64)
-        rec_src64 = self.decoder64_src(src_code64)
-        rec_dst64 = self.decoder64_dst(dst_code64)
+        A_code64 = self.encoder64(input_A_warped64)
+        B_code64 = self.encoder64(input_B_warped64)
+        A_rec64 = self.decoder64_src(A_code64)
+        B_rec64 = self.decoder64_dst(B_code64)
 
-        src64_loss = tf_dssim(tf, input_src_target64, rec_src64)
-        dst64_loss = tf_dssim(tf, input_dst_target64, rec_dst64)
-        total64_loss = src64_loss + dst64_loss
+        A64_loss = tf_dssim(tf, input_A_target64, A_rec64)
+        B64_loss = tf_dssim(tf, input_B_target64, B_rec64)
+        total64_loss = A64_loss + B64_loss
 
-        self.ed64_train = K.function ([input_src_64, input_src_target64, input_dst_64, input_dst_target64],[K.mean(total64_loss)],
+        self.ed64_train = K.function ([input_A_warped64, input_A_target64, input_B_warped64, input_B_target64],[K.mean(total64_loss)],
             self.keras.optimizers.Adam(lr=5e-5, beta_1=0.5, beta_2=0.999).get_updates(total64_loss, self.encoder64.trainable_weights + self.decoder64_src.trainable_weights + self.decoder64_dst.trainable_weights)
             )
 
+        self.A64_view = K.function ([input_A_warped64], [A_rec64])
+        self.B64_view = K.function ([input_B_warped64], [B_rec64])
 
-        src_code256 = self.encoder256(input_src_64)
-        rec_src256 = self.decoder256_src(src_code256)
-        src256_loss = tf_dssim(tf, input_src_target256, rec_src256)
-        self.ed256_train = K.function ([input_src_64, input_src_target256],[K.mean(src256_loss)],
-            self.keras.optimizers.Adam(lr=5e-5, beta_1=0.5, beta_2=0.999).get_updates(src256_loss, self.encoder256.trainable_weights + self.decoder256_src.trainable_weights)
+        input_A_warped64 = keras.layers.Input(img_shape64)
+        input_A_target256 = keras.layers.Input(img_shape256)
+        A_code256 = self.encoder256(input_A_warped64)
+        A_rec256 = self.decoder256(A_code256)
+
+        input_B_warped64 = keras.layers.Input(img_shape64)
+        B_code64 = self.encoder64(input_B_warped64)
+        BA_rec64 = self.decoder64_src(B_code64)
+        BA_code256 = self.encoder256(BA_rec64)
+        BA_rec256 = self.decoder256(BA_code256)
+
+        total256_loss = K.mean( tf_dssim(tf, input_A_target256, A_rec256) )
+        self.ed256_train = K.function ([input_A_warped64, input_A_target256],[total256_loss],
+            self.keras.optimizers.Adam(lr=5e-5, beta_1=0.5, beta_2=0.999).get_updates(total256_loss, self.encoder256.trainable_weights + self.decoder256.trainable_weights)
             )
 
-        src_code256 = self.encoder256(rec_src64)
-        rec_src256 = self.decoder256_src(src_code256)
-        self.src256_view = K.function ([input_src_64], [rec_src256])
+        self.A256_view = K.function ([input_A_warped64], [A_rec256])
+        self.BA256_view = K.function ([input_B_warped64], [BA_rec256])
 
         if self.is_training_mode:
             from models import TrainingDataGenerator
             f = TrainingDataGenerator.SampleTypeFlags
             self.set_training_data_generators ([
                     TrainingDataGenerator(TrainingDataType.FACE, self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size, output_sample_types=[
-                        [f.WARPED_TRANSFORMED | f.HALF_FACE | f.MODE_G, 64],
-                        [f.TRANSFORMED | f.HALF_FACE | f.MODE_G, 64],
+                        [f.WARPED_TRANSFORMED | f.HALF_FACE | f.MODE_BGR, 64],
+                        [f.TRANSFORMED | f.HALF_FACE | f.MODE_BGR, 64],
                         [f.TRANSFORMED | f.FULL_FACE | f.MODE_BGR, 256],
-                        [f.SOURCE | f.HALF_FACE | f.MODE_G, 64],
-                        [f.SOURCE | f.HALF_FACE | f.MODE_GGG, 256] ] ),
+                        [f.SOURCE | f.HALF_FACE | f.MODE_BGR, 64],
+                        [f.SOURCE | f.HALF_FACE | f.MODE_BGR, 256] ] ),
                     TrainingDataGenerator(TrainingDataType.FACE, self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size, output_sample_types=[
-                        [f.WARPED_TRANSFORMED | f.HALF_FACE | f.MODE_G, 64],
-                        [f.TRANSFORMED | f.HALF_FACE | f.MODE_G, 64],
-                        [f.SOURCE | f.HALF_FACE | f.MODE_G, 64],
-                        [f.SOURCE | f.HALF_FACE | f.MODE_GGG, 256] ] )
+                        [f.WARPED_TRANSFORMED | f.HALF_FACE | f.MODE_BGR, 64],
+                        [f.TRANSFORMED | f.HALF_FACE | f.MODE_BGR, 64],
+                        [f.SOURCE | f.HALF_FACE | f.MODE_BGR, 64],
+                        [f.SOURCE | f.HALF_FACE | f.MODE_BGR, 256] ] )
                 ])
 
     #override
     def onSave(self):
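Note: both training functions in the hunk above follow the same hand-rolled pattern rather than model.fit — build a DSSIM loss tensor, ask an Adam instance for its update ops via get_updates, and bake loss and updates into a single K.function. The new BA path additionally chains dst faces through the src 64px decoder before re-encoding at 256px, which is what produces the avatar swap. A minimal sketch of the train-step pattern (Keras 2 / TF1 era; make_train_fn and the L1 stand-in loss are illustrative, not the repo's tf_dssim):

    import keras
    from keras import backend as K

    def make_train_fn(encoder, decoder, img_shape):
        # hypothetical helper mirroring how ed64_train / ed256_train are built
        warped = keras.layers.Input(img_shape)
        target = keras.layers.Input(img_shape)
        rec = decoder(encoder(warped))
        loss = K.mean(K.abs(target - rec))  # stand-in for the repo's tf_dssim
        opt = keras.optimizers.Adam(lr=5e-5, beta_1=0.5, beta_2=0.999)
        updates = opt.get_updates(loss, encoder.trainable_weights + decoder.trainable_weights)
        # each call to the returned function runs exactly one optimizer step
        return K.function([warped, target], [loss], updates)

Calling the result as train_fn([warped_batch, target_batch]) returns the scalar loss while applying one Adam update, which is how onTrainOneEpoch drives ed64_train and ed256_train below.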
@@ -92,73 +100,100 @@ class Model(ModelBase):
                 [self.decoder64_src, self.get_strpath_storage_for_file(self.decoder64_srcH5)],
                 [self.decoder64_dst, self.get_strpath_storage_for_file(self.decoder64_dstH5)],
                 [self.encoder256, self.get_strpath_storage_for_file(self.encoder256H5)],
-                [self.decoder256_src, self.get_strpath_storage_for_file(self.decoder256_srcH5)],
+                [self.decoder256, self.get_strpath_storage_for_file(self.decoder256H5)],
                 ] )
 
     #override
     def onTrainOneEpoch(self, sample):
-        warped_src64, target_src64, target_src256, target_src_source64_G, target_src_source256_GGG = sample[0]
-        warped_dst64, target_dst64, target_dst_source64_G, target_dst_source256_GGG = sample[1]
+        warped_src64, target_src64, target_src256, target_src_source64, target_src_source256 = sample[0]
+        warped_dst64, target_dst64, target_dst_source64, target_dst_source256 = sample[1]
 
         loss64, = self.ed64_train ([warped_src64, target_src64, warped_dst64, target_dst64])
         loss256, = self.ed256_train ([warped_src64, target_src256])
 
-        return ( ('loss64', loss64), ('loss256', loss256) )
+        return ( ('loss64', loss64), ('loss256', loss256), )
 
     #override
     def onGetPreview(self, sample):
-        n_samples = 4
-        test_B = sample[1][2][0:n_samples]
-        test_B256 = sample[1][3][0:n_samples]
-
-        BB, = self.src256_view ([test_B])
-
-        st = []
-        for i in range(n_samples // 2):
-            st.append ( np.concatenate ( (
-                test_B256[i*2+0], BB[i*2+0], test_B256[i*2+1], BB[i*2+1],
-                ), axis=1) )
-
-        return [ ('AVATAR', np.concatenate ( st, axis=0 ) ) ]
+        sample_src64_source = sample[0][3][0:4]
+        sample_src256_source = sample[0][4][0:4]
+
+        sample_dst64_source = sample[1][2][0:4]
+        sample_dst256_source = sample[1][3][0:4]
+
+        SRC64, = self.A64_view ([sample_src64_source])
+        DST64, = self.B64_view ([sample_dst64_source])
+        SRCDST64, = self.A64_view ([sample_dst64_source])
+        DSTSRC64, = self.B64_view ([sample_src64_source])
+
+        SRC_x1_256, = self.A256_view ([sample_src64_source])
+        DST_x2_256, = self.BA256_view ([sample_dst64_source])
+
+        b1 = np.concatenate ( (
+            np.concatenate ( (sample_src64_source[0], SRC64[0], sample_src64_source[1], SRC64[1], ), axis=1),
+            np.concatenate ( (sample_src64_source[1], SRC64[1], sample_src64_source[3], SRC64[3], ), axis=1),
+            np.concatenate ( (sample_dst64_source[0], DST64[0], sample_dst64_source[1], DST64[1], ), axis=1),
+            np.concatenate ( (sample_dst64_source[2], DST64[2], sample_dst64_source[3], DST64[3], ), axis=1),
+            ), axis=0 )
+
+        b2 = np.concatenate ( (
+            np.concatenate ( (sample_src64_source[0], DSTSRC64[0], sample_src64_source[1], DSTSRC64[1], ), axis=1),
+            np.concatenate ( (sample_src64_source[2], DSTSRC64[2], sample_src64_source[3], DSTSRC64[3], ), axis=1),
+            np.concatenate ( (sample_dst64_source[0], SRCDST64[0], sample_dst64_source[1], SRCDST64[1], ), axis=1),
+            np.concatenate ( (sample_dst64_source[2], SRCDST64[2], sample_dst64_source[3], SRCDST64[3], ), axis=1),
+            ), axis=0 )
+
+        result = np.concatenate ( ( np.concatenate ( (b1, sample_src256_source[0], SRC_x1_256[0] ), axis=1 ),
+                                    np.concatenate ( (b2, sample_dst256_source[0], DST_x2_256[0] ), axis=1 ),
+                                    ), axis=0 )
+
+        return [ ('AVATAR', result ) ]
 
     def predictor_func (self, img):
-        x, = self.src256_view ([ np.expand_dims(img, 0) ])[0]
+        x, = self.BA256_view ([ np.expand_dims(img, 0) ])[0]
         return x
 
     #override
     def get_converter(self, **in_options):
         return ConverterAvatar(self.predictor_func, predictor_input_size=64, output_size=256, **in_options)
 
-    def BuildAE(self):
+    def Build(self):
         keras, K = self.keras, self.keras.backend
+        img_shape64 = (64,64,3)
+        img_shape256 = (256,256,3)
 
         def Encoder(_input):
-            x = keras.layers.convolutional.Conv2D(90, kernel_size=5, strides=1, padding='same')(_input)
-            x = keras.layers.convolutional.Conv2D(90, kernel_size=5, strides=1, padding='same')(x)
-            x = keras.layers.MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)
+            x = _input
+            x = self.keras.layers.convolutional.Conv2D(90, kernel_size=5, strides=1, padding='same')(x)
+            x = self.keras.layers.convolutional.Conv2D(90, kernel_size=5, strides=1, padding='same')(x)
+            x = self.keras.layers.MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)
 
-            x = keras.layers.convolutional.Conv2D(180, kernel_size=3, strides=1, padding='same')(x)
-            x = keras.layers.convolutional.Conv2D(180, kernel_size=3, strides=1, padding='same')(x)
-            x = keras.layers.MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)
+            x = self.keras.layers.convolutional.Conv2D(180, kernel_size=3, strides=1, padding='same')(x)
+            x = self.keras.layers.convolutional.Conv2D(180, kernel_size=3, strides=1, padding='same')(x)
+            x = self.keras.layers.MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)
 
-            x = keras.layers.convolutional.Conv2D(360, kernel_size=3, strides=1, padding='same')(x)
-            x = keras.layers.convolutional.Conv2D(360, kernel_size=3, strides=1, padding='same')(x)
-            x = keras.layers.MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)
+            x = self.keras.layers.convolutional.Conv2D(360, kernel_size=3, strides=1, padding='same')(x)
+            x = self.keras.layers.convolutional.Conv2D(360, kernel_size=3, strides=1, padding='same')(x)
+            x = self.keras.layers.MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)
 
-            x = keras.layers.Dense (1024)(x)
-            x = keras.layers.advanced_activations.LeakyReLU(0.1)(x)
-            x = keras.layers.Dropout(0.5)(x)
+            x = self.keras.layers.Dense (1024)(x)
+            x = self.keras.layers.advanced_activations.LeakyReLU(0.1)(x)
+            x = self.keras.layers.Dropout(0.5)(x)
 
-            x = keras.layers.Dense (1024)(x)
-            x = keras.layers.advanced_activations.LeakyReLU(0.1)(x)
-            x = keras.layers.Dropout(0.5)(x)
-            x = keras.layers.Flatten()(x)
-            x = keras.layers.Dense (64)(x)
+            x = self.keras.layers.Dense (1024)(x)
+            x = self.keras.layers.advanced_activations.LeakyReLU(0.1)(x)
+            x = self.keras.layers.Dropout(0.5)(x)
+            x = self.keras.layers.Flatten()(x)
+            x = self.keras.layers.Dense (64)(x)
 
             return keras.models.Model (_input, x)
 
-        encoder256 = Encoder( keras.layers.Input ( (64, 64, 1) ) )
-        encoder64 = Encoder( keras.layers.Input ( (64, 64, 1) ) )
+        encoder256 = Encoder( keras.layers.Input (img_shape64) )
+        encoder64 = Encoder( keras.layers.Input (img_shape64) )
 
-        def decoder256_3(encoder):
+        def decoder256(encoder):
             decoder_input = keras.layers.Input ( K.int_shape(encoder.outputs[0])[1:] )
             x = decoder_input
             x = self.keras.layers.Dense(16 * 16 * 720)(x)
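Note: the preview mosaic above only lines up because of the tile arithmetic — four 64 px cells per row give 256 px of width, and four rows give 256 px of height, so each b-block can sit beside one 256x256 source sample and one 256 px prediction without any resizing. A quick shape check (illustrative, not repo code):

    import numpy as np

    tile = np.zeros((64, 64, 3))                # one 64 px preview cell
    row = np.concatenate([tile] * 4, axis=1)    # (64, 256, 3)
    block = np.concatenate([row] * 4, axis=0)   # (256, 256, 3), like b1 / b2
    panel = np.concatenate([block,
                            np.zeros((256, 256, 3)),   # e.g. sample_src256_source[0]
                            np.zeros((256, 256, 3))],  # e.g. SRC_x1_256[0]
                           axis=1)              # (256, 768, 3)
    assert panel.shape == (256, 768, 3)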
@@ -170,7 +205,7 @@ class Model(ModelBase):
             x = keras.layers.convolutional.Conv2D(3, kernel_size=5, padding='same', activation='sigmoid')(x)
             return keras.models.Model(decoder_input, x)
 
-        def decoder64_1(encoder):
+        def decoder64(encoder):
             decoder_input = keras.layers.Input ( K.int_shape(encoder.outputs[0])[1:] )
             x = decoder_input
             x = self.keras.layers.Dense(8 * 8 * 720)(x)
@@ -178,10 +213,10 @@ class Model(ModelBase):
             x = upscale(keras, x, 360)
             x = upscale(keras, x, 180)
             x = upscale(keras, x, 90)
-            x = keras.layers.convolutional.Conv2D(1, kernel_size=5, padding='same', activation='sigmoid')(x)
+            x = keras.layers.convolutional.Conv2D(3, kernel_size=5, padding='same', activation='sigmoid')(x)
             return keras.models.Model(decoder_input, x)
 
-        return encoder64, decoder64_1(encoder64), decoder64_1(encoder64), encoder256, decoder256_3(encoder256)
+        return img_shape64, img_shape256, encoder64, decoder64(encoder64), decoder64(encoder64), encoder256, decoder256(encoder256)
 
 from models import ConverterBase
 from facelib import FaceType
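Note: the decoder's spatial arithmetic works out as 8 -> 16 -> 32 -> 64: Dense(8 * 8 * 720) is presumably reshaped to an 8x8x720 grid, each upscale call doubles the resolution, and the final 3-channel sigmoid conv emits the 64 px BGR image (the 256 px decoder starts from 16x16 and needs four such doublings). A sketch of one doubling block, assuming upscale is the usual conv + pixel-shuffle helper; upscale2x here is a hypothetical stand-in, not the repo's signature:

    import keras
    import tensorflow as tf  # TF1-era backend, matching the repo

    def upscale2x(x, filters):
        # Conv2D grows channels 4x, then depth_to_space trades channels
        # for resolution: (h, w, 4 * f) -> (2h, 2w, f)
        x = keras.layers.Conv2D(filters * 4, kernel_size=3, padding='same')(x)
        x = keras.layers.LeakyReLU(0.1)(x)
        return keras.layers.Lambda(lambda t: tf.depth_to_space(t, 2))(x)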
@@ -205,7 +240,7 @@ class ConverterAvatar(ConverterBase):
     #override
     def dummy_predict(self):
-        self.predictor ( np.zeros ( (self.predictor_input_size, self.predictor_input_size,1), dtype=np.float32) )
+        self.predictor ( np.zeros ( (self.predictor_input_size, self.predictor_input_size,3), dtype=np.float32) )
 
     #override
     def convert_image (self, img_bgr, img_face_landmarks, debug):
@@ -213,9 +248,8 @@ class ConverterAvatar(ConverterBase):
         face_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, self.predictor_input_size, face_type=FaceType.HALF )
         predictor_input_bgr = cv2.warpAffine( img_bgr, face_mat, (self.predictor_input_size, self.predictor_input_size), flags=cv2.INTER_LANCZOS4 )
 
-        predictor_input_g = np.expand_dims(cv2.cvtColor(predictor_input_bgr, cv2.COLOR_BGR2GRAY),-1)
-        predicted_bgr = self.predictor ( predictor_input_g )
+        predicted_bgr = self.predictor ( predictor_input_bgr )
 
         output = cv2.resize ( predicted_bgr, (self.output_size, self.output_size), cv2.INTER_LANCZOS4 )
 
         if debug:
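Note: with the grayscale round-trip gone, the converter stays in BGR end to end — affine-crop the half-face at the predictor's 64 px input size, predict, then Lanczos-resize the 256 px result. A compact sketch of that flow (mat is a hypothetical 2x3 affine standing in for LandmarksProcessor.get_transform_mat):

    import cv2

    def convert_face(img_bgr, mat, predictor, in_size=64, out_size=256):
        # align / crop the face to the predictor's input resolution
        face = cv2.warpAffine(img_bgr, mat, (in_size, in_size),
                              flags=cv2.INTER_LANCZOS4)
        predicted = predictor(face)  # BGR in, BGR out; no grayscale conversion
        # interpolation is passed by keyword here, since cv2.resize's third
        # positional slot is the optional dst buffer
        return cv2.resize(predicted, (out_size, out_size),
                          interpolation=cv2.INTER_LANCZOS4)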