Added an interactive converter.

With the interactive converter you can change any parameter of any frame and see the result in real time.

Converter: added a motion_blur_power parameter.
Motion blur is applied along precomputed motion vectors, so a moving face looks more realistic.
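As a rough illustration of the idea (hypothetical code, not the converter's actual implementation), a directional blur along a precomputed motion vector (dx, dy) could be applied with OpenCV like this:

    import cv2
    import numpy as np

    def apply_motion_blur(img, motion_vec, power=1.0):
        # Hypothetical sketch: blur img along motion_vec = (dx, dy).
        dx, dy = motion_vec
        magnitude = np.hypot(dx, dy)
        length = int(magnitude * power)
        if length <= 1:
            return img  # motion too small to blur
        # Line-shaped kernel oriented along the motion direction.
        kernel = np.zeros((length, length), dtype=np.float32)
        c = length // 2
        ex = int(round(c + dx / magnitude * c))
        ey = int(round(c + dy / magnitude * c))
        cv2.line(kernel, (2 * c - ex, 2 * c - ey), (ex, ey), 1.0, thickness=1)
        kernel /= kernel.sum()
        return cv2.filter2D(img, -1, kernel)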

The RecycleGAN model has been removed.

Added an experimental AVATAR model. Minimum required VRAM: 6GB (NVIDIA), 12GB (AMD).
Usage:
1) place data_src.mp4, a 10-20 min square-resolution video of a news reporter sitting at a table with a static background;
   no other faces should appear in the frames.
2) process "extract images from video data_src.bat" with FULL fps
3) place data_dst.mp4, a video of the face that will control the src face
4) process "extract images from video data_dst FULL FPS.bat"
5) process "data_src mark faces S3FD best GPU.bat"
6) process "data_dst extract unaligned faces S3FD best GPU.bat"
7) process "train AVATAR.bat", stage 1; tune the batch size to the maximum for your card (32 for 6GB) and train to 50k+ iters.
8) process "train AVATAR.bat", stage 2; tune the batch size to the maximum for your card (4 for 6GB) and train to decent sharpness.
9) process "convert AVATAR.bat"
10) process "converted to mp4.bat"
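For orientation, the converter drives each output frame from a three-frame temporal window of the dst face (see predictor_func and G_convert in the AVATAR model diff below); a hypothetical driver loop over a frame sequence would look like:

    def convert_sequence(model, frames):
        # frames: dst face crops (df_res x df_res x 3, float32) in temporal order.
        out = []
        for t in range(1, len(frames) - 1):
            # Each output frame is predicted from (prev, current, next).
            out.append(model.predictor_func(prev_imgs=frames[t - 1:t],
                                            img=frames[t],
                                            next_imgs=frames[t + 1:t + 2]))
        return out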

Updated versions of modules.
iperov 2019-08-24 12:57:29 +04:00
commit 407ce3b1ca
46 changed files with 2394 additions and 1659 deletions


@@ -25,11 +25,11 @@ class ModelBase(object):
def __init__(self, model_path, training_data_src_path=None, training_data_dst_path=None, pretraining_data_path=None, debug = False, device_args = None,
ask_enable_autobackup=True,
ask_write_preview_history=True,
ask_target_iter=True,
ask_batch_size=True,
ask_sort_by_yaw=True,
ask_random_flip=True,
ask_src_scale_mod=True):
device_args['force_gpu_idx'] = device_args.get('force_gpu_idx',-1)
@@ -55,7 +55,7 @@ class ModelBase(object):
self.training_data_src_path = training_data_src_path
self.training_data_dst_path = training_data_dst_path
self.pretraining_data_path = pretraining_data_path
self.src_images_paths = None
self.dst_images_paths = None
self.src_yaw_images_paths = None
@@ -106,7 +106,7 @@ class ModelBase(object):
choose_preview_history = io.input_bool("Randomly choose new image for preview history? (y/n ?:help skip:%s) : " % (yn_str[False]), False, help_message="Preview image history will stay stuck with old faces if you reuse the same model on different celebs. Choose no unless you are changing src/dst to a new person")
else:
choose_preview_history = False
if ask_target_iter:
if (self.iter == 0 or ask_override):
self.options['target_iter'] = max(0, io.input_int("Target iteration (skip:unlimited/default) : ", 0))
@@ -121,7 +121,7 @@ class ModelBase(object):
else:
self.options['batch_size'] = self.options.get('batch_size', 0)
if ask_sort_by_yaw:
if (self.iter == 0 or ask_override):
default_sort_by_yaw = self.options.get('sort_by_yaw', False)
self.options['sort_by_yaw'] = io.input_bool("Feed faces to network sorted by yaw? (y/n ?:help skip:%s) : " % (yn_str[default_sort_by_yaw]), default_sort_by_yaw, help_message="NN will not learn src face directions that don't match dst face directions. Do not use if the dst face has hair that covers the jaw." )
@@ -139,7 +139,7 @@ class ModelBase(object):
self.options['src_scale_mod'] = np.clip( io.input_int("Src face scale modifier % ( -30...30, ?:help skip:0) : ", 0, help_message="If src face shape is wider than dst, try to decrease this value to get a better result."), -30, 30)
else:
self.options['src_scale_mod'] = self.options.get('src_scale_mod', 0)
self.autobackup = self.options.get('autobackup', False)
if not self.autobackup and 'autobackup' in self.options:
self.options.pop('autobackup')
@@ -180,10 +180,10 @@ class ModelBase(object):
else:
self.preview_history_path = self.model_path / ( '%d_%s_history' % (self.device_args['force_gpu_idx'], self.get_model_name()) )
self.autobackups_path = self.model_path / ( '%d_%s_autobackups' % (self.device_args['force_gpu_idx'], self.get_model_name()) )
if self.autobackup:
self.autobackup_current_hour = time.localtime().tm_hour
if not self.autobackups_path.exists():
self.autobackups_path.mkdir(exist_ok=True)
@@ -202,7 +202,7 @@ class ModelBase(object):
if not isinstance(generator, SampleGeneratorBase):
raise ValueError('training data generator is not subclass of SampleGeneratorBase')
if self.sample_for_preview is None or choose_preview_history:
if choose_preview_history and io.is_support_windows():
wnd_name = "[p] - next. [enter] - confirm."
io.named_window(wnd_name)
@@ -221,25 +221,25 @@ class ModelBase(object):
break
elif key == ord('p'):
break
try:
io.process_messages(0.1)
except KeyboardInterrupt:
choosed = True
io.destroy_window(wnd_name)
else:
self.sample_for_preview = self.generate_next_sample()
self.last_sample = self.sample_for_preview
###Generate text summary of model hyperparameters
#Find the longest key name and value string. Used as column widths.
width_name = max([len(k) for k in self.options.keys()] + [17]) + 1 # Single space buffer to left edge. Minimum of 17, the length of the longest static string used "Current iteration"
width_value = max([len(str(x)) for x in self.options.values()] + [len(str(self.iter)), len(self.get_model_name())]) + 1 # Single space buffer to right edge
if not self.device_config.cpu_only: #Check length of GPU names
width_value = max([len(nnlib.device.getDeviceName(idx))+1 for idx in self.device_config.gpu_idxs] + [width_value])
width_total = width_name + width_value + 2 #Plus 2 for ": "
model_summary_text = []
model_summary_text += [f'=={" Model Summary ":=^{width_total}}=='] # Model/status summary
model_summary_text += [f'=={" "*width_total}==']
@@ -247,13 +247,13 @@ class ModelBase(object):
model_summary_text += [f'=={" "*width_total}==']
model_summary_text += [f'=={"Current iteration": >{width_name}}: {str(self.iter): <{width_value}}=='] # Iter
model_summary_text += [f'=={" "*width_total}==']
model_summary_text += [f'=={" Model Options ":-^{width_total}}=='] # Model options
model_summary_text += [f'=={" "*width_total}==']
for key in self.options.keys():
model_summary_text += [f'=={key: >{width_name}}: {str(self.options[key]): <{width_value}}=='] # self.options key/value pairs
model_summary_text += [f'=={" "*width_total}==']
model_summary_text += [f'=={" Running On ":-^{width_total}}=='] # Training hardware info
model_summary_text += [f'=={" "*width_total}==']
if self.device_config.multi_gpu:
@@ -266,10 +266,10 @@ class ModelBase(object):
model_summary_text += [f'=={"Device index": >{width_name}}: {idx: <{width_value}}=='] # GPU hardware device index
model_summary_text += [f'=={"Name": >{width_name}}: {nnlib.device.getDeviceName(idx): <{width_value}}=='] # GPU name
vram_str = f'{nnlib.device.getDeviceVRAMTotalGb(idx):.2f}GB' # GPU VRAM - Formatted as #.## (or ##.##)
model_summary_text += [f'=={"VRAM": >{width_name}}: {vram_str: <{width_value}}==']
model_summary_text += [f'=={" "*width_total}==']
model_summary_text += [f'=={"="*width_total}==']
if not self.device_config.cpu_only and self.device_config.gpu_vram_gb[0] <= 2: # Low VRAM warning
model_summary_text += ["/!\\"]
model_summary_text += ["/!\\ WARNING:"]
@@ -277,7 +277,7 @@ class ModelBase(object):
model_summary_text += ["/!\\ If training does not start, close all programs and try again."]
model_summary_text += ["/!\\ Also you can disable Windows Aero Desktop to increase available VRAM."]
model_summary_text += ["/!\\"]
model_summary_text = "\n".join (model_summary_text)
self.model_summary_text = model_summary_text
io.log_info(model_summary_text)
@@ -323,6 +323,11 @@ class ModelBase(object):
def get_model_filename_list(self):
return []
#overridable
def get_ConverterConfig(self):
#return ConverterConfig() for the model
raise NotImplementedError
#overridable
def get_converter(self):
raise NotImplementedError
@@ -372,9 +377,9 @@
}
self.model_data_path.write_bytes( pickle.dumps(model_data) )
bckp_filename_list = [ self.get_strpath_storage_for_file(filename) for _, filename in self.get_model_filename_list() ]
bckp_filename_list += [ str(summary_path), str(self.model_data_path) ]
if self.autobackup:
current_hour = time.localtime().tm_hour
if self.autobackup_current_hour != current_hour:
@@ -383,20 +388,20 @@
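# Backup rotation: slot 15 is emptied, each existing slot i moves to i+1,
# and a fresh copy of the model files lands in slot 01.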
for i in range(15,0,-1):
idx_str = '%.2d' % i
next_idx_str = '%.2d' % (i+1)
idx_backup_path = self.autobackups_path / idx_str
next_idx_packup_path = self.autobackups_path / next_idx_str
if idx_backup_path.exists():
if i == 15:
Path_utils.delete_all_files(idx_backup_path)
else:
next_idx_packup_path.mkdir(exist_ok=True)
Path_utils.move_all_files (idx_backup_path, next_idx_packup_path)
if i == 1:
idx_backup_path.mkdir(exist_ok=True)
for filename in bckp_filename_list:
shutil.copy ( str(filename), str(idx_backup_path / Path(filename).name) )
previews = self.get_previews()
@@ -440,7 +445,7 @@
model.save_weights( filename + '.tmp' )
rename_list = model_filename_list
"""
#unused
, optimizer_filename_list=[]
@@ -464,7 +469,7 @@
except Exception as e:
print ("Unable to save ", opt_filename)
"""
for _, filename in rename_list:
filename = self.get_strpath_storage_for_file(filename)
source_filename = Path(filename+'.tmp')
@@ -473,7 +478,7 @@
if target_filename.exists():
target_filename.unlink()
source_filename.rename ( str(target_filename) )
def debug_one_iter(self):
images = []
for generator in self.generator_list:
@@ -579,8 +584,8 @@
lh_height = 100
lh_img = np.ones ( (lh_height,w,c) ) * 0.1
if len(loss_history) != 0:
loss_count = len(loss_history[0])
lh_len = len(loss_history)


@@ -0,0 +1,740 @@
from functools import partial
import cv2
import numpy as np
from facelib import FaceType
from interact import interact as io
from mathlib import get_power_of_two
from models import ModelBase
from nnlib import nnlib
from samplelib import *
from facelib import PoseEstimator
class AVATARModel(ModelBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs,
ask_sort_by_yaw=False,
ask_random_flip=False,
ask_src_scale_mod=False)
#override
def onInitializeOptions(self, is_first_run, ask_override):
if is_first_run:
avatar_type = io.input_int("Avatar type ( 0:source, 1:full_face, 2:head ?:help skip:0) : ", 0, [0,1,2],
help_message="Training target for the model. Source is direct untouched images. Full_face or head are centered nose unaligned faces.")
avatar_type = {0:'source',
1:'full_face',
2:'head'}[avatar_type]
self.options['avatar_type'] = avatar_type
else:
self.options['avatar_type'] = self.options.get('avatar_type', 'source')
if is_first_run or ask_override:
def_stage = self.options.get('stage', 0)
self.options['stage'] = io.input_int("Stage (0, 1, 2 ?:help skip:%d) : " % def_stage, def_stage, [0,1,2], help_message="Train first stage, then second. Tune batch size to maximum possible for both stages.")
else:
self.options['stage'] = self.options.get('stage', 0)
#override
def onInitialize(self, batch_size=-1, **in_options):
exec(nnlib.code_import_all, locals(), globals())
self.set_vram_batch_requirements({6:4})
AVATARModel.initialize_nn_functions()
resolution = self.resolution = 224
avatar_type = self.options['avatar_type']
stage = self.stage = self.options['stage']
df_res = self.df_res = 128
df_bgr_shape = (df_res, df_res, 3)
df_mask_shape = (df_res, df_res, 1)
res_bgr_shape = (resolution, resolution, 3)
res_bgr_t_shape = (resolution, resolution, 9)
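# Two working resolutions: enc/decA64/decB64 and D operate on df_res (128px)
# face crops, while C maps three gray-padded `resolution` (224px) frames,
# concatenated to 9 channels, to three output frames at once.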
self.enc = modelify(AVATARModel.EncFlow())( [Input(df_bgr_shape),] )
self.decA64 = modelify(AVATARModel.DecFlow()) ( [ Input(K.int_shape(self.enc.outputs[0])[1:]) ] )
self.decB64 = modelify(AVATARModel.DecFlow()) ( [ Input(K.int_shape(self.enc.outputs[0])[1:]) ] )
self.D = modelify(AVATARModel.Discriminator() ) (Input(df_bgr_shape))
self.C = modelify(AVATARModel.ResNet (9, use_batch_norm=False, n_blocks=6, ngf=128, use_dropout=False))( Input(res_bgr_t_shape))
#self.CD = modelify(AVATARModel.CDiscriminator() ) (Input(res_bgr_t_shape))
if self.is_first_run():
conv_weights_list = []
for model in [self.enc, self.decA64, self.decB64, self.C, self.D]: # self.CD is disabled above
for layer in model.layers:
if type(layer) == keras.layers.Conv2D:
conv_weights_list += [layer.weights[0]] #Conv2D kernel_weights
CAInitializerMP ( conv_weights_list )
if not self.is_first_run():
self.load_weights_safe( self.get_model_filename_list() )
def DLoss(labels,logits):
return K.mean(K.binary_crossentropy(labels,logits))
warped_A64 = Input(df_bgr_shape)
real_A64 = Input(df_bgr_shape)
real_A64m = Input(df_mask_shape)
real_B64_t0 = Input(df_bgr_shape)
real_B64_t1 = Input(df_bgr_shape)
real_B64_t2 = Input(df_bgr_shape)
real_A64_t0 = Input(df_bgr_shape)
real_A64m_t0 = Input(df_mask_shape)
real_A_t0 = Input(res_bgr_shape)
real_A64_t1 = Input(df_bgr_shape)
real_A64m_t1 = Input(df_mask_shape)
real_A_t1 = Input(res_bgr_shape)
real_A64_t2 = Input(df_bgr_shape)
real_A64m_t2 = Input(df_mask_shape)
real_A_t2 = Input(res_bgr_shape)
warped_B64 = Input(df_bgr_shape)
real_B64 = Input(df_bgr_shape)
real_B64m = Input(df_mask_shape)
warped_A_code = self.enc (warped_A64)
warped_B_code = self.enc (warped_B64)
rec_A64 = self.decA64(warped_A_code)
rec_B64 = self.decB64(warped_B_code)
rec_AB64 = self.decA64(warped_B_code)
def Lambda_grey_mask (x,m):
return Lambda (lambda x: x[0]*m+(1-m)*0.5, output_shape= K.int_shape(x)[1:3] + (3,)) ([x, m])
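# Lambda_gray_pad centers a df_res (128px) image on a `resolution` (224px)
# canvas, filling the padded border with 0.5 gray.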
def Lambda_gray_pad(x):
a = np.ones((resolution,resolution,3))*0.5
pad = ( resolution - df_res ) // 2
a[pad:-pad:,pad:-pad:,:] = 0
return Lambda ( lambda x: K.spatial_2d_padding(x, padding=((pad, pad), (pad, pad)) ) + K.constant(a, dtype=K.floatx() ),
output_shape=(resolution,resolution,3) ) (x)
def Lambda_concat ( x ):
c = sum ( [ K.int_shape(l)[-1] for l in x ] )
return Lambda ( lambda x: K.concatenate (x, axis=-1), output_shape=K.int_shape(x[0])[1:3] + (c,) ) (x)
def Lambda_Cto3t(x):
return Lambda ( lambda x: x[...,0:3], output_shape= K.int_shape(x)[1:3] + (3,) ) (x), \
Lambda ( lambda x: x[...,3:6], output_shape= K.int_shape(x)[1:3] + (3,) ) (x), \
Lambda ( lambda x: x[...,6:9], output_shape= K.int_shape(x)[1:3] + (3,) ) (x)
real_A64_d = self.D( Lambda_grey_mask(real_A64, real_A64m) )
real_A64_d_ones = K.ones_like(real_A64_d)
fake_A64_d = self.D(rec_AB64)
fake_A64_d_ones = K.ones_like(fake_A64_d)
fake_A64_d_zeros = K.zeros_like(fake_A64_d)
rec_AB_t0 = Lambda_gray_pad( self.decA64 (self.enc (real_B64_t0)) )
rec_AB_t1 = Lambda_gray_pad( self.decA64 (self.enc (real_B64_t1)) )
rec_AB_t2 = Lambda_gray_pad( self.decA64 (self.enc (real_B64_t2)) )
C_in_A_t0 = Lambda_gray_pad( Lambda_grey_mask (real_A64_t0, real_A64m_t0) )
C_in_A_t1 = Lambda_gray_pad( Lambda_grey_mask (real_A64_t1, real_A64m_t1) )
C_in_A_t2 = Lambda_gray_pad( Lambda_grey_mask (real_A64_t2, real_A64m_t2) )
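# C sees the three gray-padded frames concatenated along channels (9ch in)
# and predicts all three at once; Lambda_Cto3t splits the 9-channel output
# back into three BGR frames.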
rec_C_A_t0, rec_C_A_t1, rec_C_A_t2 = Lambda_Cto3t ( self.C ( Lambda_concat ( [C_in_A_t0, C_in_A_t1, C_in_A_t2]) ) )
rec_C_AB_t0, rec_C_AB_t1, rec_C_AB_t2 = Lambda_Cto3t( self.C ( Lambda_concat ( [rec_AB_t0, rec_AB_t1, rec_AB_t2]) ) )
#real_A_t012_d = self.CD ( K.concatenate ( [real_A_t0, real_A_t1,real_A_t2], axis=-1) )
#real_A_t012_d_ones = K.ones_like(real_A_t012_d)
#rec_C_AB_t012_d = self.CD ( K.concatenate ( [rec_C_AB_t0,rec_C_AB_t1, rec_C_AB_t2], axis=-1) )
#rec_C_AB_t012_d_ones = K.ones_like(rec_C_AB_t012_d)
#rec_C_AB_t012_d_zeros = K.zeros_like(rec_C_AB_t012_d)
self.G64_view = K.function([warped_A64, warped_B64],[rec_A64, rec_B64, rec_AB64])
self.G_view = K.function([real_A64_t0, real_A64m_t0, real_A64_t1, real_A64m_t1, real_A64_t2, real_A64m_t2, real_B64_t0, real_B64_t1, real_B64_t2], [rec_C_A_t0, rec_C_A_t1, rec_C_A_t2, rec_C_AB_t0, rec_C_AB_t1, rec_C_AB_t2])
if self.is_training_mode:
loss_AB64 = K.mean(10 * dssim(kernel_size=int(df_res/11.6),max_value=1.0) ( rec_A64, real_A64*real_A64m + (1-real_A64m)*0.5) ) + \
K.mean(10 * dssim(kernel_size=int(df_res/11.6),max_value=1.0) ( rec_B64, real_B64*real_B64m + (1-real_B64m)*0.5) ) + 0.1*DLoss(fake_A64_d_ones, fake_A64_d )
weights_AB64 = self.enc.trainable_weights + self.decA64.trainable_weights + self.decB64.trainable_weights
loss_C = K.mean( 10 * dssim(kernel_size=int(resolution/11.6),max_value=1.0) ( real_A_t0, rec_C_A_t0 ) ) + \
K.mean( 10 * dssim(kernel_size=int(resolution/11.6),max_value=1.0) ( real_A_t1, rec_C_A_t1 ) ) + \
K.mean( 10 * dssim(kernel_size=int(resolution/11.6),max_value=1.0) ( real_A_t2, rec_C_A_t2 ) )
#0.1*DLoss(rec_C_AB_t012_d_ones, rec_C_AB_t012_d )
weights_C = self.C.trainable_weights
loss_D = (DLoss(real_A64_d_ones, real_A64_d ) + \
DLoss(fake_A64_d_zeros, fake_A64_d ) ) * 0.5
#loss_CD = ( DLoss(real_A_t012_d_ones, real_A_t012_d) + \
# DLoss(rec_C_AB_t012_d_zeros, rec_C_AB_t012_d) ) * 0.5
#
#weights_CD = self.CD.trainable_weights
def opt(lr=5e-5):
return Adam(lr=lr, beta_1=0.5, beta_2=0.999, tf_cpu_mode=2 if 'tensorflow' in self.device_config.backend else 0 )
self.AB64_train = K.function ([warped_A64, real_A64, real_A64m, warped_B64, real_B64, real_B64m], [loss_AB64], opt().get_updates(loss_AB64, weights_AB64) )
self.C_train = K.function ([real_A64_t0, real_A64m_t0, real_A_t0,
real_A64_t1, real_A64m_t1, real_A_t1,
real_A64_t2, real_A64m_t2, real_A_t2,
real_B64_t0, real_B64_t1, real_B64_t2],[ loss_C ], opt().get_updates(loss_C, weights_C) )
self.D_train = K.function ([warped_A64, real_A64, real_A64m, warped_B64, real_B64, real_B64m],[loss_D], opt().get_updates(loss_D, self.D.trainable_weights) )
#self.CD_train = K.function ([real_A64_t0, real_A64m_t0, real_A_t0,
# real_A64_t1, real_A64m_t1, real_A_t1,
# real_A64_t2, real_A64m_t2, real_A_t2,
# real_B64_t0, real_B64_t1, real_B64_t2 ],[ loss_CD ], opt().get_updates(loss_CD, weights_CD) )
###########
t = SampleProcessor.Types
training_target = {'source' : t.NONE,
'full_face' : t.FACE_TYPE_FULL_NO_ALIGN,
'head' : t.FACE_TYPE_HEAD_NO_ALIGN}[avatar_type]
generators = [
SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
sample_process_options=SampleProcessor.Options(random_flip=False),
output_sample_types=[ {'types': (t.IMG_WARPED_TRANSFORMED, t.FACE_TYPE_FULL_NO_ALIGN, t.MODE_BGR), 'resolution':df_res},
{'types': (t.IMG_TRANSFORMED, t.FACE_TYPE_FULL_NO_ALIGN, t.MODE_BGR), 'resolution':df_res},
{'types': (t.IMG_TRANSFORMED, t.FACE_TYPE_FULL_NO_ALIGN, t.MODE_M), 'resolution':df_res}
] ),
SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
sample_process_options=SampleProcessor.Options(random_flip=False),
output_sample_types=[ {'types': (t.IMG_WARPED_TRANSFORMED, t.FACE_TYPE_FULL_NO_ALIGN, t.MODE_BGR), 'resolution':df_res},
{'types': (t.IMG_TRANSFORMED, t.FACE_TYPE_FULL_NO_ALIGN, t.MODE_BGR), 'resolution':df_res},
{'types': (t.IMG_TRANSFORMED, t.FACE_TYPE_FULL_NO_ALIGN, t.MODE_M), 'resolution':df_res}
] ),
SampleGeneratorFaceTemporal(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
temporal_image_count=3,
sample_process_options=SampleProcessor.Options(random_flip=False),
output_sample_types=[{'types': (t.IMG_WARPED_TRANSFORMED, t.FACE_TYPE_FULL_NO_ALIGN, t.MODE_BGR), 'resolution':df_res},#IMG_WARPED_TRANSFORMED
{'types': (t.IMG_WARPED_TRANSFORMED, t.FACE_TYPE_FULL_NO_ALIGN, t.MODE_M), 'resolution':df_res},
{'types': (t.IMG_SOURCE, training_target, t.MODE_BGR), 'resolution':resolution},
] ),
SampleGeneratorFaceTemporal(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
temporal_image_count=3,
sample_process_options=SampleProcessor.Options(random_flip=False),
output_sample_types=[{'types': (t.IMG_SOURCE, t.FACE_TYPE_FULL_NO_ALIGN, t.MODE_BGR), 'resolution':df_res},
{'types': (t.IMG_SOURCE, t.NONE, t.MODE_BGR), 'resolution':resolution},
] ),
]
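# Stage gating: stage 1 trains only the per-frame face generators (0,1),
# stage 2 only the temporal generators (2,3); stage 0 keeps all four active.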
if self.stage == 1:
generators[2].set_active(False)
generators[3].set_active(False)
elif self.stage == 2:
generators[0].set_active(False)
generators[1].set_active(False)
self.set_training_data_generators (generators)
else:
self.G_convert = K.function([real_B64_t0, real_B64_t1, real_B64_t2],[rec_C_AB_t1])
#override , return [ [model, filename],... ] list
def get_model_filename_list(self):
return [ [self.enc, 'enc.h5'],
[self.decA64, 'decA64.h5'],
[self.decB64, 'decB64.h5'],
[self.C, 'C.h5'],
[self.D, 'D.h5'],
#[self.CD, 'CD.h5'],
]
#override
def onSave(self):
self.save_weights_safe( self.get_model_filename_list() )
#override
def onTrainOneIter(self, generators_samples, generators_list):
warped_src64, src64, src64m = generators_samples[0]
warped_dst64, dst64, dst64m = generators_samples[1]
real_A64_t0, real_A64m_t0, real_A_t0, real_A64_t1, real_A64m_t1, real_A_t1, real_A64_t2, real_A64m_t2, real_A_t2 = generators_samples[2]
real_B64_t0, _, real_B64_t1, _, real_B64_t2, _ = generators_samples[3]
if self.stage == 0 or self.stage == 1:
loss, = self.AB64_train ( [warped_src64, src64, src64m, warped_dst64, dst64, dst64m] )
loss_D, = self.D_train ( [warped_src64, src64, src64m, warped_dst64, dst64, dst64m] )
if self.stage != 0:
loss_C = loss_CD = 0
if self.stage == 0 or self.stage == 2:
loss_C1, = self.C_train ( [real_A64_t0, real_A64m_t0, real_A_t0,
real_A64_t1, real_A64m_t1, real_A_t1,
real_A64_t2, real_A64m_t2, real_A_t2,
real_B64_t0, real_B64_t1, real_B64_t2] )
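# The A-side triplet is fed again in reverse order (t2,t1,t0) as a simple
# temporal augmentation; the two losses are averaged below.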
loss_C2, = self.C_train ( [real_A64_t2, real_A64m_t2, real_A_t2,
real_A64_t1, real_A64m_t1, real_A_t1,
real_A64_t0, real_A64m_t0, real_A_t0,
real_B64_t0, real_B64_t1, real_B64_t2] )
#loss_CD1, = self.CD_train ( [real_A64_t0, real_A64m_t0, real_A_t0,
# real_A64_t1, real_A64m_t1, real_A_t1,
# real_A64_t2, real_A64m_t2, real_A_t2,
# real_B64_t0, real_B64_t1, real_B64_t2] )
#
#loss_CD2, = self.CD_train ( [real_A64_t2, real_A64m_t2, real_A_t2,
# real_A64_t1, real_A64m_t1, real_A_t1,
# real_A64_t0, real_A64m_t0, real_A_t0,
# real_B64_t0, real_B64_t1, real_B64_t2] )
loss_C = (loss_C1 + loss_C2) / 2
#loss_CD = (loss_CD1 + loss_CD2) / 2
if self.stage != 0:
loss = loss_D = 0
return ( ('loss', loss), ('D', loss_D), ('C', loss_C), ) #('CD', loss_CD) )
#override
def onGetPreview(self, sample):
test_A064w = sample[0][0][0:4]
test_A064r = sample[0][1][0:4]
test_A064m = sample[0][2][0:4]
test_B064w = sample[1][0][0:4]
test_B064r = sample[1][1][0:4]
test_B064m = sample[1][2][0:4]
t_src64_0 = sample[2][0][0:4]
t_src64m_0 = sample[2][1][0:4]
t_src_0 = sample[2][2][0:4]
t_src64_1 = sample[2][3][0:4]
t_src64m_1 = sample[2][4][0:4]
t_src_1 = sample[2][5][0:4]
t_src64_2 = sample[2][6][0:4]
t_src64m_2 = sample[2][7][0:4]
t_src_2 = sample[2][8][0:4]
t_dst64_0 = sample[3][0][0:4]
t_dst_0 = sample[3][1][0:4]
t_dst64_1 = sample[3][2][0:4]
t_dst_1 = sample[3][3][0:4]
t_dst64_2 = sample[3][4][0:4]
t_dst_2 = sample[3][5][0:4]
G64_view_result = self.G64_view ([test_A064r, test_B064r])
test_A064r, test_B064r, rec_A64, rec_B64, rec_AB64 = [ x[0] for x in ([test_A064r, test_B064r] + G64_view_result) ]
sample64x4 = np.concatenate ([ np.concatenate ( [rec_B64, rec_A64], axis=1 ),
np.concatenate ( [test_B064r, rec_AB64], axis=1) ], axis=0 )
sample64x4 = cv2.resize (sample64x4, (self.resolution, self.resolution) )
G_view_result = self.G_view([t_src64_0, t_src64m_0, t_src64_1, t_src64m_1, t_src64_2, t_src64m_2, t_dst64_0, t_dst64_1, t_dst64_2 ])
t_dst_0, t_dst_1, t_dst_2, rec_C_A_t0, rec_C_A_t1, rec_C_A_t2, rec_C_AB_t0, rec_C_AB_t1, rec_C_AB_t2 = [ x[0] for x in ([t_dst_0, t_dst_1, t_dst_2, ] + G_view_result) ]
c1 = np.concatenate ( (sample64x4, rec_C_A_t0, t_dst_0, rec_C_AB_t0 ), axis=1 )
c2 = np.concatenate ( (sample64x4, rec_C_A_t1, t_dst_1, rec_C_AB_t1 ), axis=1 )
c3 = np.concatenate ( (sample64x4, rec_C_A_t2, t_dst_2, rec_C_AB_t2 ), axis=1 )
r = np.concatenate ( [c1,c2,c3], axis=0 )
return [ ('AVATAR', r ) ]
def predictor_func (self, prev_imgs=None, img=None, next_imgs=None, dummy_predict=False):
if dummy_predict:
z = np.zeros ( (1, self.df_res, self.df_res, 3), dtype=np.float32 )
self.G_convert ([z,z,z])
else:
feed = [ prev_imgs[-1][np.newaxis,...], img[np.newaxis,...], next_imgs[0][np.newaxis,...] ]
x = self.G_convert (feed)[0]
return np.clip ( x[0], 0, 1)
#override
def get_ConverterConfig(self):
import converters
return converters.ConverterConfigFaceAvatar(predictor_func=self.predictor_func,
predictor_input_shape=(self.df_res, self.df_res, 3),
temporal_face_count=1
)
@staticmethod
def NLayerDiscriminator(ndf=64, n_layers=3):
exec (nnlib.import_all(), locals(), globals())
#use_bias = True
#def XNormalization(x):
# return InstanceNormalization (axis=-1)(x)
use_bias = False
def XNormalization(x):
return BatchNormalization (axis=-1)(x)
XConv2D = partial(Conv2D, use_bias=use_bias)
def func(x):
f = ndf
x = XConv2D( f, 4, strides=2, padding='same', use_bias=True)(x)
f = min( ndf*8, f*2 )
x = LeakyReLU(0.2)(x)
for i in range(n_layers):
x = XConv2D( f, 4, strides=2, padding='same')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
f = min( ndf*8, f*2 )
x = XConv2D( f, 4, strides=1, padding='same')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
return XConv2D( 1, 4, strides=1, padding='same', use_bias=True, activation='sigmoid')(x)#
return func
"""
@staticmethod
def Discriminator(ndf=128):
exec (nnlib.import_all(), locals(), globals())
#use_bias = True
#def XNormalization(x):
# return InstanceNormalization (axis=-1)(x)
use_bias = False
def XNormalization(x):
return BatchNormalization (axis=-1)(x)
XConv2D = partial(Conv2D, use_bias=use_bias)
def func(input):
b,h,w,c = K.int_shape(input)
x = input
x = XConv2D( ndf, 4, strides=2, padding='same', use_bias=True)(x)
x = LeakyReLU(0.2)(x)
x = XConv2D( ndf*2, 4, strides=2, padding='same')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
x = XConv2D( ndf*4, 4, strides=2, padding='same')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
x = XConv2D( ndf*8, 4, strides=2, padding='same')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
return XConv2D( 1, 4, strides=1, padding='same', use_bias=True, activation='sigmoid')(x)#
return func
"""
@staticmethod
def Discriminator(ndf=128):
exec (nnlib.import_all(), locals(), globals())
use_bias = True
def XNormalization(x):
return InstanceNormalization (axis=-1)(x)
#use_bias = False
#def XNormalization(x):
# return BatchNormalization (axis=-1)(x)
XConv2D = partial(Conv2D, use_bias=use_bias)
def func(input):
b,h,w,c = K.int_shape(input)
x = input
x = XConv2D( ndf, 4, strides=2, padding='same', use_bias=True)(x)
x = LeakyReLU(0.2)(x)
x = XConv2D( ndf*2, 4, strides=2, padding='same')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
x = XConv2D( ndf*4, 4, strides=2, padding='same')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
x = XConv2D( ndf*8, 4, strides=2, padding='same')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
return XConv2D( 1, 4, strides=1, padding='same', use_bias=True, activation='sigmoid')(x)#
return func
@staticmethod
def CDiscriminator(ndf=256):
exec (nnlib.import_all(), locals(), globals())
use_bias = True
def XNormalization(x):
return InstanceNormalization (axis=-1)(x)
#use_bias = False
#def XNormalization(x):
# return BatchNormalization (axis=-1)(x)
XConv2D = partial(Conv2D, use_bias=use_bias)
def func(input):
b,h,w,c = K.int_shape(input)
x = input
x = XConv2D( ndf, 4, strides=2, padding='same', use_bias=True)(x)
x = LeakyReLU(0.2)(x)
x = XConv2D( ndf*2, 4, strides=2, padding='same')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
x = XConv2D( ndf*4, 4, strides=2, padding='same')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
#x = XConv2D( ndf*8, 4, strides=2, padding='same')(x)
#x = XNormalization(x)
#x = LeakyReLU(0.2)(x)
return XConv2D( 1, 4, strides=1, padding='same', use_bias=True, activation='sigmoid')(x)#
return func
@staticmethod
def EncFlow(padding='zero', **kwargs):
exec (nnlib.import_all(), locals(), globals())
use_bias = False
def XNorm(x):
return BatchNormalization (axis=-1)(x)
XConv2D = partial(Conv2D, padding=padding, use_bias=use_bias)
def downscale (dim):
def func(x):
return LeakyReLU(0.1)( Conv2D(dim, 5, strides=2, padding='same')(x))
return func
def upscale (dim):
def func(x):
return SubpixelUpscaler()(LeakyReLU(0.1)(Conv2D(dim * 4, 3, strides=1, padding='same')(x)))
return func
def func(input):
x, = input
b,h,w,c = K.int_shape(x)
dim_res = w // 16
x = downscale(64)(x)
x = downscale(128)(x)
x = downscale(256)(x)
x = downscale(512)(x)
x = Dense(512)(Flatten()(x))
x = Dense(dim_res * dim_res * 512)(x)
x = Reshape((dim_res, dim_res, 512))(x)
x = upscale(512)(x)
return x
return func
@staticmethod
def DecFlow(output_nc=3, **kwargs):
exec (nnlib.import_all(), locals(), globals())
ResidualBlock = AVATARModel.ResidualBlock
upscale = AVATARModel.upscale
to_bgr = AVATARModel.to_bgr
def func(input):
x = input[0]
x = upscale(512)(x)
x = upscale(256)(x)
x = upscale(128)(x)
return to_bgr(output_nc) (x)
return func
"""
@staticmethod
def CNet(output_nc, use_batch_norm, ngf=64, n_blocks=6, use_dropout=False):
exec (nnlib.import_all(), locals(), globals())
if not use_batch_norm:
use_bias = True
def XNormalization(x):
return InstanceNormalization (axis=-1)(x)
else:
use_bias = False
def XNormalization(x):
return BatchNormalization (axis=-1)(x)
XConv2D = partial(Conv2D, padding='same', use_bias=use_bias)
XConv2DTranspose = partial(Conv2DTranspose, padding='same', use_bias=use_bias)
def ResnetBlock(dim, use_dropout=False):
def func(input):
x = input
x = XConv2D(dim, 3, strides=1)(x)
x = XNormalization(x)
x = ReLU()(x)
if use_dropout:
x = Dropout(0.5)(x)
x = XConv2D(dim, 3, strides=1)(x)
x = XNormalization(x)
x = ReLU()(x)
return Add()([x,input])
return func
def preprocess(target_res):
def func(input):
inp_shape = K.int_shape (input[0])
t_len = len(input)
total_ch = 0
for i in range(t_len):
total_ch += K.int_shape (input[i])[-1]
x = K.concatenate ( input, axis=-1)
import code
code.interact(local=dict(globals(), **locals()))
x_shape = K.int_shape(x)[1:]
pad = (target_res - x_shape[0]) // 2
a = np.ones((target_res,target_res,3))*0.5
a[pad:-pad:,pad:-pad:,:] = 0
return K.spatial_2d_padding(x, padding=((pad, pad), (pad, pad)) ) + K.constant(a, dtype=K.floatx() )
return func
def func(input):
inp_shape = K.int_shape (input[0])
t_len = len(input)
total_ch = 0
for i in range(t_len):
total_ch += K.int_shape (input[i])[-1]
x = Lambda ( preprocess(128) , output_shape=(inp_shape[1], inp_shape[2], total_ch) ) (input)
x = ReLU()(XNormalization(XConv2D(ngf, 7, strides=1)(x)))
x = ReLU()(XNormalization(XConv2D(ngf*2, 3, strides=2)(x)))
x = ReLU()(XNormalization(XConv2D(ngf*4, 3, strides=2)(x)))
for i in range(n_blocks):
x = ResnetBlock(ngf*4, use_dropout=use_dropout)(x)
x = ReLU()(XNormalization(XConv2DTranspose(ngf*2, 3, strides=2)(x)))
x = ReLU()(XNormalization(XConv2DTranspose(ngf , 3, strides=2)(x)))
x = XConv2D(output_nc, 7, strides=1, activation='sigmoid', use_bias=True)(x)
return x
return func
"""
@staticmethod
def ResNet(output_nc, use_batch_norm, ngf=64, n_blocks=6, use_dropout=False):
exec (nnlib.import_all(), locals(), globals())
if not use_batch_norm:
use_bias = True
def XNormalization(x):
return InstanceNormalization (axis=-1)(x)
else:
use_bias = False
def XNormalization(x):
return BatchNormalization (axis=-1)(x)
XConv2D = partial(Conv2D, padding='same', use_bias=use_bias)
XConv2DTranspose = partial(Conv2DTranspose, padding='same', use_bias=use_bias)
def func(input):
def ResnetBlock(dim, use_dropout=False):
def func(input):
x = input
x = XConv2D(dim, 3, strides=1)(x)
x = XNormalization(x)
x = ReLU()(x)
if use_dropout:
x = Dropout(0.5)(x)
x = XConv2D(dim, 3, strides=1)(x)
x = XNormalization(x)
x = ReLU()(x)
return Add()([x,input])
return func
x = input
x = ReLU()(XNormalization(XConv2D(ngf, 7, strides=1)(x)))
x = ReLU()(XNormalization(XConv2D(ngf*2, 3, strides=2)(x)))
x = ReLU()(XNormalization(XConv2D(ngf*4, 3, strides=2)(x)))
x = ReLU()(XNormalization(XConv2D(ngf*4, 3, strides=2)(x)))
for i in range(n_blocks):
x = ResnetBlock(ngf*4, use_dropout=use_dropout)(x)
x = ReLU()(XNormalization(XConv2DTranspose(ngf*4, 3, strides=2)(x)))
x = ReLU()(XNormalization(XConv2DTranspose(ngf*2, 3, strides=2)(x)))
x = ReLU()(XNormalization(XConv2DTranspose(ngf , 3, strides=2)(x)))
x = XConv2D(output_nc, 7, strides=1, activation='sigmoid', use_bias=True)(x)
return x
return func
@staticmethod
def initialize_nn_functions():
exec (nnlib.import_all(), locals(), globals())
class ResidualBlock(object):
def __init__(self, filters, kernel_size=3, padding='zero', **kwargs):
self.filters = filters
self.kernel_size = kernel_size
self.padding = padding
def __call__(self, inp):
x = inp
x = Conv2D(self.filters, kernel_size=self.kernel_size, padding=self.padding)(x)
x = LeakyReLU(0.2)(x)
x = Conv2D(self.filters, kernel_size=self.kernel_size, padding=self.padding)(x)
x = Add()([x, inp])
x = LeakyReLU(0.2)(x)
return x
AVATARModel.ResidualBlock = ResidualBlock
def downscale (dim, padding='zero', act='', **kwargs):
def func(x):
return LeakyReLU(0.2) (Conv2D(dim, kernel_size=5, strides=2, padding=padding)(x))
return func
AVATARModel.downscale = downscale
def upscale (dim, padding='zero', norm='', act='', **kwargs):
def func(x):
return SubpixelUpscaler()( LeakyReLU(0.2)(Conv2D(dim * 4, kernel_size=3, strides=1, padding=padding)(x)))
return func
AVATARModel.upscale = upscale
def to_bgr (output_nc, padding='zero', **kwargs):
def func(x):
return Conv2D(output_nc, kernel_size=5, padding=padding, activation='sigmoid')(x)
return func
AVATARModel.to_bgr = to_bgr
Model = AVATARModel


@@ -59,13 +59,13 @@ class Model(ModelBase):
sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
output_sample_types=output_sample_types)
])
#override
def get_model_filename_list(self):
return [[self.encoder, 'encoder.h5'],
[self.decoder_src, 'decoder_src.h5'],
[self.decoder_dst, 'decoder_dst.h5']]
#override
def onSave(self):
self.save_weights_safe( self.get_model_filename_list() )
@@ -111,18 +111,26 @@ class Model(ModelBase):
return [ ('DF', np.concatenate ( st, axis=0 ) ) ]
def predictor_func (self, face):
x, mx = self.convert ( [ face[np.newaxis,...] ] )
return x[0], mx[0][...,0]
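# New in this commit: dummy_predict=True runs a single forward pass on zeros,
# warming up the prediction function before real frames are converted.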
def predictor_func (self, face=None, dummy_predict=False):
if dummy_predict:
self.AE_convert ([ np.zeros ( (1, 128, 128, 3), dtype=np.float32 ) ])
else:
x, mx = self.convert ( [ face[np.newaxis,...] ] )
return x[0], mx[0][...,0]
#override
def get_converter(self):
from converters import ConverterMasked
return ConverterMasked(self.predictor_func,
predictor_input_size=128,
face_type=FaceType.FULL,
base_erode_mask_modifier=30,
base_blur_mask_modifier=0)
def get_ConverterConfig(self):
import converters
return converters.ConverterConfigMasked(predictor_func=self.predictor_func,
predictor_input_shape=(128,128,3),
predictor_masked=True,
face_type=FaceType.FULL,
default_mode=4,
base_erode_mask_modifier=30,
base_blur_mask_modifier=0,
default_erode_mask_modifier=0,
default_blur_mask_modifier=0,
)
def Build(self, input_layer):
exec(nnlib.code_import_all, locals(), globals())


@@ -75,7 +75,7 @@ class Model(ModelBase):
return [[self.encoder, 'encoder.h5'],
[self.decoder_src, 'decoder_src.h5'],
[self.decoder_dst, 'decoder_dst.h5']]
#override
def onSave(self):
self.save_weights_safe( self.get_model_filename_list() )
@@ -119,18 +119,26 @@ class Model(ModelBase):
return [ ('H128', np.concatenate ( st, axis=0 ) ) ]
def predictor_func (self, face):
x, mx = self.src_view ( [ face[np.newaxis,...] ] )
return x[0], mx[0][...,0]
def predictor_func (self, face=None, dummy_predict=False):
if dummy_predict:
self.AE_convert ([ np.zeros ( (1, 128, 128, 3), dtype=np.float32 ) ])
else:
x, mx = self.src_view ( [ face[np.newaxis,...] ] )
return x[0], mx[0][...,0]
#override
def get_converter(self):
from converters import ConverterMasked
return ConverterMasked(self.predictor_func,
predictor_input_size=128,
face_type=FaceType.HALF,
base_erode_mask_modifier=100,
base_blur_mask_modifier=100)
def get_ConverterConfig(self):
import converters
return converters.ConverterConfigMasked(predictor_func=self.predictor_func,
predictor_input_shape=(128,128,3),
predictor_masked=True,
face_type=FaceType.HALF,
default_mode=4,
base_erode_mask_modifier=100,
base_blur_mask_modifier=100,
default_erode_mask_modifier=0,
default_blur_mask_modifier=0,
)
def Build(self, lighter_ae):
exec(nnlib.code_import_all, locals(), globals())


@@ -76,7 +76,7 @@ class Model(ModelBase):
return [[self.encoder, 'encoder.h5'],
[self.decoder_src, 'decoder_src.h5'],
[self.decoder_dst, 'decoder_dst.h5']]
#override
def onSave(self):
self.save_weights_safe( self.get_model_filename_list() )
@@ -120,18 +120,26 @@ class Model(ModelBase):
return [ ('H64', np.concatenate ( st, axis=0 ) ) ]
def predictor_func (self, face):
x, mx = self.src_view ( [ face[np.newaxis,...] ] )
return x[0], mx[0][...,0]
def predictor_func (self, face=None, dummy_predict=False):
if dummy_predict:
self.AE_convert ([ np.zeros ( (1, 64, 64, 3), dtype=np.float32 ) ])
else:
x, mx = self.src_view ( [ face[np.newaxis,...] ] )
return x[0], mx[0][...,0]
#override
def get_converter(self):
from converters import ConverterMasked
return ConverterMasked(self.predictor_func,
predictor_input_size=64,
face_type=FaceType.HALF,
base_erode_mask_modifier=100,
base_blur_mask_modifier=100)
def get_ConverterConfig(self):
import converters
return converters.ConverterConfigMasked(predictor_func=self.predictor_func,
predictor_input_shape=(64,64,3),
predictor_masked=True,
face_type=FaceType.HALF,
default_mode=4,
base_erode_mask_modifier=100,
base_blur_mask_modifier=100,
default_erode_mask_modifier=0,
default_blur_mask_modifier=0,
)
def Build(self, lighter_ae):
exec(nnlib.code_import_all, locals(), globals())


@@ -70,8 +70,8 @@ class Model(ModelBase):
return [[self.encoder, 'encoder.h5'],
[self.decoder, 'decoder.h5'],
[self.inter_B, 'inter_B.h5'],
[self.inter_AB, 'inter_AB.h5']]
#override
def onSave(self):
self.save_weights_safe( self.get_model_filename_list() )
@@ -117,18 +117,26 @@ class Model(ModelBase):
return [ ('LIAEF128', np.concatenate ( st, axis=0 ) ) ]
def predictor_func (self, face):
x, mx = self.convert ( [ face[np.newaxis,...] ] )
return x[0], mx[0][...,0]
def predictor_func (self, face=None, dummy_predict=False):
if dummy_predict:
self.AE_convert ([ np.zeros ( (1, 128, 128, 3), dtype=np.float32 ) ])
else:
x, mx = self.convert ( [ face[np.newaxis,...] ] )
return x[0], mx[0][...,0]
#override
def get_converter(self):
from converters import ConverterMasked
return ConverterMasked(self.predictor_func,
predictor_input_size=128,
face_type=FaceType.FULL,
base_erode_mask_modifier=30,
base_blur_mask_modifier=0)
def get_ConverterConfig(self):
import converters
return converters.ConverterConfigMasked(predictor_func=self.predictor_func,
predictor_input_shape=(128,128,3),
predictor_masked=True,
face_type=FaceType.FULL,
default_mode=4,
base_erode_mask_modifier=30,
base_blur_mask_modifier=0,
default_erode_mask_modifier=0,
default_blur_mask_modifier=0,
)
def Build(self, input_layer):
exec(nnlib.code_import_all, locals(), globals())


@@ -1,482 +0,0 @@
from functools import partial
import cv2
import numpy as np
from facelib import FaceType
from interact import interact as io
from mathlib import get_power_of_two
from models import ModelBase
from nnlib import nnlib
from samplelib import *
class RecycleGANModel(ModelBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs,
ask_sort_by_yaw=False,
ask_random_flip=False,
ask_src_scale_mod=False)
#override
def onInitializeOptions(self, is_first_run, ask_override):
if is_first_run:
self.options['resolution'] = io.input_int("Resolution ( 128,256 ?:help skip:128) : ", 128, [128,256], help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16.")
else:
self.options['resolution'] = self.options.get('resolution', 128)
#override
def onInitialize(self, batch_size=-1, **in_options):
exec(nnlib.code_import_all, locals(), globals())
self.set_vram_batch_requirements({6:16})
resolution = self.options['resolution']
bgr_shape = (resolution, resolution, 3)
ngf = 64
npf = 32
ndf = 64
lambda_A = 10
lambda_B = 10
use_batch_norm = True #created_batch_size > 1
self.GA = modelify(RecycleGANModel.ResNet (bgr_shape[2], use_batch_norm, n_blocks=6, ngf=ngf, use_dropout=True))(Input(bgr_shape))
self.GB = modelify(RecycleGANModel.ResNet (bgr_shape[2], use_batch_norm, n_blocks=6, ngf=ngf, use_dropout=True))(Input(bgr_shape))
#self.GA = modelify(UNet (bgr_shape[2], use_batch_norm, num_downs=get_power_of_two(resolution)-1, ngf=ngf, use_dropout=True))(Input(bgr_shape))
#self.GB = modelify(UNet (bgr_shape[2], use_batch_norm, num_downs=get_power_of_two(resolution)-1, ngf=ngf, use_dropout=True))(Input(bgr_shape))
self.PA = modelify(RecycleGANModel.UNetTemporalPredictor(bgr_shape[2], use_batch_norm, ngf=npf))([Input(bgr_shape), Input(bgr_shape)])
self.PB = modelify(RecycleGANModel.UNetTemporalPredictor(bgr_shape[2], use_batch_norm, ngf=npf))([Input(bgr_shape), Input(bgr_shape)])
self.DA = modelify(RecycleGANModel.PatchDiscriminator(ndf=ndf) ) (Input(bgr_shape))
self.DB = modelify(RecycleGANModel.PatchDiscriminator(ndf=ndf) ) (Input(bgr_shape))
if not self.is_first_run():
weights_to_load = [
(self.GA, 'GA.h5'),
(self.DA, 'DA.h5'),
(self.PA, 'PA.h5'),
(self.GB, 'GB.h5'),
(self.DB, 'DB.h5'),
(self.PB, 'PB.h5'),
]
self.load_weights_safe(weights_to_load)
real_A0 = Input(bgr_shape, name="real_A0")
real_A1 = Input(bgr_shape, name="real_A1")
real_A2 = Input(bgr_shape, name="real_A2")
real_B0 = Input(bgr_shape, name="real_B0")
real_B1 = Input(bgr_shape, name="real_B1")
real_B2 = Input(bgr_shape, name="real_B2")
DA_ones = K.ones_like ( K.shape(self.DA.outputs[0]) )
DA_zeros = K.zeros_like ( K.shape(self.DA.outputs[0] ))
DB_ones = K.ones_like ( K.shape(self.DB.outputs[0] ))
DB_zeros = K.zeros_like ( K.shape(self.DB.outputs[0] ))
def DLoss(labels,logits):
return K.mean(K.binary_crossentropy(labels,logits))
def CycleLoss (t1,t2):
return K.mean(K.abs(t1 - t2))
def RecurrentLOSS(t1,t2):
return K.mean(K.abs(t1 - t2))
def RecycleLOSS(t1,t2):
return K.mean(K.abs(t1 - t2))
fake_B0 = self.GA(real_A0)
fake_B1 = self.GA(real_A1)
fake_A0 = self.GB(real_B0)
fake_A1 = self.GB(real_B1)
real_A0_d = self.DA(real_A0)
real_A0_d_ones = K.ones_like(real_A0_d)
real_A1_d = self.DA(real_A1)
real_A1_d_ones = K.ones_like(real_A1_d)
fake_A0_d = self.DA(fake_A0)
fake_A0_d_ones = K.ones_like(fake_A0_d)
fake_A0_d_zeros = K.zeros_like(fake_A0_d)
fake_A1_d = self.DA(fake_A1)
fake_A1_d_ones = K.ones_like(fake_A1_d)
fake_A1_d_zeros = K.zeros_like(fake_A1_d)
real_B0_d = self.DB(real_B0)
real_B0_d_ones = K.ones_like(real_B0_d)
real_B1_d = self.DB(real_B1)
real_B1_d_ones = K.ones_like(real_B1_d)
fake_B0_d = self.DB(fake_B0)
fake_B0_d_ones = K.ones_like(fake_B0_d)
fake_B0_d_zeros = K.zeros_like(fake_B0_d)
fake_B1_d = self.DB(fake_B1)
fake_B1_d_ones = K.ones_like(fake_B1_d)
fake_B1_d_zeros = K.zeros_like(fake_B1_d)
pred_A2 = self.PA ( [real_A0, real_A1])
pred_B2 = self.PB ( [real_B0, real_B1])
rec_A2 = self.GB ( self.PB ( [fake_B0, fake_B1]) )
rec_B2 = self.GA ( self.PA ( [fake_A0, fake_A1]))
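# Recycle path: two frames are translated to the other domain, the temporal
# predictor P guesses the third, and G maps it back; rec_A2/rec_B2 are
# compared with the real third frames in the recycle losses below.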
loss_GA = DLoss(fake_B0_d_ones, fake_B0_d ) + \
DLoss(fake_B1_d_ones, fake_B1_d ) + \
lambda_A * (RecurrentLOSS(pred_A2, real_A2) + \
RecycleLOSS(rec_B2, real_B2) )
weights_GA = self.GA.trainable_weights + self.PA.trainable_weights
loss_GB = DLoss(fake_A0_d_ones, fake_A0_d ) + \
DLoss(fake_A1_d_ones, fake_A1_d ) + \
lambda_B * (RecurrentLOSS(pred_B2, real_B2) + \
RecycleLOSS(rec_A2, real_A2) )
weights_GB = self.GB.trainable_weights + self.PB.trainable_weights
def opt():
return Adam(lr=2e-4, beta_1=0.5, beta_2=0.999, tf_cpu_mode=2)#, clipnorm=1)
self.GA_train = K.function ([real_A0, real_A1, real_A2, real_B0, real_B1, real_B2],[loss_GA],
opt().get_updates(loss_GA, weights_GA) )
self.GB_train = K.function ([real_A0, real_A1, real_A2, real_B0, real_B1, real_B2],[loss_GB],
opt().get_updates(loss_GB, weights_GB) )
###########
loss_D_A0 = ( DLoss(real_A0_d_ones, real_A0_d ) + \
DLoss(fake_A0_d_zeros, fake_A0_d ) ) * 0.5
loss_D_A1 = ( DLoss(real_A1_d_ones, real_A1_d ) + \
DLoss(fake_A1_d_zeros, fake_A1_d ) ) * 0.5
loss_D_A = loss_D_A0 + loss_D_A1
self.DA_train = K.function ([real_A0, real_A1, real_A2, real_B0, real_B1, real_B2],[loss_D_A],
opt().get_updates(loss_D_A, self.DA.trainable_weights) )
############
loss_D_B0 = ( DLoss(real_B0_d_ones, real_B0_d ) + \
DLoss(fake_B0_d_zeros, fake_B0_d ) ) * 0.5
loss_D_B1 = ( DLoss(real_B1_d_ones, real_B1_d ) + \
DLoss(fake_B1_d_zeros, fake_B1_d ) ) * 0.5
loss_D_B = loss_D_B0 + loss_D_B1
self.DB_train = K.function ([real_A0, real_A1, real_A2, real_B0, real_B1, real_B2],[loss_D_B],
opt().get_updates(loss_D_B, self.DB.trainable_weights) )
############
self.G_view = K.function([real_A0, real_A1, real_A2, real_B0, real_B1, real_B2],[fake_A0, fake_A1, pred_A2, rec_A2, fake_B0, fake_B1, pred_B2, rec_B2 ])
if self.is_training_mode:
t = SampleProcessor.Types
output_sample_types=[ { 'types': (t.IMG_SOURCE, t.MODE_BGR), 'resolution':resolution, 'normalize_tanh' : True} ]
self.set_training_data_generators ([
SampleGeneratorImageTemporal(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
temporal_image_count=3,
sample_process_options=SampleProcessor.Options(random_flip = False),
output_sample_types=output_sample_types ),
SampleGeneratorImageTemporal(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
temporal_image_count=3,
sample_process_options=SampleProcessor.Options(random_flip = False),
output_sample_types=output_sample_types ),
])
else:
self.G_convert = K.function([real_B0],[fake_A0])
#override
def get_model_filename_list(self):
return [ [self.GA, 'GA.h5'],
[self.GB, 'GB.h5'],
[self.DA, 'DA.h5'],
[self.DB, 'DB.h5'],
[self.PA, 'PA.h5'],
[self.PB, 'PB.h5'] ]
#override
def onSave(self):
self.save_weights_safe( self.get_model_filename_list() )
#override
def onTrainOneIter(self, generators_samples, generators_list):
source_src_0, source_src_1, source_src_2, = generators_samples[0]
source_dst_0, source_dst_1, source_dst_2, = generators_samples[1]
feed = [source_src_0, source_src_1, source_src_2, source_dst_0, source_dst_1, source_dst_2]
loss_GA, = self.GA_train ( feed )
loss_GB, = self.GB_train ( feed )
loss_DA, = self.DA_train( feed )
loss_DB, = self.DB_train( feed )
return ( ('GA', loss_GA), ('GB', loss_GB), ('DA', loss_DA), ('DB', loss_DB) )
#override
def onGetPreview(self, sample):
test_A0 = sample[0][0]
test_A1 = sample[0][1]
test_A2 = sample[0][2]
test_B0 = sample[1][0]
test_B1 = sample[1][1]
test_B2 = sample[1][2]
G_view_result = self.G_view([test_A0, test_A1, test_A2, test_B0, test_B1, test_B2])
fake_A0, fake_A1, pred_A2, rec_A2, fake_B0, fake_B1, pred_B2, rec_B2 = [ x[0] / 2 + 0.5 for x in G_view_result]
test_A0, test_A1, test_A2, test_B0, test_B1, test_B2 = [ x[0] / 2 + 0.5 for x in [test_A0, test_A1, test_A2, test_B0, test_B1, test_B2] ]
r = np.concatenate ((np.concatenate ( (test_A0, test_A1, test_A2, pred_A2, fake_B0, fake_B1, rec_A2), axis=1),
np.concatenate ( (test_B0, test_B1, test_B2, pred_B2, fake_A0, fake_A1, rec_B2), axis=1)
), axis=0)
return [ ('RecycleGAN', r ) ]
def predictor_func (self, face):
x = self.G_convert ( [ face[np.newaxis,...]*2-1 ] )[0]
return np.clip ( x[0] / 2 + 0.5 , 0, 1)
#override
def get_converter(self, **in_options):
from converters import ConverterImage
return ConverterImage(self.predictor_func,
predictor_input_size=self.options['resolution'],
**in_options)
@staticmethod
def ResNet(output_nc, use_batch_norm, ngf=64, n_blocks=6, use_dropout=False):
exec (nnlib.import_all(), locals(), globals())
if not use_batch_norm:
use_bias = True
def XNormalization(x):
return InstanceNormalization (axis=-1)(x)
else:
use_bias = False
def XNormalization(x):
return BatchNormalization (axis=-1)(x)
XConv2D = partial(Conv2D, padding='same', use_bias=use_bias)
XConv2DTranspose = partial(Conv2DTranspose, padding='same', use_bias=use_bias)
def func(input):
def ResnetBlock(dim, use_dropout=False):
def func(input):
x = input
x = XConv2D(dim, 3, strides=1)(x)
x = XNormalization(x)
x = ReLU()(x)
if use_dropout:
x = Dropout(0.5)(x)
x = XConv2D(dim, 3, strides=1)(x)
x = XNormalization(x)
x = ReLU()(x)
return Add()([x,input])
return func
x = input
x = ReLU()(XNormalization(XConv2D(ngf, 7, strides=1)(x)))
x = ReLU()(XNormalization(XConv2D(ngf*2, 3, strides=2)(x)))
x = ReLU()(XNormalization(XConv2D(ngf*4, 3, strides=2)(x)))
for i in range(n_blocks):
x = ResnetBlock(ngf*4, use_dropout=use_dropout)(x)
x = ReLU()(XNormalization(XConv2DTranspose(ngf*2, 3, strides=2)(x)))
x = ReLU()(XNormalization(XConv2DTranspose(ngf , 3, strides=2)(x)))
x = XConv2D(output_nc, 7, strides=1, activation='tanh', use_bias=True)(x)
return x
return func
@staticmethod
def UNet(output_nc, use_batch_norm, ngf=64, use_dropout=False):
exec (nnlib.import_all(), locals(), globals())
if not use_batch_norm:
use_bias = True
def XNormalizationL():
return InstanceNormalization (axis=-1)
else:
use_bias = False
def XNormalizationL():
return BatchNormalization (axis=-1)
def XNormalization(x):
return XNormalizationL()(x)
XConv2D = partial(Conv2D, padding='same', use_bias=use_bias)
XConv2DTranspose = partial(Conv2DTranspose, padding='same', use_bias=use_bias)
def func(input):
b,h,w,c = K.int_shape(input)
n_downs = get_power_of_two(w) - 4
Norm = XNormalizationL()
Norm2 = XNormalizationL()
Norm4 = XNormalizationL()
Norm8 = XNormalizationL()
x = input
x = e1 = XConv2D( ngf, 4, strides=2, use_bias=True ) (x)
x = e2 = Norm2( XConv2D( ngf*2, 4, strides=2 )( LeakyReLU(0.2)(x) ) )
x = e3 = Norm4( XConv2D( ngf*4, 4, strides=2 )( LeakyReLU(0.2)(x) ) )
l = []
for i in range(n_downs):
x = Norm8( XConv2D( ngf*8, 4, strides=2 )( LeakyReLU(0.2)(x) ) )
l += [x]
x = XConv2D( ngf*8, 4, strides=2, use_bias=True )( LeakyReLU(0.2)(x) )
for i in range(n_downs):
x = Norm8( XConv2DTranspose( ngf*8, 4, strides=2 )( ReLU()(x) ) )
if i <= n_downs-2:
x = Dropout(0.5)(x)
x = Concatenate(axis=-1)([x, l[-i-1] ])
x = Norm4( XConv2DTranspose( ngf*4, 4, strides=2 )( ReLU()(x) ) )
x = Concatenate(axis=-1)([x, e3])
x = Norm2( XConv2DTranspose( ngf*2, 4, strides=2 )( ReLU()(x) ) )
x = Concatenate(axis=-1)([x, e2])
x = Norm( XConv2DTranspose( ngf, 4, strides=2 )( ReLU()(x) ) )
x = Concatenate(axis=-1)([x, e1])
x = XConv2DTranspose(output_nc, 4, strides=2, activation='tanh', use_bias=True)( ReLU()(x) )
return x
return func
@staticmethod
def UNetTemporalPredictor(output_nc, use_batch_norm, ngf=64, use_dropout=False):
exec (nnlib.import_all(), locals(), globals())
def func(inputs):
past_2_image_tensor, past_1_image_tensor = inputs
x = Concatenate(axis=-1)([ past_2_image_tensor, past_1_image_tensor ])
x = UNet(3, use_batch_norm, ngf=ngf, use_dropout=use_dropout) (x)
return x
return func
@staticmethod
def PatchDiscriminator(ndf=64):
exec (nnlib.import_all(), locals(), globals())
#use_bias = True
#def XNormalization(x):
# return InstanceNormalization (axis=-1)(x)
use_bias = False
def XNormalization(x):
return BatchNormalization (axis=-1)(x)
XConv2D = partial(Conv2D, use_bias=use_bias)
def func(input):
b,h,w,c = K.int_shape(input)
x = input
x = ZeroPadding2D((1,1))(x)
x = XConv2D( ndf, 4, strides=2, padding='valid', use_bias=True)(x)
x = LeakyReLU(0.2)(x)
x = ZeroPadding2D((1,1))(x)
x = XConv2D( ndf*2, 4, strides=2, padding='valid')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
x = ZeroPadding2D((1,1))(x)
x = XConv2D( ndf*4, 4, strides=2, padding='valid')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
x = ZeroPadding2D((1,1))(x)
x = XConv2D( ndf*8, 4, strides=2, padding='valid')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
x = ZeroPadding2D((1,1))(x)
x = XConv2D( ndf*8, 4, strides=2, padding='valid')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
x = ZeroPadding2D((1,1))(x)
return XConv2D( 1, 4, strides=1, padding='valid', use_bias=True, activation='sigmoid')(x)#
return func
@staticmethod
def NLayerDiscriminator(ndf=64, n_layers=3):
exec (nnlib.import_all(), locals(), globals())
#use_bias = True
#def XNormalization(x):
# return InstanceNormalization (axis=-1)(x)
use_bias = False
def XNormalization(x):
return BatchNormalization (axis=-1)(x)
XConv2D = partial(Conv2D, use_bias=use_bias)
def func(input):
b,h,w,c = K.int_shape(input)
x = input
f = ndf
x = ZeroPadding2D((1,1))(x)
x = XConv2D( f, 4, strides=2, padding='valid', use_bias=True)(x)
f = min( ndf*8, f*2 )
x = LeakyReLU(0.2)(x)
for i in range(n_layers):
x = ZeroPadding2D((1,1))(x)
x = XConv2D( f, 4, strides=2, padding='valid')(x)
f = min( ndf*8, f*2 )
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
x = ZeroPadding2D((1,1))(x)
x = XConv2D( f, 4, strides=1, padding='valid')(x)
x = XNormalization(x)
x = LeakyReLU(0.2)(x)
x = ZeroPadding2D((1,1))(x)
return XConv2D( 1, 4, strides=1, padding='valid', use_bias=True, activation='sigmoid')(x)#
return func
Model = RecycleGANModel


@@ -24,7 +24,7 @@ class SAEModel(ModelBase):
#override
def onInitializeOptions(self, is_first_run, ask_override):
yn_str = {True:'y',False:'n'}
default_resolution = 128
default_archi = 'df'
default_face_type = 'f'
@@ -90,20 +90,20 @@ class SAEModel(ModelBase):
default_apply_random_ct = False if is_first_run else self.options.get('apply_random_ct', False)
self.options['apply_random_ct'] = io.input_bool ("Apply random color transfer to src faceset? (y/n, ?:help skip:%s) : " % (yn_str[default_apply_random_ct]), default_apply_random_ct, help_message="Increase the variety of src samples by applying LCT color transfer from random dst samples. It is like 'face_style' learning, but with a more precise color transfer and without risk of model collapse; it does not require additional GPU resources, but training time may be longer because the src faceset becomes more diverse.")
if nnlib.device.backend != 'plaidML': # todo https://github.com/plaidml/plaidml/issues/301
default_clipgrad = False if is_first_run else self.options.get('clipgrad', False)
self.options['clipgrad'] = io.input_bool ("Enable gradient clipping? (y/n, ?:help skip:%s) : " % (yn_str[default_clipgrad]), default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.")
else:
self.options['clipgrad'] = False
else:
self.options['pixel_loss'] = self.options.get('pixel_loss', False)
self.options['face_style_power'] = self.options.get('face_style_power', default_face_style_power)
self.options['bg_style_power'] = self.options.get('bg_style_power', default_bg_style_power)
self.options['apply_random_ct'] = self.options.get('apply_random_ct', False)
self.options['clipgrad'] = self.options.get('clipgrad', False)
if is_first_run:
self.options['pretrain'] = io.input_bool ("Pretrain the model? (y/n, ?:help skip:n) : ", False, help_message="Pretrain the model with a large amount of various faces. This technique may help to train a fake with very different face shapes and lighting conditions in the src/dst data. The face will look more morphed. To reduce the morph effect, some model files will be initialized but not updated after pretraining: LIAE: inter_AB.h5, DF: encoder.h5. The longer you pretrain the model, the more morphed the face will look. After that, save and run the training again.")
else:
@@ -383,7 +383,7 @@ class SAEModel(ModelBase):
[ {'types' : (t.IMG_TRANSFORMED, face_type, t_mode_bgr), 'resolution': resolution // (2**i)} for i in range(ms_count)] + \
[ {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_M), 'resolution': resolution // (2**i) } for i in range(ms_count)])
])
#override
def get_model_filename_list(self):
ar = []
@@ -413,7 +413,7 @@ class SAEModel(ModelBase):
ar += [ [self.decoder_srcm, 'decoder_srcm.h5'],
[self.decoder_dstm, 'decoder_dstm.h5'] ]
return ar
#override
def onSave(self):
self.save_weights_safe( self.get_model_filename_list() )
@@ -469,17 +469,20 @@ class SAEModel(ModelBase):
return result
def predictor_func (self, face):
if self.options['learn_mask']:
bgr, mask_dst_dstm, mask_src_dstm = self.AE_convert ([face[np.newaxis,...]])
mask = mask_dst_dstm[0] * mask_src_dstm[0]
return bgr[0], mask[...,0]
def predictor_func (self, face=None, dummy_predict=False):
if dummy_predict:
self.AE_convert ([ np.zeros ( (1, self.options['resolution'], self.options['resolution'], 3), dtype=np.float32 ) ])
else:
bgr, = self.AE_convert ([face[np.newaxis,...]])
return bgr[0]
if self.options['learn_mask']:
bgr, mask_dst_dstm, mask_src_dstm = self.AE_convert ([face[np.newaxis,...]])
mask = mask_dst_dstm[0] * mask_src_dstm[0]
return bgr[0], mask[...,0]
else:
bgr, = self.AE_convert ([face[np.newaxis,...]])
return bgr[0]
#override
def get_converter(self):
def get_ConverterConfig(self):
base_erode_mask_modifier = 30 if self.options['face_type'] == 'f' else 100
base_blur_mask_modifier = 0 if self.options['face_type'] == 'f' else 100
@@ -489,17 +492,18 @@ class SAEModel(ModelBase):
face_type = FaceType.FULL if self.options['face_type'] == 'f' else FaceType.HALF
from converters import ConverterMasked
return ConverterMasked(self.predictor_func,
predictor_input_size=self.options['resolution'],
predictor_masked=self.options['learn_mask'],
face_type=face_type,
default_mode = 1 if self.options['apply_random_ct'] or self.options['face_style_power'] or self.options['bg_style_power'] else 4,
base_erode_mask_modifier=base_erode_mask_modifier,
base_blur_mask_modifier=base_blur_mask_modifier,
default_erode_mask_modifier=default_erode_mask_modifier,
default_blur_mask_modifier=default_blur_mask_modifier,
clip_hborder_mask_per=0.0625 if (self.options['face_type'] == 'f') else 0)
import converters
return converters.ConverterConfigMasked(predictor_func=self.predictor_func,
predictor_input_shape=(self.options['resolution'], self.options['resolution'], 3),
predictor_masked=self.options['learn_mask'],
face_type=face_type,
default_mode = 1 if self.options['apply_random_ct'] or self.options['face_style_power'] or self.options['bg_style_power'] else 4,
base_erode_mask_modifier=base_erode_mask_modifier,
base_blur_mask_modifier=base_blur_mask_modifier,
default_erode_mask_modifier=default_erode_mask_modifier,
default_blur_mask_modifier=default_blur_mask_modifier,
clip_hborder_mask_per=0.0625 if (self.options['face_type'] == 'f') else 0,
)
@staticmethod
def initialize_nn_functions():
@@ -545,7 +549,7 @@ class SAEModel(ModelBase):
return Norm(norm)( Act(act) (Conv2D(dim, kernel_size=5, strides=2, padding=padding)(x)) )
return func
SAEModel.downscale = downscale
#def downscale (dim, padding='zero', norm='', act='', **kwargs):
# def func(x):
# return BlurPool()( Norm(norm)( Act(act) (Conv2D(dim, kernel_size=5, strides=1, padding=padding)(x)) ) )