Converter:

Session is now saved to the model folder.

Blur and erode mask modifier ranges are increased to [-400..400].

The hist-match-bw mode is replaced with the new seamless2 mode.

Added 'ebs' color transfer mode (works only on Windows).

The FANSEG model (used in the FAN-x mask modes) is retrained with a new model configuration and now produces better precision and less jitter.
commit 7ed38a8097
Colombo, 2019-09-07 13:57:42 +04:00
29 changed files with 768 additions and 314 deletions
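
Beyond the items above, the diffs below also refactor the converter API: predictor_func and predictor_input_shape are threaded through ConvertMasked / ConvertFaceAvatar as explicit arguments instead of being stored on the config object. For the new seamless2 mode, here is a minimal sketch of the idea as it reads from the ConvertMasked diff: blend the predicted face onto the aligned destination crop first, then warp the blended crop back into the frame (classic seamless clones into the full frame afterwards instead). The commit itself calls an internal imagelib.seamless_clone; the sketch substitutes OpenCV's cv2.seamlessClone, and all names here are illustrative, not from the commit.

    import cv2
    import numpy as np

    def seamless2_sketch(prd_face_bgr, dst_face_bgr, face_mask_a, face_output_mat, img_size, out_img):
        # blend in aligned face space: float [0,1] faces -> uint8 for seamlessClone
        mask_u8 = (np.squeeze(face_mask_a) * 255).astype(np.uint8)
        l, t, w, h = cv2.boundingRect(mask_u8)
        center = (l + w // 2, t + h // 2)
        blended = cv2.seamlessClone((prd_face_bgr * 255).astype(np.uint8),
                                    (dst_face_bgr * 255).astype(np.uint8),
                                    mask_u8, center, cv2.NORMAL_CLONE)
        # warp the blended crop back into the full frame, as the diff does
        return cv2.warpAffine(blended.astype(np.float32) / 255.0, face_output_mat, img_size, out_img,
                              cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT)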

View file

@@ -14,8 +14,8 @@ def process_frame_info(frame_info, inp_sh):
     img = cv2.warpAffine( img, img_mat, inp_sh[0:2], borderMode=cv2.BORDER_REPLICATE, flags=cv2.INTER_CUBIC )
     return img
 
-def ConvertFaceAvatar (cfg, prev_temporal_frame_infos, frame_info, next_temporal_frame_infos):
-    inp_sh = cfg.predictor_input_shape
+def ConvertFaceAvatar (predictor_func, predictor_input_shape, cfg, prev_temporal_frame_infos, frame_info, next_temporal_frame_infos):
+    inp_sh = predictor_input_shape
 
     prev_imgs=[]
     next_imgs=[]
@@ -24,7 +24,7 @@ def ConvertFaceAvatar (cfg, prev_temporal_frame_infos, frame_info, next_temporal
         next_imgs.append( process_frame_info(next_temporal_frame_infos[i], inp_sh) )
 
     img = process_frame_info(frame_info, inp_sh)
 
-    prd_f = cfg.predictor_func ( prev_imgs, img, next_imgs )
+    prd_f = predictor_func ( prev_imgs, img, next_imgs )
 
     if cfg.super_resolution_mode != 0:
         prd_f = cfg.superres_func(cfg.super_resolution_mode, prd_f)

View file

@@ -9,7 +9,7 @@ from interact import interact as io
 from utils.cv2_utils import *
 
-def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmarks):
+def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmarks):
     #if debug:
     #    debugs = [img_bgr.copy()]
@@ -26,7 +26,7 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
     out_img = img_bgr.copy()
     out_merging_mask = None
 
-    output_size = cfg.predictor_input_shape[0]
+    output_size = predictor_input_shape[0]
     if cfg.super_resolution_mode != 0:
         output_size *= 2
@@ -36,17 +36,19 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
     dst_face_bgr = cv2.warpAffine( img_bgr , face_mat, (output_size, output_size), flags=cv2.INTER_CUBIC )
     dst_face_mask_a_0 = cv2.warpAffine( img_face_mask_a, face_mat, (output_size, output_size), flags=cv2.INTER_CUBIC )
 
-    predictor_input_bgr = cv2.resize (dst_face_bgr, cfg.predictor_input_shape[0:2] )
+    predictor_input_bgr = cv2.resize (dst_face_bgr, predictor_input_shape[0:2] )
 
-    if cfg.predictor_masked:
-        prd_face_bgr, prd_face_mask_a_0 = cfg.predictor_func (predictor_input_bgr)
-        prd_face_bgr = np.clip (prd_face_bgr, 0, 1.0 )
-        prd_face_mask_a_0 = np.clip (prd_face_mask_a_0, 0.0, 1.0)
+    predicted = predictor_func (predictor_input_bgr)
+    if isinstance(predicted, tuple):
+        #converter return bgr,mask
+        prd_face_bgr = np.clip (predicted[0], 0, 1.0)
+        prd_face_mask_a_0 = np.clip (predicted[1], 0, 1.0)
+        predictor_masked = True
     else:
-        predicted = cfg.predictor_func (predictor_input_bgr)
+        #converter return bgr only, using dst mask
         prd_face_bgr = np.clip (predicted, 0, 1.0 )
-        prd_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, cfg.predictor_input_shape[0:2] )
+        prd_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, predictor_input_shape[0:2] )
+        predictor_masked = False
 
     if cfg.super_resolution_mode:
         #if debug:
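
A note on the hunk above: the old predictor_masked config flag is gone; the converter now duck-types the predictor's return value, so a predictor that returns a (bgr, mask) tuple supplies its own mask, while one returning a bare array falls back to the dst mask. A self-contained sketch of the protocol (stand-in predictors; shapes are illustrative):

    import numpy as np

    def masked_predictor(face_bgr):    # stand-in for a mask-producing model
        return np.clip(face_bgr, 0, 1.0), np.ones(face_bgr.shape[:2] + (1,), np.float32)

    def unmasked_predictor(face_bgr):  # stand-in for a bgr-only model
        return np.clip(face_bgr, 0, 1.0)

    for predictor_func in (masked_predictor, unmasked_predictor):
        predicted = predictor_func(np.zeros((128, 128, 3), np.float32))
        predictor_masked = isinstance(predicted, tuple)   # the check the diff adds
        print(predictor_func.__name__, '-> uses', 'predicted mask' if predictor_masked else 'dst mask')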
@@ -57,7 +59,7 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
         #if debug:
         #    debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
 
-    if cfg.predictor_masked:
+    if predictor_masked:
         prd_face_mask_a_0 = cv2.resize (prd_face_mask_a_0, (output_size, output_size), cv2.INTER_CUBIC)
     else:
         prd_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, (output_size, output_size), cv2.INTER_CUBIC)
@@ -198,7 +200,7 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
         #    debugs += [img_face_mask_aaa.copy()]
 
         if 'seamless' not in cfg.mode and cfg.color_transfer_mode != 0:
-            if cfg.color_transfer_mode == 1:
+            if cfg.color_transfer_mode == 1: #rct
                 #if debug:
                 #    debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
@@ -211,8 +213,8 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
                 #    debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
 
-            elif cfg.color_transfer_mode == 2:
-                #if debug:
+            elif cfg.color_transfer_mode == 2: #lct
+                #if debug:
                 #    debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
                 prd_face_bgr = imagelib.linear_color_transfer (prd_face_bgr, dst_face_bgr)
@@ -220,7 +222,14 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
                 #if debug:
                 #    debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
 
+            elif cfg.color_transfer_mode == 3: #ebs
+                #if debug:
+                #    debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
+                prd_face_bgr = cfg.ebs_ct_func ( np.clip( (dst_face_bgr*255), 0, 255).astype(np.uint8),
+                                                 np.clip( (prd_face_bgr*255), 0, 255).astype(np.uint8), )#prd_face_mask_a
+                prd_face_bgr = np.clip( prd_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
+
         if cfg.mode == 'hist-match-bw':
             prd_face_bgr = cv2.cvtColor(prd_face_bgr, cv2.COLOR_BGR2GRAY)
             prd_face_bgr = np.repeat( np.expand_dims (prd_face_bgr, -1), (3,), -1 )
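
The new ebs branch above shows the round-trip the converter uses whenever a transfer needs 8-bit input: scale the [0,1] float faces to uint8, apply the transfer with the reference image first, then normalize back. A hedged generic helper; transfer_fn stands in for cfg.ebs_ct_func, which the commit leaves as an externally supplied (and Windows-only) function:

    import numpy as np

    def apply_uint8_color_transfer(transfer_fn, source_f32, reference_f32):
        # float [0,1] -> uint8, matching the diff's argument order (reference first)
        ref_u8 = np.clip(reference_f32 * 255, 0, 255).astype(np.uint8)
        src_u8 = np.clip(source_f32 * 255, 0, 255).astype(np.uint8)
        out_u8 = transfer_fn(ref_u8, src_u8)
        # uint8 -> float [0,1] again, clipped like the diff does
        return np.clip(out_u8.astype(np.float32) / 255.0, 0.0, 1.0)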
@@ -249,34 +258,39 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
         if cfg.mode == 'hist-match-bw':
             prd_face_bgr = prd_face_bgr.astype(dtype=np.float32)
 
-        out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
-        out_img = np.clip(out_img, 0.0, 1.0)
-
         #if debug:
         #    debugs += [out_img.copy()]
 
         if cfg.mode == 'overlay':
             pass
 
-        if 'seamless' in cfg.mode:
-            #mask used for cv2.seamlessClone
-            img_face_seamless_mask_a = None
+        #mask used for cv2.seamlessClone
+        img_face_mask_a = img_face_mask_aaa[...,0:1]
+        if cfg.mode == 'seamless2':
+            img_face_mask_a = cv2.warpAffine( img_face_mask_a, face_output_mat, (output_size, output_size), flags=cv2.INTER_CUBIC )
+
+        img_face_seamless_mask_a = None
         for i in range(1,10):
            a = img_face_mask_a > i / 10.0
            if len(np.argwhere(a)) == 0:
                continue
-            img_face_seamless_mask_a = img_face_mask_aaa[...,0:1].copy()
+            img_face_seamless_mask_a = img_face_mask_a.copy()
            img_face_seamless_mask_a[a] = 1.0
            img_face_seamless_mask_a[img_face_seamless_mask_a <= i / 10.0] = 0.0
            break
 
+        if cfg.mode == 'seamless2':
+            face_seamless = imagelib.seamless_clone ( prd_face_bgr, dst_face_bgr, img_face_seamless_mask_a )
+            out_img = cv2.warpAffine( face_seamless, face_output_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
+        else:
+            out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
+        out_img = np.clip(out_img, 0.0, 1.0)
+
+        if 'seamless' in cfg.mode and cfg.mode != 'seamless2':
             try:
                 #calc same bounding rect and center point as in cv2.seamlessClone to prevent jittering (not flickering)
                 l,t,w,h = cv2.boundingRect( (img_face_seamless_mask_a*255).astype(np.uint8) )
                 s_maskx, s_masky = int(l+w/2), int(t+h/2)
                 out_img = cv2.seamlessClone( (out_img*255).astype(np.uint8), img_bgr_uint8, (img_face_seamless_mask_a*255).astype(np.uint8), (s_maskx,s_masky) , cv2.NORMAL_CLONE )
                 out_img = out_img.astype(dtype=np.float32) / 255.0
             except Exception as e:
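
The threshold loop in the hunk above picks the lowest i/10 level that still leaves mask pixels and hard-binarizes the mask there, so that cv2.seamlessClone computes the same bounding rect and center from frame to frame (the anti-jitter comment in the code). The same logic as a standalone sketch:

    import numpy as np

    def binarize_seamless_mask(mask_a):
        # mirrors the for i in range(1,10) loop above
        for i in range(1, 10):
            above = mask_a > i / 10.0
            if not above.any():
                continue              # everything is below this level: try a higher one
            out = mask_a.copy()
            out[above] = 1.0
            out[out <= i / 10.0] = 0.0
            return out
        return None                   # mask is empty at every level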
@@ -301,8 +315,8 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
                 #    debugs += [ np.clip( cv2.warpAffine( out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
                 face_mask_aaa = cv2.warpAffine( img_face_mask_aaa, face_mat, (output_size, output_size) )
 
-                out_face_bgr = imagelib.reinhard_color_transfer ( np.clip( (out_face_bgr*255).astype(np.uint8), 0, 255),
-                                                                  np.clip( (dst_face_bgr*255).astype(np.uint8), 0, 255),
+                out_face_bgr = imagelib.reinhard_color_transfer ( np.clip( (out_face_bgr*255), 0, 255).astype(np.uint8),
+                                                                  np.clip( (dst_face_bgr*255), 0, 255).astype(np.uint8),
                                                                   source_mask=face_mask_aaa, target_mask=face_mask_aaa)
                 out_face_bgr = np.clip( out_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
@@ -318,7 +332,15 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
                 #if debug:
                 #    debugs += [ np.clip( cv2.warpAffine( out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
 
+            elif cfg.color_transfer_mode == 3: #ebs
+                #if debug:
+                #    debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
+                out_face_bgr = cfg.ebs_ct_func ( np.clip( (dst_face_bgr*255), 0, 255).astype(np.uint8),
+                                                 np.clip( (out_face_bgr*255), 0, 255).astype(np.uint8), )
+                out_face_bgr = np.clip( out_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
+
             if cfg.mode == 'seamless-hist-match':
                 out_face_bgr = imagelib.color_hist_match(out_face_bgr, dst_face_bgr, cfg.hist_match_threshold)
@@ -359,14 +381,14 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
     return out_img, out_merging_mask
 
-def ConvertMasked (cfg, frame_info):
+def ConvertMasked (predictor_func, predictor_input_shape, cfg, frame_info):
     img_bgr_uint8 = cv2_imread(frame_info.filename)
     img_bgr_uint8 = imagelib.normalize_channels (img_bgr_uint8, 3)
     img_bgr = img_bgr_uint8.astype(np.float32) / 255.0
 
     outs = []
     for face_num, img_landmarks in enumerate( frame_info.landmarks_list ):
-        out_img, out_img_merging_mask = ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_landmarks)
+        out_img, out_img_merging_mask = ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, img_bgr_uint8, img_bgr, img_landmarks)
         outs += [ (out_img, out_img_merging_mask) ]
 
     #Combining multiple face outputs

View file

@@ -14,16 +14,14 @@ class ConverterConfig(object):
     TYPE_IMAGE = 3
     TYPE_IMAGE_WITH_LANDMARKS = 4
 
-    def __init__(self, type=0, predictor_func=None,
-                       predictor_input_shape=None):
+    def __init__(self, type=0):
         self.type = type
-        self.predictor_func = predictor_func
-        self.predictor_input_shape = predictor_input_shape
 
         self.superres_func = None
         self.sharpen_func = None
         self.fanseg_input_size = None
         self.fanseg_extract_func = None
+        self.ebs_ct_func = None
 
         self.super_res_dict = {0:"None", 1:'RankSRGAN'}
         self.sharpen_dict = {0:"None", 1:'box', 2:'gaussian'}
@@ -84,40 +82,46 @@ class ConverterConfig(object):
         r += f"super_resolution_mode : {self.super_res_dict[self.super_resolution_mode]}\n"
         return r
 
+mode_dict = {0:'original',
+             1:'overlay',
+             2:'hist-match',
+             3:'seamless2',
+             4:'seamless',
+             5:'seamless-hist-match',
+             6:'raw-rgb',
+             7:'raw-rgb-mask',
+             8:'raw-mask-only',
+             9:'raw-predicted-only'}
+
+full_face_mask_mode_dict = {1:'learned',
+                            2:'dst',
+                            3:'FAN-prd',
+                            4:'FAN-dst',
+                            5:'FAN-prd*FAN-dst',
+                            6:'learned*FAN-prd*FAN-dst'}
+
+half_face_mask_mode_dict = {1:'learned',
+                            2:'dst',
+                            4:'FAN-dst',
+                            7:'learned*FAN-dst'}
+
+ctm_dict = { 0: "None", 1:"rct", 2:"lct", 3:"ebs" }
+ctm_str_dict = {None:0, "rct":1, "lct": 2, "ebs":3 }
+
 class ConverterConfigMasked(ConverterConfig):
 
-    def __init__(self, predictor_func=None,
-                       predictor_input_shape=None,
-                       predictor_masked=True,
-                       face_type=FaceType.FULL,
+    def __init__(self, face_type=FaceType.FULL,
                        default_mode = 4,
                        base_erode_mask_modifier = 0,
                        base_blur_mask_modifier = 0,
                        default_erode_mask_modifier = 0,
                        default_blur_mask_modifier = 0,
                        clip_hborder_mask_per = 0,
                        ):
 
-        super().__init__(type=ConverterConfig.TYPE_MASKED,
-                         predictor_func=predictor_func,
-                         predictor_input_shape=predictor_input_shape,
-                         )
-
-        if len(predictor_input_shape) != 3:
-            raise ValueError("ConverterConfigMasked: predictor_input_shape must be rank 3.")
-
-        if predictor_input_shape[0] != predictor_input_shape[1]:
-            raise ValueError("ConverterConfigMasked: predictor_input_shape must be a square.")
-
-        self.predictor_masked = predictor_masked
+        super().__init__(type=ConverterConfig.TYPE_MASKED)
+
         self.face_type = face_type
         if self.face_type not in [FaceType.FULL, FaceType.HALF]:
             raise ValueError("ConverterConfigMasked supports only full or half face masks.")
 
         self.default_mode = default_mode
         self.base_erode_mask_modifier = base_erode_mask_modifier
         self.base_blur_mask_modifier = base_blur_mask_modifier
         self.default_erode_mask_modifier = default_erode_mask_modifier
         self.default_blur_mask_modifier = default_blur_mask_modifier
         self.clip_hborder_mask_per = clip_hborder_mask_per
 
         #default changeable params
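
With the predictor removed from its constructor, ConverterConfigMasked now carries only plain settings, which keeps it cheap to copy and hand to worker processes while the predictor is supplied at convert time. A hedged usage sketch; the import paths, shape, and argument values below are assumptions for illustration, not from the commit:

    from facelib import FaceType          # assumed import path
    # ConverterConfigMasked / ConvertMasked come from this repo's converter module

    cfg = ConverterConfigMasked (face_type=FaceType.FULL,
                                 default_mode = 4,
                                 clip_hborder_mask_per = 0.0625)
    cfg.set_mode (4)                      # 'seamless', via the module-level mode_dict
    # the predictor now arrives as an argument instead of living on cfg:
    # out_img, out_merging_mask = ConvertMasked (predictor_func, (128,128,3), cfg, frame_info)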
@@ -133,37 +137,11 @@ class ConverterConfigMasked(ConverterConfig):
         self.color_degrade_power = 0
         self.export_mask_alpha = False
 
-        self.mode_dict = {0:'original',
-                          1:'overlay',
-                          2:'hist-match',
-                          3:'hist-match-bw',
-                          4:'seamless',
-                          5:'seamless-hist-match',
-                          6:'raw-rgb',
-                          7:'raw-rgb-mask',
-                          8:'raw-mask-only',
-                          9:'raw-predicted-only'}
-
-        self.full_face_mask_mode_dict = {1:'learned',
-                                         2:'dst',
-                                         3:'FAN-prd',
-                                         4:'FAN-dst',
-                                         5:'FAN-prd*FAN-dst',
-                                         6:'learned*FAN-prd*FAN-dst'}
-
-        self.half_face_mask_mode_dict = {1:'learned',
-                                         2:'dst',
-                                         4:'FAN-dst',
-                                         7:'learned*FAN-dst'}
-
-        self.ctm_dict = { 0: "None", 1:"rct", 2:"lct" }
-        self.ctm_str_dict = {None:0, "rct":1, "lct": 2 }
-
     def copy(self):
         return copy.copy(self)
 
     def set_mode (self, mode):
-        self.mode = self.mode_dict.get (mode, self.mode_dict[self.default_mode] )
+        self.mode = mode_dict.get (mode, mode_dict[self.default_mode] )
 
     def toggle_masked_hist_match(self):
         if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
@@ -175,16 +153,16 @@ class ConverterConfigMasked(ConverterConfig):
     def toggle_mask_mode(self):
         if self.face_type == FaceType.FULL:
-            a = list( self.full_face_mask_mode_dict.keys() )
+            a = list( full_face_mask_mode_dict.keys() )
         else:
-            a = list( self.half_face_mask_mode_dict.keys() )
+            a = list( half_face_mask_mode_dict.keys() )
         self.mask_mode = a[ (a.index(self.mask_mode)+1) % len(a) ]
 
     def add_erode_mask_modifier(self, diff):
-        self.erode_mask_modifier = np.clip ( self.erode_mask_modifier+diff , -200, 200)
+        self.erode_mask_modifier = np.clip ( self.erode_mask_modifier+diff , -400, 400)
 
     def add_blur_mask_modifier(self, diff):
-        self.blur_mask_modifier = np.clip ( self.blur_mask_modifier+diff , -200, 200)
+        self.blur_mask_modifier = np.clip ( self.blur_mask_modifier+diff , -400, 400)
 
     def add_motion_blur_power(self, diff):
         self.motion_blur_power = np.clip ( self.motion_blur_power+diff, 0, 100)
@@ -193,7 +171,7 @@ class ConverterConfigMasked(ConverterConfig):
         self.output_face_scale = np.clip ( self.output_face_scale+diff , -50, 50)
 
     def toggle_color_transfer_mode(self):
-        self.color_transfer_mode = (self.color_transfer_mode+1) % 3
+        self.color_transfer_mode = (self.color_transfer_mode+1) % ( max(ctm_dict.keys())+1 )
 
     def add_color_degrade_power(self, diff):
         self.color_degrade_power = np.clip ( self.color_degrade_power+diff , 0, 100)
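
Because the modulus is now derived from ctm_dict, toggle_color_transfer_mode picks up any newly added transfer mode automatically; with the ebs entry the cycle becomes 0..3. A quick check:

    ctm_dict = { 0: "None", 1:"rct", 2:"lct", 3:"ebs" }

    color_transfer_mode = 0
    for _ in range(5):
        color_transfer_mode = (color_transfer_mode+1) % ( max(ctm_dict.keys())+1 )
        print(color_transfer_mode, ctm_dict[color_transfer_mode])   # 1 rct, 2 lct, 3 ebs, 0 None, 1 rct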
@@ -204,13 +182,13 @@ class ConverterConfigMasked(ConverterConfig):
     def ask_settings(self):
         s = """Choose mode: \n"""
-        for key in self.mode_dict.keys():
-            s += f"""({key}) {self.mode_dict[key]}\n"""
+        for key in mode_dict.keys():
+            s += f"""({key}) {mode_dict[key]}\n"""
         s += f"""Default: {self.default_mode} : """
 
         mode = io.input_int (s, self.default_mode)
-        self.mode = self.mode_dict.get (mode, self.mode_dict[self.default_mode] )
+        self.mode = mode_dict.get (mode, mode_dict[self.default_mode] )
 
         if 'raw' not in self.mode:
             if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
@@ -221,28 +199,28 @@ class ConverterConfigMasked(ConverterConfig):
         if self.face_type == FaceType.FULL:
             s = """Choose mask mode: \n"""
-            for key in self.full_face_mask_mode_dict.keys():
-                s += f"""({key}) {self.full_face_mask_mode_dict[key]}\n"""
+            for key in full_face_mask_mode_dict.keys():
+                s += f"""({key}) {full_face_mask_mode_dict[key]}\n"""
             s += f"""?:help Default: 1 : """
 
-            self.mask_mode = io.input_int (s, 1, valid_list=self.full_face_mask_mode_dict.keys(), help_message="If you learned the mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images. 'FAN-prd' - using super smooth mask by pretrained FAN-model from predicted face. 'FAN-dst' - using super smooth mask by pretrained FAN-model from dst face. 'FAN-prd*FAN-dst' or 'learned*FAN-prd*FAN-dst' - using multiplied masks.")
+            self.mask_mode = io.input_int (s, 1, valid_list=full_face_mask_mode_dict.keys(), help_message="If you learned the mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images. 'FAN-prd' - using super smooth mask by pretrained FAN-model from predicted face. 'FAN-dst' - using super smooth mask by pretrained FAN-model from dst face. 'FAN-prd*FAN-dst' or 'learned*FAN-prd*FAN-dst' - using multiplied masks.")
         else:
             s = """Choose mask mode: \n"""
-            for key in self.half_face_mask_mode_dict.keys():
-                s += f"""({key}) {self.half_face_mask_mode_dict[key]}\n"""
+            for key in half_face_mask_mode_dict.keys():
+                s += f"""({key}) {half_face_mask_mode_dict[key]}\n"""
             s += f"""?:help , Default: 1 : """
-            self.mask_mode = io.input_int (s, 1, valid_list=self.half_face_mask_mode_dict.keys(), help_message="If you learned the mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images.")
+            self.mask_mode = io.input_int (s, 1, valid_list=half_face_mask_mode_dict.keys(), help_message="If you learned the mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images.")
 
         if 'raw' not in self.mode:
-            self.erode_mask_modifier = self.base_erode_mask_modifier + np.clip ( io.input_int ("Choose erode mask modifier [-200..200] (skip:%d) : " % (self.default_erode_mask_modifier), self.default_erode_mask_modifier), -200, 200)
-            self.blur_mask_modifier = self.base_blur_mask_modifier + np.clip ( io.input_int ("Choose blur mask modifier [-200..200] (skip:%d) : " % (self.default_blur_mask_modifier), self.default_blur_mask_modifier), -200, 200)
+            self.erode_mask_modifier = np.clip ( io.input_int ("Choose erode mask modifier [-400..400] (skip:%d) : " % 0, 0), -400, 400)
+            self.blur_mask_modifier = np.clip ( io.input_int ("Choose blur mask modifier [-400..400] (skip:%d) : " % 0, 0), -400, 400)
             self.motion_blur_power = np.clip ( io.input_int ("Choose motion blur power [0..100] (skip:%d) : " % (0), 0), 0, 100)
 
         self.output_face_scale = np.clip (io.input_int ("Choose output face scale modifier [-50..50] (skip:0) : ", 0), -50, 50)
 
         if 'raw' not in self.mode:
-            self.color_transfer_mode = io.input_str ("Apply color transfer to predicted face? Choose mode ( rct/lct skip:None ) : ", None, ['rct','lct'])
-            self.color_transfer_mode = self.ctm_str_dict[self.color_transfer_mode]
+            self.color_transfer_mode = io.input_str ("Apply color transfer to predicted face? Choose mode ( rct/lct/ebs skip:None ) : ", None, ctm_str_dict.keys() )
+            self.color_transfer_mode = ctm_str_dict[self.color_transfer_mode]
 
         super().ask_settings()
@@ -284,9 +262,9 @@ class ConverterConfigMasked(ConverterConfig):
             r += f"""hist_match_threshold: {self.hist_match_threshold}\n"""
 
         if self.face_type == FaceType.FULL:
-            r += f"""mask_mode: { self.full_face_mask_mode_dict[self.mask_mode] }\n"""
+            r += f"""mask_mode: { full_face_mask_mode_dict[self.mask_mode] }\n"""
         else:
-            r += f"""mask_mode: { self.half_face_mask_mode_dict[self.mask_mode] }\n"""
+            r += f"""mask_mode: { half_face_mask_mode_dict[self.mask_mode] }\n"""
 
         if 'raw' not in self.mode:
             r += (f"""erode_mask_modifier: {self.erode_mask_modifier}\n"""
@@ -296,7 +274,7 @@ class ConverterConfigMasked(ConverterConfig):
             r += f"""output_face_scale: {self.output_face_scale}\n"""
 
         if 'raw' not in self.mode:
-            r += f"""color_transfer_mode: { self.ctm_dict[self.color_transfer_mode]}\n"""
+            r += f"""color_transfer_mode: { ctm_dict[self.color_transfer_mode]}\n"""
 
         r += super().__str__()
@@ -311,14 +289,8 @@ class ConverterConfigMasked(ConverterConfig):
 class ConverterConfigFaceAvatar(ConverterConfig):
 
-    def __init__(self, predictor_func=None,
-                       predictor_input_shape=None,
-                       temporal_face_count=0
-                       ):
-        super().__init__(type=ConverterConfig.TYPE_FACE_AVATAR,
-                         predictor_func=predictor_func,
-                         predictor_input_shape=predictor_input_shape
-                         )
+    def __init__(self, temporal_face_count=0):
+        super().__init__(type=ConverterConfig.TYPE_FACE_AVATAR)
         self.temporal_face_count = temporal_face_count
 
     #changeable params