Mirror of https://github.com/iperov/DeepFaceLab.git

Converter: added new color transfer modes: mkl, mkl-m, idt, idt-m

parent 7ed38a8097
commit bef4e5d33c

4 changed files with 109 additions and 93 deletions
@@ -10,10 +10,6 @@ from utils.cv2_utils import *
def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmarks):
- #if debug:
- # debugs = [img_bgr.copy()]
img_size = img_bgr.shape[1], img_bgr.shape[0]
img_face_mask_a = LandmarksProcessor.get_image_hull_mask (img_bgr.shape, img_face_landmarks)

@@ -51,13 +47,7 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
predictor_masked = False
if cfg.super_resolution_mode:
- #if debug:
- # tmp = cv2.resize (prd_face_bgr, (output_size,output_size), cv2.INTER_CUBIC)
- # debugs += [ np.clip( cv2.warpAffine( tmp, face_output_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
prd_face_bgr = cfg.superres_func(cfg.super_resolution_mode, prd_face_bgr)
- #if debug:
- # debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
if predictor_masked:
prd_face_mask_a_0 = cv2.resize (prd_face_mask_a_0, (output_size, output_size), cv2.INTER_CUBIC)

@@ -113,9 +103,6 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
img_face_mask_aaa = np.clip (img_face_mask_aaa, 0.0, 1.0)
img_face_mask_aaa [ img_face_mask_aaa <= 0.1 ] = 0.0 #get rid of noise
- #if debug:
- # debugs += [img_face_mask_aaa.copy()]
if 'raw' in cfg.mode:
face_corner_pts = np.array ([ [0,0], [output_size-1,0], [output_size-1,output_size-1], [0,output_size-1] ], dtype=np.float32)
square_mask = np.zeros(img_bgr.shape, dtype=np.float32)

@@ -157,14 +144,9 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
if len(ar) > 0:
lenx, leny = np.mean ( ar, axis=0 )
lowest_len = min (lenx, leny)
- #if debug:
- # io.log_info ("lenx/leny:(%d/%d) " % (lenx, leny ) )
- # io.log_info ("lowest_len = %f" % (lowest_len) )
if cfg.erode_mask_modifier != 0:
ero = int( lowest_len * ( 0.126 - lowest_len * 0.00004551365 ) * 0.01*cfg.erode_mask_modifier )
- #if debug:
- # io.log_info ("erode_size = %d" % (ero) )
if ero > 0:
img_face_mask_aaa = cv2.erode(img_face_mask_aaa, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ero,ero)), iterations = 1 )
elif ero < 0:

@@ -183,61 +165,42 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
img_face_mask_aaa *= img_prd_hborder_rect_mask_a
img_face_mask_aaa = np.clip( img_face_mask_aaa, 0, 1.0 )
- #if debug:
- # debugs += [img_face_mask_aaa.copy()]
if cfg.blur_mask_modifier > 0:
blur = int( lowest_len * 0.10 * 0.01*cfg.blur_mask_modifier )
- #if debug:
- # io.log_info ("blur_size = %d" % (blur) )
if blur > 0:
img_face_mask_aaa = cv2.blur(img_face_mask_aaa, (blur, blur) )
img_face_mask_aaa = np.clip( img_face_mask_aaa, 0, 1.0 )
- #if debug:
- # debugs += [img_face_mask_aaa.copy()]
if 'seamless' not in cfg.mode and cfg.color_transfer_mode != 0:
if cfg.color_transfer_mode == 1: #rct
- #if debug:
- # debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
prd_face_bgr = imagelib.reinhard_color_transfer ( np.clip( (prd_face_bgr*255).astype(np.uint8), 0, 255),
np.clip( (dst_face_bgr*255).astype(np.uint8), 0, 255),
source_mask=prd_face_mask_a, target_mask=prd_face_mask_a)
prd_face_bgr = np.clip( prd_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
- #if debug:
- # debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
elif cfg.color_transfer_mode == 2: #lct
- #if debug:
- # debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
prd_face_bgr = imagelib.linear_color_transfer (prd_face_bgr, dst_face_bgr)
prd_face_bgr = np.clip( prd_face_bgr, 0.0, 1.0)
+ elif cfg.color_transfer_mode == 3: #mkl
+ prd_face_bgr = imagelib.color_transfer_mkl (prd_face_bgr, dst_face_bgr)
+ elif cfg.color_transfer_mode == 4: #mkl-m
+ prd_face_bgr = imagelib.color_transfer_mkl (prd_face_bgr*prd_face_mask_a, dst_face_bgr*prd_face_mask_a)
+ elif cfg.color_transfer_mode == 5: #idt
+ prd_face_bgr = imagelib.color_transfer_idt (prd_face_bgr, dst_face_bgr)
+ elif cfg.color_transfer_mode == 6: #idt-m
+ prd_face_bgr = imagelib.color_transfer_idt (prd_face_bgr*prd_face_mask_a, dst_face_bgr*prd_face_mask_a)
- #if debug:
- # debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
- elif cfg.color_transfer_mode == 3: #ebs
- #if debug:
- # debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
+ elif cfg.color_transfer_mode == 7: #ebs
prd_face_bgr = cfg.ebs_ct_func ( np.clip( (dst_face_bgr*255), 0, 255).astype(np.uint8),
np.clip( (prd_face_bgr*255), 0, 255).astype(np.uint8), )#prd_face_mask_a
prd_face_bgr = np.clip( prd_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
if cfg.mode == 'hist-match-bw':
prd_face_bgr = cv2.cvtColor(prd_face_bgr, cv2.COLOR_BGR2GRAY)
prd_face_bgr = np.repeat( np.expand_dims (prd_face_bgr, -1), (3,), -1 )
if cfg.mode == 'hist-match' or cfg.mode == 'hist-match-bw':
- #if debug:
- # debugs += [ cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ) ]
hist_mask_a = np.ones ( prd_face_bgr.shape[:2] + (1,) , dtype=np.float32)
if cfg.masked_hist_match:

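Note on the new branches in the hunk above: the plain mkl and idt modes pass prd_face_bgr and dst_face_bgr to the new imagelib functions directly, while the -m variants first multiply both crops by prd_face_mask_a, so the background of the crop is zeroed out before the colour statistics are measured. Below is a minimal sketch of that dispatch, assuming float32 BGR crops in [0,1] and a broadcastable single-channel mask; the apply_color_transfer wrapper is hypothetical and only mirrors the branches shown above.

import numpy as np
import imagelib  # DeepFaceLab's imagelib package, extended by this commit

def apply_color_transfer(mode, prd_face_bgr, dst_face_bgr, prd_face_mask_a):
    # Hypothetical wrapper mirroring the color_transfer_mode dispatch above.
    if mode in (4, 6):                       # mkl-m / idt-m: zero the background first
        prd_face_bgr = prd_face_bgr * prd_face_mask_a
        dst_face_bgr = dst_face_bgr * prd_face_mask_a
    if mode in (3, 4):                       # mkl / mkl-m
        return imagelib.color_transfer_mkl(prd_face_bgr, dst_face_bgr)
    if mode in (5, 6):                       # idt / idt-m
        return imagelib.color_transfer_idt(prd_face_bgr, dst_face_bgr)
    return prd_face_bgr                      # other modes are handled elsewhere in the converter
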
@@ -253,19 +216,16 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
prd_face_bgr = imagelib.color_hist_match(hist_match_1, hist_match_2, cfg.hist_match_threshold )
- #if cfg.masked_hist_match:
- # prd_face_bgr -= white
if cfg.mode == 'hist-match-bw':
prd_face_bgr = prd_face_bgr.astype(dtype=np.float32)
if 'seamless' in cfg.mode:
#mask used for cv2.seamlessClone
img_face_mask_a = img_face_mask_aaa[...,0:1]
if cfg.mode == 'seamless2':
img_face_mask_a = cv2.warpAffine( img_face_mask_a, face_output_mat, (output_size, output_size), flags=cv2.INTER_CUBIC )
img_face_seamless_mask_a = None
for i in range(1,10):
a = img_face_mask_a > i / 10.0

@@ -275,15 +235,15 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
img_face_seamless_mask_a[a] = 1.0
img_face_seamless_mask_a[img_face_seamless_mask_a <= i / 10.0] = 0.0
break
if cfg.mode == 'seamless2':
face_seamless = imagelib.seamless_clone ( prd_face_bgr, dst_face_bgr, img_face_seamless_mask_a )
out_img = cv2.warpAffine( face_seamless, face_output_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
else:
out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
out_img = np.clip(out_img, 0.0, 1.0)
if 'seamless' in cfg.mode and cfg.mode != 'seamless2':

@@ -302,48 +262,37 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
else:
print ("Seamless fail: " + e_str)
- #if debug:
- # debugs += [out_img.copy()]
out_img = img_bgr*(1-img_face_mask_aaa) + (out_img*img_face_mask_aaa)
out_face_bgr = cv2.warpAffine( out_img, face_mat, (output_size, output_size) )
if 'seamless' in cfg.mode and cfg.color_transfer_mode != 0:
if cfg.color_transfer_mode == 1:
- #if debug:
- # debugs += [ np.clip( cv2.warpAffine( out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
face_mask_aaa = cv2.warpAffine( img_face_mask_aaa, face_mat, (output_size, output_size) )
out_face_bgr = imagelib.reinhard_color_transfer ( np.clip( (out_face_bgr*255), 0, 255).astype(np.uint8),
np.clip( (dst_face_bgr*255), 0, 255).astype(np.uint8),
source_mask=face_mask_aaa, target_mask=face_mask_aaa)
out_face_bgr = np.clip( out_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
- #if debug:
- # debugs += [ np.clip( cv2.warpAffine( out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
- elif cfg.color_transfer_mode == 2:
- #if debug:
- # debugs += [ np.clip( cv2.warpAffine( out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
+ elif cfg.color_transfer_mode == 2: #lct
out_face_bgr = imagelib.linear_color_transfer (out_face_bgr, dst_face_bgr)
out_face_bgr = np.clip( out_face_bgr, 0.0, 1.0)
- #if debug:
- # debugs += [ np.clip( cv2.warpAffine( out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
- elif cfg.color_transfer_mode == 3: #ebs
- #if debug:
- # debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
+ elif cfg.color_transfer_mode == 3: #mkl
+ out_face_bgr = imagelib.color_transfer_mkl (out_face_bgr, dst_face_bgr)
+ elif cfg.color_transfer_mode == 4: #mkl-m
+ out_face_bgr = imagelib.color_transfer_mkl (out_face_bgr*prd_face_mask_a, dst_face_bgr*prd_face_mask_a)
+ elif cfg.color_transfer_mode == 5: #idt
+ out_face_bgr = imagelib.color_transfer_idt (out_face_bgr, dst_face_bgr)
+ elif cfg.color_transfer_mode == 6: #idt-m
+ out_face_bgr = imagelib.color_transfer_idt (out_face_bgr*prd_face_mask_a, dst_face_bgr*prd_face_mask_a)
+ elif cfg.color_transfer_mode == 7: #ebs
out_face_bgr = cfg.ebs_ct_func ( np.clip( (dst_face_bgr*255), 0, 255).astype(np.uint8),
np.clip( (out_face_bgr*255), 0, 255).astype(np.uint8), )
out_face_bgr = np.clip( out_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
if cfg.mode == 'seamless-hist-match':
out_face_bgr = imagelib.color_hist_match(out_face_bgr, dst_face_bgr, cfg.hist_match_threshold)
cfg_mp = cfg.motion_blur_power / 100.0
if cfg_mp != 0:
k_size = int(frame_info.motion_power*cfg_mp)

@@ -355,14 +304,11 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
if cfg.sharpen_mode != 0 and cfg.sharpen_amount != 0:
out_face_bgr = cfg.sharpen_func ( out_face_bgr, cfg.sharpen_mode, 3, cfg.sharpen_amount)
new_out = cv2.warpAffine( out_face_bgr, face_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
out_img = np.clip( img_bgr*(1-img_face_mask_aaa) + (new_out*img_face_mask_aaa) , 0, 1.0 )
if cfg.color_degrade_power != 0:
- #if debug:
- # debugs += [out_img.copy()]
out_img_reduced = imagelib.reduce_colors(out_img, 256)
if cfg.color_degrade_power == 100:
out_img = out_img_reduced

@@ -374,10 +320,6 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
out_img = np.concatenate ( [out_img, img_face_mask_aaa[:,:,0:1]], -1 )
out_merging_mask = img_face_mask_aaa
- #if debug:
- # debugs += [out_img.copy()]
return out_img, out_merging_mask

@@ -105,8 +105,8 @@ half_face_mask_mode_dict = {1:'learned',
4:'FAN-dst',
7:'learned*FAN-dst'}
- ctm_dict = { 0: "None", 1:"rct", 2:"lct", 3:"ebs" }
+ ctm_dict = { 0: "None", 1:"rct", 2:"lct", 3:"mkl", 4:"mkl-m", 5:"idt", 6:"idt-m", 7:"ebs" }
- ctm_str_dict = {None:0, "rct":1, "lct": 2, "ebs":3 }
+ ctm_str_dict = {None:0, "rct":1, "lct":2, "mkl":3, "mkl-m":4, "idt":5, "idt-m":6, "ebs":7 }
class ConverterConfigMasked(ConverterConfig):

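The two dictionaries above are the user-facing side of the change: ebs moves from id 3 to id 7 to make room for the four new modes. Below is a small illustrative sketch of how a mode name maps to the integer stored in cfg.color_transfer_mode; the dictionaries are copied from the diff, while the resolve helper and the assertions are hypothetical additions for illustration only.

# Mode tables as added by this commit (copied from the diff above).
ctm_dict     = { 0: "None", 1:"rct", 2:"lct", 3:"mkl", 4:"mkl-m", 5:"idt", 6:"idt-m", 7:"ebs" }
ctm_str_dict = {None:0, "rct":1, "lct":2, "mkl":3, "mkl-m":4, "idt":5, "idt-m":6, "ebs":7 }

def resolve_color_transfer_mode(value):
    # Hypothetical helper: accept an int id or a mode name and return the
    # integer that would be stored in cfg.color_transfer_mode.
    if isinstance(value, int):
        return value if value in ctm_dict else 0
    return ctm_str_dict.get(value, 0)

assert resolve_color_transfer_mode("idt-m") == 6
assert ctm_dict[resolve_color_transfer_mode("ebs")] == "ebs"   # ebs moved from id 3 to id 7
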
@@ -11,7 +11,7 @@ from .warp import gen_warp_params, warp_by_params
from .reduce_colors import reduce_colors
- from .color_transfer import color_hist_match, reinhard_color_transfer, linear_color_transfer, seamless_clone
+ from .color_transfer import color_transfer_mkl, color_transfer_idt, color_hist_match, reinhard_color_transfer, linear_color_transfer, seamless_clone
from .RankSRGAN import RankSRGAN

@@ -1,9 +1,83 @@
import cv2
import numpy as np
+ import scipy as sp
import scipy.sparse
from scipy.sparse.linalg import spsolve

+ def color_transfer_mkl(x0, x1):
+     eps = np.finfo(float).eps
+
+     h,w,c = x0.shape
+     h1,w1,c1 = x1.shape
+
+     x0 = x0.reshape ( (h*w,c) )
+     x1 = x1.reshape ( (h1*w1,c1) )
+
+     a = np.cov(x0.T)
+     b = np.cov(x1.T)
+
+     Da2, Ua = np.linalg.eig(a)
+     Da = np.diag(np.sqrt(Da2.clip(eps, None)))
+
+     C = np.dot(np.dot(np.dot(np.dot(Da, Ua.T), b), Ua), Da)
+
+     Dc2, Uc = np.linalg.eig(C)
+     Dc = np.diag(np.sqrt(Dc2.clip(eps, None)))
+
+     Da_inv = np.diag(1./(np.diag(Da)))
+
+     t = np.dot(np.dot(np.dot(np.dot(np.dot(np.dot(Ua, Da_inv), Uc), Dc), Uc.T), Da_inv), Ua.T)
+
+     mx0 = np.mean(x0, axis=0)
+     mx1 = np.mean(x1, axis=0)
+
+     result = np.dot(x0-mx0, t) + mx1
+     return np.clip ( result.reshape ( (h,w,c) ), 0, 1)

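color_transfer_mkl above appears to be the closed-form linear Monge-Kantorovitch mapping: it matches the mean and covariance of the source pixels to those of the target. Both arguments are expected to be float images in a [0,1] range with the same channel count (their spatial sizes may differ), and the output is clipped back to [0,1]. A hypothetical usage sketch follows; the file names are placeholders.

import cv2
import numpy as np
from imagelib import color_transfer_mkl  # exported via imagelib/__init__.py in this commit

# Placeholder inputs: two BGR crops loaded with OpenCV and scaled to [0,1].
src = cv2.imread("predicted_face.png").astype(np.float32) / 255.0
ref = cv2.imread("destination_face.png").astype(np.float32) / 255.0

out = color_transfer_mkl(src, ref)   # src recoloured toward ref's colour statistics
cv2.imwrite("recoloured_mkl.png", (out * 255).astype(np.uint8))
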
+ def color_transfer_idt(i0, i1, bins=256, n_rot=20):
+     relaxation = 1 / n_rot
+     h,w,c = i0.shape
+     h1,w1,c1 = i1.shape
+
+     i0 = i0.reshape ( (h*w,c) )
+     i1 = i1.reshape ( (h1*w1,c1) )
+
+     n_dims = c
+
+     d0 = i0.T
+     d1 = i1.T
+
+     for i in range(n_rot):
+
+         r = sp.stats.special_ortho_group.rvs(n_dims).astype(np.float32)
+
+         d0r = np.dot(r, d0)
+         d1r = np.dot(r, d1)
+         d_r = np.empty_like(d0)
+
+         for j in range(n_dims):
+
+             lo = min(d0r[j].min(), d1r[j].min())
+             hi = max(d0r[j].max(), d1r[j].max())
+
+             p0r, edges = np.histogram(d0r[j], bins=bins, range=[lo, hi])
+             p1r, _     = np.histogram(d1r[j], bins=bins, range=[lo, hi])
+
+             cp0r = p0r.cumsum().astype(np.float32)
+             cp0r /= cp0r[-1]
+
+             cp1r = p1r.cumsum().astype(np.float32)
+             cp1r /= cp1r[-1]
+
+             f = np.interp(cp0r, cp1r, edges[1:])
+
+             d_r[j] = np.interp(d0r[j], edges[1:], f, left=0, right=bins)
+
+         d0 = relaxation * np.linalg.solve(r, (d_r - d0r)) + d0
+
+     return np.clip ( d0.T.reshape ( (h,w,c) ), 0, 1)

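color_transfer_idt above follows the iterative distribution transfer recipe: on each of n_rot iterations it rotates both colour point clouds by a random orthogonal matrix (sp.stats.special_ortho_group), matches the 1-D histograms along every rotated axis, and relaxes the source cloud toward the matched one by 1/n_rot. Note that the file only imports scipy as sp and scipy.sparse, so depending on the SciPy version sp.stats may require an explicit import scipy.stats. A hypothetical usage sketch with the default parameters follows; file names are placeholders.

import cv2
import numpy as np
from imagelib import color_transfer_idt  # exported via imagelib/__init__.py in this commit

src = cv2.imread("predicted_face.png").astype(np.float32) / 255.0
ref = cv2.imread("destination_face.png").astype(np.float32) / 255.0

# bins and n_rot are the defaults from the signature; more rotations track the
# full colour distribution more closely at the cost of extra runtime.
out = color_transfer_idt(src, ref, bins=256, n_rot=20)
cv2.imwrite("recoloured_idt.png", (out * 255).astype(np.uint8))
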
def laplacian_matrix(n, m):
    mat_D = scipy.sparse.lil_matrix((m, m))
    mat_D.setdiag(-1, -1)