mirror of
https://github.com/iperov/DeepFaceLab.git
synced 2025-08-20 21:43:21 -07:00
converter:
fixed crashes; removed useless 'ebs' color transfer; changed keys for color degrade; added image degrade via denoise — same as "denoise extracted data_dst.bat", but you can control this option directly in the interactive converter; added image degrade via bicubic downscale and upscale. SAEHD: default ae_dims for df is now 256.
This commit is contained in:
parent
374d8c2388
commit
770c70d778
8 changed files with 274 additions and 57 deletions
|
@ -8,10 +8,8 @@ from facelib import FaceType, LandmarksProcessor
|
|||
from interact import interact as io
|
||||
from utils.cv2_utils import *
|
||||
|
||||
|
||||
def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmarks):
|
||||
img_size = img_bgr.shape[1], img_bgr.shape[0]
|
||||
|
||||
img_face_mask_a = LandmarksProcessor.get_image_hull_mask (img_bgr.shape, img_face_landmarks)
|
||||
|
||||
if cfg.mode == 'original':
|
||||
|
@ -85,7 +83,7 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
|
|||
full_face_fanchq_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, cfg.fanchq_input_size, face_type=FaceType.FULL)
|
||||
dst_face_fanchq_bgr = cv2.warpAffine(img_bgr, full_face_fanchq_mat, (cfg.fanchq_input_size,)*2, flags=cv2.INTER_CUBIC )
|
||||
dst_face_fanchq_mask = cfg.fanchq_extract_func( FaceType.FULL, dst_face_fanchq_bgr )
|
||||
|
||||
|
||||
if cfg.face_type == FaceType.FULL:
|
||||
FANCHQ_dst_face_mask_a_0 = cv2.resize (dst_face_fanchq_mask, (output_size,output_size), cv2.INTER_CUBIC)
|
||||
else:
|
||||
|
@ -110,7 +108,7 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
|
|||
prd_face_mask_a_0 = prd_face_mask_a_0 * FAN_dst_face_mask_a_0
|
||||
#elif cfg.mask_mode == 8: #FANCHQ-dst
|
||||
# prd_face_mask_a_0 = FANCHQ_dst_face_mask_a_0
|
||||
|
||||
|
||||
prd_face_mask_a_0[ prd_face_mask_a_0 < 0.001 ] = 0.0
|
||||
|
||||
prd_face_mask_a = prd_face_mask_a_0[...,np.newaxis]
|
||||
|
@ -231,7 +229,7 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
|
|||
hist_match_2 = dst_face_bgr*hist_mask_a + white
|
||||
hist_match_2[ hist_match_1 > 1.0 ] = 1.0
|
||||
|
||||
prd_face_bgr = imagelib.color_hist_match(hist_match_1, hist_match_2, cfg.hist_match_threshold )
|
||||
prd_face_bgr = imagelib.color_hist_match(hist_match_1, hist_match_2, cfg.hist_match_threshold ).astype(dtype=np.float32)
|
||||
|
||||
if cfg.mode == 'hist-match-bw':
|
||||
prd_face_bgr = prd_face_bgr.astype(dtype=np.float32)
|
||||
|
@ -254,13 +252,11 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
|
|||
break
|
||||
|
||||
if cfg.mode == 'seamless2':
|
||||
|
||||
face_seamless = imagelib.seamless_clone ( prd_face_bgr, dst_face_bgr, img_face_seamless_mask_a )
|
||||
|
||||
out_img = cv2.warpAffine( face_seamless, face_output_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
|
||||
else:
|
||||
out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
|
||||
|
||||
|
||||
out_img = np.clip(out_img, 0.0, 1.0)
|
||||
|
||||
if 'seamless' in cfg.mode and cfg.mode != 'seamless2':
|
||||
|
@ -278,7 +274,8 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
|
|||
raise Exception("Seamless fail: " + e_str) #reraise MemoryError in order to reprocess this data by other processes
|
||||
else:
|
||||
print ("Seamless fail: " + e_str)
|
||||
|
||||
|
||||
|
||||
out_img = img_bgr*(1-img_face_mask_aaa) + (out_img*img_face_mask_aaa)
|
||||
|
||||
out_face_bgr = cv2.warpAffine( out_img, face_mat, (output_size, output_size) )
|
||||
|
@ -322,6 +319,23 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
|
|||
if cfg.blursharpen_amount != 0:
|
||||
out_face_bgr = cfg.blursharpen_func ( out_face_bgr, cfg.sharpen_mode, 3, cfg.blursharpen_amount)
|
||||
|
||||
|
||||
if cfg.image_denoise_power != 0:
|
||||
n = cfg.image_denoise_power
|
||||
while n > 0:
|
||||
img_bgr_denoised = cv2.medianBlur(img_bgr, 5)
|
||||
if int(n / 100) != 0:
|
||||
img_bgr = img_bgr_denoised
|
||||
else:
|
||||
pass_power = (n % 100) / 100.0
|
||||
img_bgr = img_bgr*(1.0-pass_power)+img_bgr_denoised*pass_power
|
||||
n = max(n-10,0)
|
||||
|
||||
if cfg.bicubic_degrade_power != 0:
|
||||
p = 1.0 - cfg.bicubic_degrade_power / 101.0
|
||||
img_bgr_downscaled = cv2.resize (img_bgr, ( int(img_size[0]*p), int(img_size[1]*p ) ), cv2.INTER_CUBIC)
|
||||
img_bgr = cv2.resize (img_bgr_downscaled, img_size, cv2.INTER_CUBIC)
|
||||
|
||||
new_out = cv2.warpAffine( out_face_bgr, face_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
|
||||
out_img = np.clip( img_bgr*(1-img_face_mask_aaa) + (new_out*img_face_mask_aaa) , 0, 1.0 )
|
||||
|
||||
|
|
|
@ -14,14 +14,20 @@ class ConverterConfig(object):
|
|||
TYPE_IMAGE = 3
|
||||
TYPE_IMAGE_WITH_LANDMARKS = 4
|
||||
|
||||
def __init__(self, type=0):
|
||||
def __init__(self, type=0,
|
||||
|
||||
super_resolution_mode=0,
|
||||
sharpen_mode=0,
|
||||
blursharpen_amount=0,
|
||||
**kwargs
|
||||
):
|
||||
self.type = type
|
||||
|
||||
self.superres_func = None
|
||||
self.blursharpen_func = None
|
||||
self.fanseg_input_size = None
|
||||
self.fanseg_extract_func = None
|
||||
|
||||
|
||||
self.fanchq_input_size = None
|
||||
self.fanchq_extract_func = None
|
||||
self.ebs_ct_func = None
|
||||
|
@ -30,9 +36,9 @@ class ConverterConfig(object):
|
|||
self.sharpen_dict = {0:"None", 1:'box', 2:'gaussian'}
|
||||
|
||||
#default changeable params
|
||||
self.super_resolution_mode = 0
|
||||
self.sharpen_mode = 0
|
||||
self.blursharpen_amount = 0
|
||||
self.super_resolution_mode = super_resolution_mode
|
||||
self.sharpen_mode = sharpen_mode
|
||||
self.blursharpen_amount = blursharpen_amount
|
||||
|
||||
def copy(self):
|
||||
return copy.copy(self)
|
||||
|
@ -65,6 +71,16 @@ class ConverterConfig(object):
|
|||
a = list( self.super_res_dict.keys() )
|
||||
self.super_resolution_mode = a[ (a.index(self.super_resolution_mode)+1) % len(a) ]
|
||||
|
||||
#overridable
|
||||
def get_config(self):
|
||||
d = self.__dict__.copy()
|
||||
d.pop('type')
|
||||
return d
|
||||
return {'sharpen_mode':self.sharpen_mode,
|
||||
'blursharpen_amount':self.blursharpen_amount,
|
||||
'super_resolution_mode':self.super_resolution_mode
|
||||
}
|
||||
|
||||
#overridable
|
||||
def __eq__(self, other):
|
||||
#check equality of changeable params
|
||||
|
@ -80,16 +96,16 @@ class ConverterConfig(object):
|
|||
def to_string(self, filename):
|
||||
r = ""
|
||||
r += f"sharpen_mode : {self.sharpen_dict[self.sharpen_mode]}\n"
|
||||
r += f"blursharpen_amount : {self.blursharpen_amount}\n"
|
||||
r += f"blursharpen_amount : {self.blursharpen_amount}\n"
|
||||
r += f"super_resolution_mode : {self.super_res_dict[self.super_resolution_mode]}\n"
|
||||
return r
|
||||
|
||||
|
||||
mode_dict = {0:'original',
|
||||
1:'overlay',
|
||||
2:'hist-match',
|
||||
3:'seamless2',
|
||||
4:'seamless',
|
||||
5:'seamless-hist-match',
|
||||
5:'seamless-hist-match',
|
||||
6:'raw-rgb',
|
||||
7:'raw-rgb-mask',
|
||||
8:'raw-mask-only',
|
||||
|
@ -115,10 +131,25 @@ class ConverterConfigMasked(ConverterConfig):
|
|||
def __init__(self, face_type=FaceType.FULL,
|
||||
default_mode = 4,
|
||||
clip_hborder_mask_per = 0,
|
||||
|
||||
mode='overlay',
|
||||
masked_hist_match=True,
|
||||
hist_match_threshold = 238,
|
||||
mask_mode = 1,
|
||||
erode_mask_modifier = 0,
|
||||
blur_mask_modifier = 0,
|
||||
motion_blur_power = 0,
|
||||
output_face_scale = 0,
|
||||
color_transfer_mode = 0,
|
||||
image_denoise_power = 0,
|
||||
bicubic_degrade_power = 0,
|
||||
color_degrade_power = 0,
|
||||
export_mask_alpha = False,
|
||||
**kwargs
|
||||
):
|
||||
|
||||
super().__init__(type=ConverterConfig.TYPE_MASKED)
|
||||
|
||||
super().__init__(type=ConverterConfig.TYPE_MASKED, **kwargs)
|
||||
|
||||
self.face_type = face_type
|
||||
if self.face_type not in [FaceType.HALF, FaceType.MID_FULL, FaceType.FULL ]:
|
||||
raise ValueError("ConverterConfigMasked does not support this type of face.")
|
||||
|
@ -127,17 +158,19 @@ class ConverterConfigMasked(ConverterConfig):
|
|||
self.clip_hborder_mask_per = clip_hborder_mask_per
|
||||
|
||||
#default changeable params
|
||||
self.mode = 'overlay'
|
||||
self.masked_hist_match = True
|
||||
self.hist_match_threshold = 238
|
||||
self.mask_mode = 1
|
||||
self.erode_mask_modifier = 0
|
||||
self.blur_mask_modifier = 0
|
||||
self.motion_blur_power = 0
|
||||
self.output_face_scale = 0
|
||||
self.color_transfer_mode = 0
|
||||
self.color_degrade_power = 0
|
||||
self.export_mask_alpha = False
|
||||
self.mode = mode
|
||||
self.masked_hist_match = masked_hist_match
|
||||
self.hist_match_threshold = hist_match_threshold
|
||||
self.mask_mode = mask_mode
|
||||
self.erode_mask_modifier = erode_mask_modifier
|
||||
self.blur_mask_modifier = blur_mask_modifier
|
||||
self.motion_blur_power = motion_blur_power
|
||||
self.output_face_scale = output_face_scale
|
||||
self.color_transfer_mode = color_transfer_mode
|
||||
self.image_denoise_power = image_denoise_power
|
||||
self.bicubic_degrade_power = bicubic_degrade_power
|
||||
self.color_degrade_power = color_degrade_power
|
||||
self.export_mask_alpha = export_mask_alpha
|
||||
|
||||
def copy(self):
|
||||
return copy.copy(self)
|
||||
|
@ -178,6 +211,12 @@ class ConverterConfigMasked(ConverterConfig):
|
|||
def add_color_degrade_power(self, diff):
|
||||
self.color_degrade_power = np.clip ( self.color_degrade_power+diff , 0, 100)
|
||||
|
||||
def add_image_denoise_power(self, diff):
|
||||
self.image_denoise_power = np.clip ( self.image_denoise_power+diff, 0, 500)
|
||||
|
||||
def add_bicubic_degrade_power(self, diff):
|
||||
self.bicubic_degrade_power = np.clip ( self.bicubic_degrade_power+diff, 0, 100)
|
||||
|
||||
def toggle_export_mask_alpha(self):
|
||||
self.export_mask_alpha = not self.export_mask_alpha
|
||||
|
||||
|
@ -227,6 +266,8 @@ class ConverterConfigMasked(ConverterConfig):
|
|||
super().ask_settings()
|
||||
|
||||
if 'raw' not in self.mode:
|
||||
self.image_denoise_power = np.clip ( io.input_int ("Choose image degrade by denoise power [0..500] (skip:%d) : " % (0), 0), 0, 500)
|
||||
self.bicubic_degrade_power = np.clip ( io.input_int ("Choose image degrade by bicubic rescale power [0..100] (skip:%d) : " % (0), 0), 0, 100)
|
||||
self.color_degrade_power = np.clip ( io.input_int ("Degrade color power of final image [0..100] (skip:0) : ", 0), 0, 100)
|
||||
self.export_mask_alpha = io.input_bool("Export png with alpha channel of the mask? (y/n skip:n) : ", False)
|
||||
|
||||
|
@ -246,6 +287,8 @@ class ConverterConfigMasked(ConverterConfig):
|
|||
self.motion_blur_power == other.motion_blur_power and \
|
||||
self.output_face_scale == other.output_face_scale and \
|
||||
self.color_transfer_mode == other.color_transfer_mode and \
|
||||
self.image_denoise_power == other.image_denoise_power and \
|
||||
self.bicubic_degrade_power == other.bicubic_degrade_power and \
|
||||
self.color_degrade_power == other.color_degrade_power and \
|
||||
self.export_mask_alpha == other.export_mask_alpha
|
||||
|
||||
|
@ -281,7 +324,9 @@ class ConverterConfigMasked(ConverterConfig):
|
|||
r += super().to_string(filename)
|
||||
|
||||
if 'raw' not in self.mode:
|
||||
r += (f"""color_degrade_power: {self.color_degrade_power}\n"""
|
||||
r += (f"""image_denoise_power: {self.image_denoise_power}\n"""
|
||||
f"""bicubic_degrade_power: {self.bicubic_degrade_power}\n"""
|
||||
f"""color_degrade_power: {self.color_degrade_power}\n"""
|
||||
f"""export_mask_alpha: {self.export_mask_alpha}\n""")
|
||||
|
||||
r += "================"
|
||||
|
@ -291,12 +336,13 @@ class ConverterConfigMasked(ConverterConfig):
|
|||
|
||||
class ConverterConfigFaceAvatar(ConverterConfig):
|
||||
|
||||
def __init__(self, temporal_face_count=0):
|
||||
def __init__(self, temporal_face_count=0,
|
||||
add_source_image=False):
|
||||
super().__init__(type=ConverterConfig.TYPE_FACE_AVATAR)
|
||||
self.temporal_face_count = temporal_face_count
|
||||
|
||||
#changeable params
|
||||
self.add_source_image = False
|
||||
self.add_source_image = add_source_image
|
||||
|
||||
def copy(self):
|
||||
return copy.copy(self)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue