merge - added support for custom type

seranus 2021-08-20 15:01:39 +02:00
commit 094af22636
4 changed files with 35 additions and 11 deletions

View file

@@ -122,7 +122,7 @@ def main (model_class_name=None,
alignments[ source_filename_stem ] = []
alignments_ar = alignments[ source_filename_stem ]
-alignments_ar.append ( (dflimg.get_source_landmarks(), filepath, source_filepath ) )
+alignments_ar.append ( (dflimg.get_source_landmarks(), filepath, source_filepath, dflimg ) )
if len(alignments_ar) > 1:
multiple_faces_detected = True
@@ -135,11 +135,11 @@ def main (model_class_name=None,
for a_key in list(alignments.keys()):
a_ar = alignments[a_key]
if len(a_ar) > 1:
-for _, filepath, source_filepath in a_ar:
+for _, filepath, source_filepath, _ in a_ar:
io.log_info (f"alignment {filepath.name} refers to {source_filepath.name} ")
io.log_info ("")
-alignments[a_key] = [ a[0] for a in a_ar]
+alignments[a_key] = [ [a[0], a[3]] for a in a_ar]
if multiple_faces_detected:
io.log_info ("It is strongly recommended to process the faces separatelly.")
@@ -147,7 +147,9 @@ def main (model_class_name=None,
io.log_info ("")
frames = [ InteractiveMergerSubprocessor.Frame( frame_info=FrameInfo(filepath=Path(p),
-landmarks_list=alignments.get(Path(p).stem, None)
+landmarks_list=[alignments.get(Path(p).stem, None)[0][0]] if alignments.get(Path(p).stem, None) != None else None,
+dfl_images_list=[alignments.get(Path(p).stem, None)[0][1]] if alignments.get(Path(p).stem, None) != None else None
+# landmarks_list = alignments_orig.get(Path(p).stem, None)
)
)
for p in input_path_image_paths ]
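
Note on the data flow set up by this file's hunks: each alignment entry now carries the DFLIMG object alongside the source landmarks, and FrameInfo receives both lists (the hunk above keeps only the first face's entry per frame, indices [0][0] / [0][1]). A minimal sketch of that pairing follows; the helper name build_frame_info is illustrative and not part of the commit, while DFLIMG.load, get_source_landmarks and the merger.FrameInfo path are taken from the existing code layout.

from pathlib import Path

from DFLIMG import DFLIMG                    # loader used elsewhere in DeepFaceLab
from merger.FrameInfo import FrameInfo       # extended by this commit

def build_frame_info(frame_path, aligned_paths):
    """Illustrative helper: pair each aligned face's landmarks with its DFLIMG object."""
    landmarks_list = []
    dfl_images_list = []
    for aligned_path in aligned_paths:
        dflimg = DFLIMG.load(Path(aligned_path))
        if dflimg is None:
            continue
        landmarks_list.append(dflimg.get_source_landmarks())
        dfl_images_list.append(dflimg)       # keep the metadata object, not just the landmarks
    return FrameInfo(filepath=Path(frame_path),
                     landmarks_list=landmarks_list,
                     dfl_images_list=dfl_images_list)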

View file

@@ -1,8 +1,9 @@
from pathlib import Path
class FrameInfo(object):
-def __init__(self, filepath=None, landmarks_list=None):
+def __init__(self, filepath=None, landmarks_list=None, dfl_images_list=None):
self.filepath = filepath
self.landmarks_list = landmarks_list or []
+self.dfl_images_list = dfl_images_list or []
self.motion_deg = 0
self.motion_power = 0
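
The new argument is optional and both lists default to empty, so existing call sites that pass only landmarks_list keep working. A quick sketch (import path assumed from the file layout, not shown in this diff):

from pathlib import Path
from merger.FrameInfo import FrameInfo

info = FrameInfo(filepath=Path("00001.png"))     # old-style call, no dfl_images_list
assert info.landmarks_list == [] and info.dfl_images_list == []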

View file

@@ -15,7 +15,7 @@ xseg_input_size = 256
def MergeMaskedFace (predictor_func, predictor_input_shape,
face_enhancer_func,
xseg_256_extract_func,
-cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmarks):
+cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmarks, dfl_img):
img_size = img_bgr.shape[1], img_bgr.shape[0]
img_face_mask_a = LandmarksProcessor.get_image_hull_mask (img_bgr.shape, img_face_landmarks)
@@ -26,13 +26,34 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,
if cfg.super_resolution_power != 0:
output_size *= 4
-face_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, output_size, face_type=cfg.face_type)
-face_output_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, output_size, face_type=cfg.face_type, scale= 1.0 + 0.01*cfg.output_face_scale)
+if cfg.face_type == FaceType.CUSTOM:
+    # resize
+    face_image_size = dfl_img.get_shape()[0]
+    frame_points = LandmarksProcessor.transform_points ( np.float32([(0, 0), (face_image_size, 0), (face_image_size, face_image_size)]),
+                                                         dfl_img.get_image_to_face_mat(), True)
+    face_mat = cv2.getAffineTransform(frame_points, np.float32(( (0,0),(output_size,0),(output_size,output_size) )))
+    scale_offset = 0.01 * cfg.output_face_scale
+    face_output_mat = cv2.getAffineTransform(frame_points, np.float32(( (0 + scale_offset,0 + scale_offset),
+                                                                        (output_size - scale_offset,0 + scale_offset),(output_size - scale_offset,output_size - scale_offset) )))
+else:
+    face_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, output_size, face_type=cfg.face_type)
+    face_output_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, output_size, face_type=cfg.face_type, scale= 1.0 + 0.01*cfg.output_face_scale)
if mask_subres_size == output_size:
face_mask_output_mat = face_output_mat
else:
-face_mask_output_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, mask_subres_size, face_type=cfg.face_type, scale= 1.0 + 0.01*cfg.output_face_scale)
+if cfg.face_type == FaceType.CUSTOM:
+    # resize
+    face_image_size = dfl_img.get_shape()[0]
+    frame_points = LandmarksProcessor.transform_points ( np.float32([(0, 0), (face_image_size, 0), (face_image_size, face_image_size)]),
+                                                         dfl_img.get_image_to_face_mat(), True)
+    scale_offset = 0.01 * cfg.output_face_scale
+    face_mask_output_mat = cv2.getAffineTransform(frame_points, np.float32(( (0 + scale_offset,0 + scale_offset),
+                                                                             (mask_subres_size - scale_offset,0 + scale_offset),(mask_subres_size - scale_offset,mask_subres_size - scale_offset) )))
+else:
+    face_mask_output_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, mask_subres_size, face_type=cfg.face_type, scale= 1.0 + 0.01*cfg.output_face_scale)
dst_face_bgr = cv2.warpAffine( img_bgr , face_mat, (output_size, output_size), flags=cv2.INTER_CUBIC )
dst_face_bgr = np.clip(dst_face_bgr, 0, 1)
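
The CUSTOM branches above replace the landmark-driven LandmarksProcessor.get_transform_mat with a transform recovered from the aligned image itself: three corners of the aligned face are mapped back into frame coordinates through the inverse of the stored image-to-face matrix, and cv2.getAffineTransform (which takes exactly three point pairs, enough to determine an affine map) then sends them onto the merger's output square. A standalone sketch of that math; the helper name custom_face_mats is ours, while the DFLIMG and LandmarksProcessor calls are the ones used in the hunk.

import cv2
import numpy as np
from facelib import LandmarksProcessor   # same helper module used in the hunk

def custom_face_mats(dfl_img, output_size, mask_subres_size, output_face_scale=0):
    """Sketch of the FaceType.CUSTOM path: derive warp matrices from the aligned
    image's stored image-to-face matrix instead of from detected landmarks."""
    face_image_size = dfl_img.get_shape()[0]   # aligned face images are square
    # Map three corners of the aligned face back into source-frame coordinates
    # (True = apply the inverse of the stored matrix).
    frame_points = LandmarksProcessor.transform_points(
        np.float32([(0, 0), (face_image_size, 0), (face_image_size, face_image_size)]),
        dfl_img.get_image_to_face_mat(), True)

    def to_square(size, offset=0.0):
        # Affine map sending the frame points onto an axis-aligned square of `size`,
        # optionally inset by `offset` pixels (the output_face_scale adjustment).
        target = np.float32([(offset, offset),
                             (size - offset, offset),
                             (size - offset, size - offset)])
        return cv2.getAffineTransform(frame_points, target)

    scale_offset = 0.01 * output_face_scale
    face_mat = to_square(output_size)                        # frame -> face crop
    face_output_mat = to_square(output_size, scale_offset)   # used when pasting back
    face_mask_output_mat = to_square(mask_subres_size, scale_offset)
    return face_mat, face_output_mat, face_mask_output_mat

Note that the scale adjustment here is a fixed pixel inset on the target square, unlike the multiplicative scale= factor passed to get_transform_mat in the non-CUSTOM branch; that is what the scale_offset arithmetic in the hunk does.
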
@@ -332,7 +353,7 @@ def MergeMasked (predictor_func,
outs = []
for face_num, img_landmarks in enumerate( frame_info.landmarks_list ):
-out_img, out_img_merging_mask = MergeMaskedFace (predictor_func, predictor_input_shape, face_enhancer_func, xseg_256_extract_func, cfg, frame_info, img_bgr_uint8, img_bgr, img_landmarks)
+out_img, out_img_merging_mask = MergeMaskedFace (predictor_func, predictor_input_shape, face_enhancer_func, xseg_256_extract_func, cfg, frame_info, img_bgr_uint8, img_bgr, img_landmarks, frame_info.dfl_images_list[face_num])
outs += [ (out_img, out_img_merging_mask) ]
#Combining multiple face outputs

View file

@@ -120,7 +120,7 @@ class MergerConfigMasked(MergerConfig):
super().__init__(type=MergerConfig.TYPE_MASKED, **kwargs)
self.face_type = face_type
-if self.face_type not in [FaceType.HALF, FaceType.MID_FULL, FaceType.FULL, FaceType.WHOLE_FACE, FaceType.HEAD ]:
+if self.face_type not in [FaceType.HALF, FaceType.MID_FULL, FaceType.FULL, FaceType.WHOLE_FACE, FaceType.HEAD, FaceType.CUSTOM ]:
raise ValueError("MergerConfigMasked does not support this type of face.")
self.default_mode = default_mode
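
With CUSTOM added to the whitelist, a masked merger config can be built for the new face type. A sketch, assuming the fork's FaceType enum defines CUSTOM (that enum change is not part of this diff) and that the class is importable from merger.MergerConfig:

from facelib import FaceType
from merger.MergerConfig import MergerConfigMasked

cfg = MergerConfigMasked(face_type=FaceType.CUSTOM, default_mode='overlay')  # previously raised ValueError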