Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-08-20 21:43:21 -07:00)
Updated PDF manuals for the AVATAR model.
Avatar converter: added a super resolution option. All converters: the DCSCN super resolution network is now replaced by RankSRGAN.
This commit is contained in:
parent 19c66286da
commit c39ed9d9c9
15 changed files with 161 additions and 188 deletions
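For orientation before the hunks below: the commit swaps the single super_resolution boolean for an integer super_resolution_mode selected from a small dict, and routes the predicted face through cfg.superres_func(mode, img). The following is a minimal, self-contained sketch of that dispatch pattern; fake_rank_srgan_upscale and SUPER_RES_DICT are hypothetical stand-ins, since the actual RankSRGAN model wiring is not part of the hunks shown here.

import numpy as np

# Mode table as introduced by this commit; 0 disables super resolution.
SUPER_RES_DICT = {0: "None", 1: "RankSRGAN"}

def fake_rank_srgan_upscale(img):
    # Hypothetical stand-in for the RankSRGAN network: a real implementation
    # would run a trained model; here we just do a nearest-neighbour 2x upscale.
    return np.repeat(np.repeat(img, 2, axis=0), 2, axis=1)

def superres_func(mode, img):
    # Dispatch on the integer mode, mirroring how the converters call
    # cfg.superres_func(cfg.super_resolution_mode, prd_face_bgr).
    if mode == 1:
        return fake_rank_srgan_upscale(img)
    return img

face = np.random.rand(64, 64, 3).astype(np.float32)
print(superres_func(1, face).shape)   # (128, 128, 3)
print(superres_func(0, face).shape)   # (64, 64, 3)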
@@ -26,6 +26,9 @@ def ConvertFaceAvatar (cfg, prev_temporal_frame_infos, frame_info, next_temporal
     prd_f = cfg.predictor_func ( prev_imgs, img, next_imgs )
 
+    if cfg.super_resolution_mode != 0:
+        prd_f = cfg.superres_func(cfg.super_resolution_mode, prd_f)
+
     out_img = np.clip(prd_f, 0.0, 1.0)
 
     if cfg.add_source_image:
@@ -27,7 +27,7 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
     out_merging_mask = None
 
     output_size = cfg.predictor_input_shape[0]
-    if cfg.super_resolution:
+    if cfg.super_resolution_mode != 0:
         output_size *= 2
 
     face_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, output_size, face_type=cfg.face_type)
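A small worked example of the size bookkeeping above, under an assumed 128px predictor: when a super resolution mode is selected, the face is extracted at twice the predictor input size so the upscaled prediction fills it.

predictor_input_shape = (128, 128, 3)   # assumed example model resolution
super_resolution_mode = 1               # RankSRGAN selected

output_size = predictor_input_shape[0]
if super_resolution_mode != 0:
    output_size *= 2
print(output_size)  # 256: the aligned face is warped out at 2x the predictor size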
@@ -48,12 +48,12 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
     prd_face_bgr = np.clip (predicted, 0, 1.0 )
     prd_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, cfg.predictor_input_shape[0:2] )
 
-    if cfg.super_resolution:
+    if cfg.super_resolution_mode:
         #if debug:
         #    tmp = cv2.resize (prd_face_bgr, (output_size,output_size), cv2.INTER_CUBIC)
         #    debugs += [ np.clip( cv2.warpAffine( tmp, face_output_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
 
-        prd_face_bgr = cfg.dcscn_upscale_func(prd_face_bgr)
+        prd_face_bgr = cfg.superres_func(cfg.super_resolution_mode, prd_face_bgr)
         #if debug:
         #    debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
 
@@ -335,7 +335,7 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
             k_size = int(frame_info.motion_power*cfg_mp)
             if k_size >= 1:
                 k_size = np.clip (k_size+1, 2, 50)
-                if cfg.super_resolution:
+                if cfg.super_resolution_mode:
                     k_size *= 2
                 out_face_bgr = cv2.warpAffine( out_img, face_mat, (output_size, output_size) )
                 new_out_face_bgr = imagelib.LinearMotionBlur (out_face_bgr, k_size , frame_info.motion_deg)
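The motion-blur kernel is likewise doubled when super resolution is active, presumably so the blur stays proportional on the 2x face. A quick arithmetic check with assumed values (frame_info.motion_power and cfg_mp are not defined in this hunk):

import numpy as np

motion_power = 0.35            # assumed per-frame motion estimate
cfg_mp = 100                   # assumed motion-blur scaling from the config
super_resolution_mode = 1

k_size = int(motion_power * cfg_mp)          # 35
if k_size >= 1:
    k_size = np.clip(k_size + 1, 2, 50)      # 36
    if super_resolution_mode:
        k_size *= 2                          # 72 on the 2x-sized face
print(k_size)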
@@ -92,7 +92,7 @@ class ConverterConfigMasked(ConverterConfig):
         self.motion_blur_power = 0
         self.output_face_scale = 0
         self.color_transfer_mode = 0
-        self.super_resolution = False
+        self.super_resolution_mode = 0
         self.color_degrade_power = 0
         self.export_mask_alpha = False
 
@@ -118,9 +118,11 @@ class ConverterConfigMasked(ConverterConfig):
                                2:'dst',
                                4:'FAN-dst',
                                7:'learned*FAN-dst'}
 
         self.ctm_dict = { 0: "None", 1:"rct", 2:"lct" }
         self.ctm_str_dict = {None:0, "rct":1, "lct": 2 }
 
+        self.super_res_dict = {0:"None", 1:'RankSRGAN'}
+
     def copy(self):
         return copy.copy(self)
@@ -158,8 +160,9 @@ class ConverterConfigMasked(ConverterConfig):
     def toggle_color_transfer_mode(self):
         self.color_transfer_mode = (self.color_transfer_mode+1) % 3
 
-    def toggle_super_resolution(self):
-        self.super_resolution = not self.super_resolution
+    def toggle_super_resolution_mode(self):
+        a = list( self.super_res_dict.keys() )
+        self.super_resolution_mode = a[ (a.index(self.super_resolution_mode)+1) % len(a) ]
 
     def add_color_degrade_power(self, diff):
         self.color_degrade_power = np.clip ( self.color_degrade_power+diff , 0, 100)
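The new toggle cycles through the keys of super_res_dict instead of flipping a boolean, so adding another upscaler later only means adding a dict entry. A standalone illustration of the cycling pattern (values copied from the diff):

super_res_dict = {0: "None", 1: "RankSRGAN"}

def next_mode(mode):
    # Advance to the next key and wrap around, as toggle_super_resolution_mode does.
    keys = list(super_res_dict.keys())
    return keys[(keys.index(mode) + 1) % len(keys)]

mode = 0
mode = next_mode(mode)   # 1 -> "RankSRGAN"
mode = next_mode(mode)   # 0 -> "None"
print(mode, super_res_dict[mode])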
@@ -210,8 +213,13 @@ class ConverterConfigMasked(ConverterConfig):
             self.color_transfer_mode = io.input_str ("Apply color transfer to predicted face? Choose mode ( rct/lct skip:None ) : ", None, ['rct','lct'])
             self.color_transfer_mode = self.ctm_str_dict[self.color_transfer_mode]
 
-        self.super_resolution = io.input_bool("Apply super resolution? (y/n ?:help skip:n) : ", False, help_message="Enhance details by applying DCSCN network.")
+        s = """Choose super resolution mode: \n"""
+        for key in self.super_res_dict.keys():
+            s += f"""({key}) {self.super_res_dict[key]}\n"""
+        s += f"""?:help Default: {list(self.super_res_dict.keys())[0]} : """
+        self.super_resolution_mode = io.input_int (s, 0, valid_list=self.super_res_dict.keys(), help_message="Enhance details by applying superresolution network.")
 
         if 'raw' not in self.mode:
             self.color_degrade_power = np.clip ( io.input_int ("Degrade color power of final image [0..100] (skip:0) : ", 0), 0, 100)
             self.export_mask_alpha = io.input_bool("Export png with alpha channel of the mask? (y/n skip:n) : ", False)
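With the two-entry dict from this commit, the prompt string built in ask_settings comes out as shown below; this snippet only rebuilds the same string outside the class to make the resulting console text visible (io.input_int is DeepFaceLab's interactive helper and is not reproduced here).

super_res_dict = {0: "None", 1: "RankSRGAN"}

s = """Choose super resolution mode: \n"""
for key in super_res_dict.keys():
    s += f"""({key}) {super_res_dict[key]}\n"""
s += f"""?:help Default: {list(super_res_dict.keys())[0]} : """

print(s)
# Choose super resolution mode:
# (0) None
# (1) RankSRGAN
# ?:help Default: 0 :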
@@ -231,7 +239,7 @@ class ConverterConfigMasked(ConverterConfig):
                self.motion_blur_power == other.motion_blur_power and \
                self.output_face_scale == other.output_face_scale and \
                self.color_transfer_mode == other.color_transfer_mode and \
-               self.super_resolution == other.super_resolution and \
+               self.super_resolution_mode == other.super_resolution_mode and \
                self.color_degrade_power == other.color_degrade_power and \
                self.export_mask_alpha == other.export_mask_alpha
 
@@ -264,7 +272,7 @@ class ConverterConfigMasked(ConverterConfig):
         if 'raw' not in self.mode:
             r += f"""color_transfer_mode: { self.ctm_dict[self.color_transfer_mode]}\n"""
 
-        r += f"""super_resolution: {self.super_resolution}\n"""
+        r += f"""super_resolution_mode: {self.super_res_dict[self.super_resolution_mode]}\n"""
 
         if 'raw' not in self.mode:
             r += (f"""color_degrade_power: {self.color_degrade_power}\n"""
@@ -289,6 +297,8 @@ class ConverterConfigFaceAvatar(ConverterConfig):
 
         #changeable params
         self.add_source_image = False
+        self.super_resolution_mode = 0
+        self.super_res_dict = {0:"None", 1:'RankSRGAN'}
 
     def copy(self):
         return copy.copy(self)
@@ -296,22 +306,33 @@ class ConverterConfigFaceAvatar(ConverterConfig):
     #override
     def ask_settings(self):
         self.add_source_image = io.input_bool("Add source image? (y/n ?:help skip:n) : ", False, help_message="Add source image for comparison.")
 
+        s = """Choose super resolution mode: \n"""
+        for key in self.super_res_dict.keys():
+            s += f"""({key}) {self.super_res_dict[key]}\n"""
+        s += f"""?:help Default: {list(self.super_res_dict.keys())[0]} : """
+        self.super_resolution_mode = io.input_int (s, 0, valid_list=self.super_res_dict.keys(), help_message="Enhance details by applying superresolution network.")
+
     def toggle_add_source_image(self):
         self.add_source_image = not self.add_source_image
 
+    def toggle_super_resolution_mode(self):
+        a = list( self.super_res_dict.keys() )
+        self.super_resolution_mode = a[ (a.index(self.super_resolution_mode)+1) % len(a) ]
+
     #override
     def __eq__(self, other):
         #check equality of changeable params
 
         if isinstance(other, ConverterConfigFaceAvatar):
-            return self.add_source_image == other.add_source_image
+            return self.add_source_image == other.add_source_image and \
+                   self.super_resolution_mode == other.super_resolution_mode
 
         return False
 
     #override
     def __str__(self):
         return ("ConverterConfig: \n"
                 f"add_source_image : {self.add_source_image}\n"
+                f"super_resolution_mode : {self.super_res_dict[self.super_resolution_mode]}\n"
                 "================"
                 )