diff --git a/converters/ConvertAvatar.py b/converters/ConvertAvatar.py
index 28e5c52..4d7f5d0 100644
--- a/converters/ConvertAvatar.py
+++ b/converters/ConvertAvatar.py
@@ -29,6 +29,9 @@ def ConvertFaceAvatar (cfg, prev_temporal_frame_infos, frame_info, next_temporal
     if cfg.super_resolution_mode != 0:
         prd_f = cfg.superres_func(cfg.super_resolution_mode, prd_f)
 
+    if cfg.sharpen_mode != 0 and cfg.sharpen_amount != 0:
+        prd_f = cfg.sharpen_func ( prd_f, cfg.sharpen_mode, 0.003, cfg.sharpen_amount)
+
     out_img = np.clip(prd_f, 0.0, 1.0)
 
     if cfg.add_source_image:
diff --git a/converters/ConvertMasked.py b/converters/ConvertMasked.py
index 3130842..5641d3d 100644
--- a/converters/ConvertMasked.py
+++ b/converters/ConvertMasked.py
@@ -293,43 +293,35 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
 
                 out_img = img_bgr*(1-img_face_mask_aaa) + (out_img*img_face_mask_aaa)
 
+            out_face_bgr = cv2.warpAffine( out_img, face_mat, (output_size, output_size) )
+
             if 'seamless' in cfg.mode and cfg.color_transfer_mode != 0:
-                out_face_bgr = cv2.warpAffine( out_img, face_mat, (output_size, output_size) )
-
                 if cfg.color_transfer_mode == 1:
                     #if debug:
                     #    debugs += [ np.clip( cv2.warpAffine( out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
 
                     face_mask_aaa = cv2.warpAffine( img_face_mask_aaa, face_mat, (output_size, output_size) )
 
-                    new_out_face_bgr = imagelib.reinhard_color_transfer ( np.clip( (out_face_bgr*255).astype(np.uint8), 0, 255),
+                    out_face_bgr = imagelib.reinhard_color_transfer ( np.clip( (out_face_bgr*255).astype(np.uint8), 0, 255),
                                                                           np.clip( (dst_face_bgr*255).astype(np.uint8), 0, 255),
                                                                           source_mask=face_mask_aaa, target_mask=face_mask_aaa)
-                    new_out_face_bgr = np.clip( new_out_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
+                    out_face_bgr = np.clip( out_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
 
                     #if debug:
-                    #    debugs += [ np.clip( cv2.warpAffine( new_out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
-
+                    #    debugs += [ np.clip( cv2.warpAffine( out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
                 elif cfg.color_transfer_mode == 2:
                     #if debug:
                     #    debugs += [ np.clip( cv2.warpAffine( out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
 
-                    new_out_face_bgr = imagelib.linear_color_transfer (out_face_bgr, dst_face_bgr)
-                    new_out_face_bgr = np.clip( new_out_face_bgr, 0.0, 1.0)
+                    out_face_bgr = imagelib.linear_color_transfer (out_face_bgr, dst_face_bgr)
+                    out_face_bgr = np.clip( out_face_bgr, 0.0, 1.0)
 
                     #if debug:
-                    #    debugs += [ np.clip( cv2.warpAffine( new_out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
-
-                new_out = cv2.warpAffine( new_out_face_bgr, face_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
-                out_img = np.clip( img_bgr*(1-img_face_mask_aaa) + (new_out*img_face_mask_aaa) , 0, 1.0 )
-
+                    #    debugs += [ np.clip( cv2.warpAffine( out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
+
             if cfg.mode == 'seamless-hist-match':
-                out_face_bgr = cv2.warpAffine( out_img, face_mat, (output_size, output_size) )
-                new_out_face_bgr = imagelib.color_hist_match(out_face_bgr, dst_face_bgr, cfg.hist_match_threshold)
-                new_out = cv2.warpAffine( new_out_face_bgr, face_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
-                out_img = np.clip( img_bgr*(1-img_face_mask_aaa) + (new_out*img_face_mask_aaa) , 0, 1.0 )
-
-
+                out_face_bgr = imagelib.color_hist_match(out_face_bgr, dst_face_bgr, cfg.hist_match_threshold)
+
             cfg_mp = cfg.motion_blur_power / 100.0
             if cfg_mp != 0:
                 k_size = int(frame_info.motion_power*cfg_mp)
@@ -337,10 +329,14 @@ def ConvertMaskedFace (cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmar
                 k_size = np.clip (k_size+1, 2, 50)
                 if cfg.super_resolution_mode:
                     k_size *= 2
-                out_face_bgr = cv2.warpAffine( out_img, face_mat, (output_size, output_size) )
-                new_out_face_bgr = imagelib.LinearMotionBlur (out_face_bgr, k_size , frame_info.motion_deg)
-                new_out = cv2.warpAffine( new_out_face_bgr, face_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
-                out_img = np.clip( img_bgr*(1-img_face_mask_aaa) + (new_out*img_face_mask_aaa) , 0, 1.0 )
+                out_face_bgr = imagelib.LinearMotionBlur (out_face_bgr, k_size , frame_info.motion_deg)
+
+            if cfg.sharpen_mode != 0 and cfg.sharpen_amount != 0:
+                out_face_bgr = cfg.sharpen_func ( out_face_bgr, cfg.sharpen_mode, 0.003, cfg.sharpen_amount)
+
+            new_out = cv2.warpAffine( out_face_bgr, face_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
+            out_img = np.clip( img_bgr*(1-img_face_mask_aaa) + (new_out*img_face_mask_aaa) , 0, 1.0 )
+
             if cfg.color_degrade_power != 0:
                 #if debug:
diff --git a/converters/ConverterConfig.py b/converters/ConverterConfig.py
index dc43774..e309238 100644
--- a/converters/ConverterConfig.py
+++ b/converters/ConverterConfig.py
@@ -20,29 +20,69 @@ class ConverterConfig(object):
         self.predictor_func = predictor_func
         self.predictor_input_shape = predictor_input_shape
 
-        self.dcscn_upscale_func = None
+        self.superres_func = None
+        self.sharpen_func = None
         self.fanseg_input_size = None
         self.fanseg_extract_func = None
 
+        self.super_res_dict = {0:"None", 1:'RankSRGAN'}
+        self.sharpen_dict = {0:"None", 1:'box', 2:'gaussian'}
+
+        #default changeable params
+        self.super_resolution_mode = 0
+        self.sharpen_mode = 0
+        self.sharpen_amount = 0
+
     def copy(self):
         return copy.copy(self)
 
     #overridable
     def ask_settings(self):
-        pass
+        s = """Choose sharpen mode: \n"""
+        for key in self.sharpen_dict.keys():
+            s += f"""({key}) {self.sharpen_dict[key]}\n"""
+        s += f"""?:help Default: {list(self.sharpen_dict.keys())[0]} : """
+        self.sharpen_mode = io.input_int (s, 0, valid_list=self.sharpen_dict.keys(), help_message="Enhance details by applying sharpen filter.")
+
+        if self.sharpen_mode != 0:
+            self.sharpen_amount = np.clip ( io.input_int ("Choose sharpen amount [0..100] (skip:%d) : " % 10, 10), 0, 100 )
+
+        s = """Choose super resolution mode: \n"""
+        for key in self.super_res_dict.keys():
+            s += f"""({key}) {self.super_res_dict[key]}\n"""
+        s += f"""?:help Default: {list(self.super_res_dict.keys())[0]} : """
+        self.super_resolution_mode = io.input_int (s, 0, valid_list=self.super_res_dict.keys(), help_message="Enhance details by applying superresolution network.")
+
+    def toggle_sharpen_mode(self):
+        a = list( self.sharpen_dict.keys() )
+        self.sharpen_mode = a[ (a.index(self.sharpen_mode)+1) % len(a) ]
+
+    def add_sharpen_amount(self, diff):
+        self.sharpen_amount = np.clip ( self.sharpen_amount+diff, 0, 100)
+
+    def toggle_super_resolution_mode(self):
+        a = list( self.super_res_dict.keys() )
+        self.super_resolution_mode = a[ (a.index(self.super_resolution_mode)+1) % len(a) ]
 
     #overridable
     def __eq__(self, other):
         #check equality of changeable params
 
         if isinstance(other, ConverterConfig):
-            return True
+            return self.sharpen_mode == other.sharpen_mode and \
+                   (self.sharpen_mode == 0 or ((self.sharpen_mode == other.sharpen_mode) and (self.sharpen_amount == other.sharpen_amount) )) and \
+                   self.super_resolution_mode == other.super_resolution_mode
 
         return False
 
     #overridable
     def __str__(self):
-        return "ConverterConfig: ."
+        r = ""
+        r += f"sharpen_mode : {self.sharpen_dict[self.sharpen_mode]}\n"
+        if self.sharpen_mode != 0:
+            r += f"sharpen_amount : {self.sharpen_amount}\n"
+        r += f"super_resolution_mode : {self.super_res_dict[self.super_resolution_mode]}\n"
+        return r
 
 
 class ConverterConfigMasked(ConverterConfig):
@@ -81,8 +121,6 @@ class ConverterConfigMasked(ConverterConfig):
         self.clip_hborder_mask_per = clip_hborder_mask_per
 
         #default changeable params
-
-
         self.mode = 'overlay'
         self.masked_hist_match = True
         self.hist_match_threshold = 238
@@ -92,7 +130,6 @@ class ConverterConfigMasked(ConverterConfig):
         self.motion_blur_power = 0
         self.output_face_scale = 0
         self.color_transfer_mode = 0
-        self.super_resolution_mode = 0
         self.color_degrade_power = 0
         self.export_mask_alpha = False
 
@@ -118,11 +155,9 @@ class ConverterConfigMasked(ConverterConfig):
                            2:'dst',
                            4:'FAN-dst',
                            7:'learned*FAN-dst'}
-
+
         self.ctm_dict = { 0: "None", 1:"rct", 2:"lct" }
         self.ctm_str_dict = {None:0, "rct":1, "lct": 2 }
-
-        self.super_res_dict = {0:"None", 1:'RankSRGAN'}
 
     def copy(self):
         return copy.copy(self)
@@ -160,10 +195,6 @@ class ConverterConfigMasked(ConverterConfig):
     def toggle_color_transfer_mode(self):
         self.color_transfer_mode = (self.color_transfer_mode+1) % 3
 
-    def toggle_super_resolution_mode(self):
-        a = list( self.super_res_dict.keys() )
-        self.super_resolution_mode = a[ (a.index(self.super_resolution_mode)+1) % len(a) ]
-
     def add_color_degrade_power(self, diff):
         self.color_degrade_power = np.clip ( self.color_degrade_power+diff , 0, 100)
 
@@ -213,13 +244,8 @@ class ConverterConfigMasked(ConverterConfig):
             self.color_transfer_mode = io.input_str ("Apply color transfer to predicted face? Choose mode ( rct/lct skip:None ) : ", None, ['rct','lct'])
             self.color_transfer_mode = self.ctm_str_dict[self.color_transfer_mode]
 
-        s = """Choose super resolution mode: \n"""
-        for key in self.super_res_dict.keys():
-            s += f"""({key}) {self.super_res_dict[key]}\n"""
-        s += f"""?:help Default: {list(self.super_res_dict.keys())[0]} : """
-        self.super_resolution_mode = io.input_int (s, 0, valid_list=self.super_res_dict.keys(), help_message="Enhance details by applying superresolution network.")
+        super().ask_settings()
-
 
         if 'raw' not in self.mode:
             self.color_degrade_power = np.clip ( io.input_int ("Degrade color power of final image [0..100] (skip:0) : ", 0), 0, 100)
             self.export_mask_alpha = io.input_bool("Export png with alpha channel of the mask? (y/n skip:n) : ", False)
@@ -230,7 +256,8 @@ class ConverterConfigMasked(ConverterConfig):
         #check equality of changeable params
 
         if isinstance(other, ConverterConfigMasked):
-            return self.mode == other.mode and \
+            return super().__eq__(other) and \
+                   self.mode == other.mode and \
                    self.masked_hist_match == other.masked_hist_match and \
                    self.hist_match_threshold == other.hist_match_threshold and \
                    self.mask_mode == other.mask_mode and \
@@ -239,7 +266,6 @@ class ConverterConfigMasked(ConverterConfig):
                    self.motion_blur_power == other.motion_blur_power and \
                    self.output_face_scale == other.output_face_scale and \
                    self.color_transfer_mode == other.color_transfer_mode and \
-                   self.super_resolution_mode == other.super_resolution_mode and \
                    self.color_degrade_power == other.color_degrade_power and \
                    self.export_mask_alpha == other.export_mask_alpha
 
@@ -272,7 +298,7 @@ class ConverterConfigMasked(ConverterConfig):
         if 'raw' not in self.mode:
             r += f"""color_transfer_mode: { self.ctm_dict[self.color_transfer_mode]}\n"""
 
-        r += f"""super_resolution_mode: {self.super_res_dict[self.super_resolution_mode]}\n"""
+        r += super().__str__()
 
         if 'raw' not in self.mode:
             r += (f"""color_degrade_power: {self.color_degrade_power}\n"""
@@ -297,8 +323,6 @@ class ConverterConfigFaceAvatar(ConverterConfig):
 
         #changeable params
         self.add_source_image = False
-        self.super_resolution_mode = 0
-        self.super_res_dict = {0:"None", 1:'RankSRGAN'}
 
     def copy(self):
         return copy.copy(self)
@@ -306,33 +330,24 @@ class ConverterConfigFaceAvatar(ConverterConfig):
     #override
     def ask_settings(self):
         self.add_source_image = io.input_bool("Add source image? (y/n ?:help skip:n) : ", False, help_message="Add source image for comparison.")
-
-        s = """Choose super resolution mode: \n"""
-        for key in self.super_res_dict.keys():
-            s += f"""({key}) {self.super_res_dict[key]}\n"""
-        s += f"""?:help Default: {list(self.super_res_dict.keys())[0]} : """
-        self.super_resolution_mode = io.input_int (s, 0, valid_list=self.super_res_dict.keys(), help_message="Enhance details by applying superresolution network.")
+        super().ask_settings()
 
     def toggle_add_source_image(self):
        self.add_source_image = not self.add_source_image
-
-    def toggle_super_resolution_mode(self):
-        a = list( self.super_res_dict.keys() )
-        self.super_resolution_mode = a[ (a.index(self.super_resolution_mode)+1) % len(a) ]
 
     #override
    def __eq__(self, other):
        #check equality of changeable params
 
        if isinstance(other, ConverterConfigFaceAvatar):
-            return self.add_source_image == other.add_source_image and \
-                   self.super_resolution_mode == other.super_resolution_mode
+            return super().__eq__(other) and \
+                   self.add_source_image == other.add_source_image
+
        return False
 
     #override
     def __str__(self):
         return ("ConverterConfig: \n"
-                f"add_source_image : {self.add_source_image}\n"
-                f"super_resolution_mode : {self.super_res_dict[self.super_resolution_mode]}\n"
-                "================"
-                )
\ No newline at end of file
+                f"add_source_image : {self.add_source_image}\n") + \
+                super().__str__() + "================"
+
diff --git a/doc/manual_en_google_translated.docx b/doc/manual_en_google_translated.docx
index 4c53cd7..88a2b90 100644
Binary files a/doc/manual_en_google_translated.docx and b/doc/manual_en_google_translated.docx differ
diff --git a/doc/manual_en_google_translated.pdf b/doc/manual_en_google_translated.pdf
index 16beeea..56e62fa 100644
Binary files a/doc/manual_en_google_translated.pdf and b/doc/manual_en_google_translated.pdf differ
diff --git a/doc/manual_ru.pdf b/doc/manual_ru.pdf
index 7a485d4..bc302e4 100644
Binary files a/doc/manual_ru.pdf and b/doc/manual_ru.pdf differ
diff --git a/doc/manual_ru_source.docx b/doc/manual_ru_source.docx
index 14e7646..1104e45 100644
Binary files a/doc/manual_ru_source.docx and b/doc/manual_ru_source.docx differ
diff --git a/mainscripts/Converter.py b/mainscripts/Converter.py
index a1cf6f2..f2b1701 100644
--- a/mainscripts/Converter.py
+++ b/mainscripts/Converter.py
@@ -84,6 +84,23 @@ class ConvertSubprocessor(Subprocessor):
             #therefore forcing active_DeviceConfig to CPU only
             nnlib.active_DeviceConfig = nnlib.DeviceConfig (cpu_only=True)
 
+            def sharpen_func (img, sharpen_mode=0, radius=0.003, amount=150):
+                h,w,c = img.shape
+                radius = max(1, round(w * radius))
+                kernel_size = int((radius * 2) + 1)
+                if sharpen_mode == 1: #box
+                    kernel = np.zeros( (kernel_size, kernel_size), dtype=np.float32)
+                    kernel[ kernel_size//2, kernel_size//2] = 1.0
+                    box_filter = np.ones( (kernel_size, kernel_size), dtype=np.float32) / (kernel_size**2)
+                    kernel = kernel + (kernel - box_filter) * amount
+                    return cv2.filter2D(img, -1, kernel)
+                elif sharpen_mode == 2: #gaussian
+                    blur = cv2.GaussianBlur(img, (kernel_size, kernel_size) , 0)
+                    img = cv2.addWeighted(img, 1.0 + (0.5 * amount), blur, -(0.5 * amount), 0)
+                    return img
+                return img
+            self.sharpen_func = sharpen_func
+
             self.fanseg_by_face_type = {}
             self.fanseg_input_size = 256
 
@@ -103,6 +120,7 @@ class ConvertSubprocessor(Subprocessor):
         def process_data(self, pf): #pf=ProcessingFrame
             cfg = pf.cfg.copy()
             cfg.predictor_func = self.predictor_func
+            cfg.sharpen_func = self.sharpen_func
             cfg.superres_func = self.superres_func
 
             frame_info = pf.frame_info
@@ -239,8 +257,8 @@ class ConvertSubprocessor(Subprocessor):
         io.progress_bar_close()
 
     cfg_change_keys = ['`','1', '2', '3', '4', '5', '6', '7', '8', '9',
-                       'q', 'a', 'w', 's', 'e', 'd', 'r', 'f', 't', 'g','y','h',
-                       'z', 'x', 'c', 'v', 'b' ]
+                       'q', 'a', 'w', 's', 'e', 'd', 'r', 'f', 't', 'g','y','h','u','j',
+                       'z', 'x', 'c', 'v', 'b','n' ]
     #override
     def on_tick(self):
         self.predictor_func_host.process_messages()
@@ -324,8 +342,12 @@ class ConvertSubprocessor(Subprocessor):
                     elif chr_key == 'g':
                         cfg.add_color_degrade_power(-1 if not shift_pressed else -5)
                     elif chr_key == 'y':
-                        cfg.add_output_face_scale(1 if not shift_pressed else 5)
+                        cfg.add_sharpen_amount(1 if not shift_pressed else 5)
                     elif chr_key == 'h':
+                        cfg.add_sharpen_amount(-1 if not shift_pressed else -5)
+                    elif chr_key == 'u':
+                        cfg.add_output_face_scale(1 if not shift_pressed else 5)
+                    elif chr_key == 'j':
                         cfg.add_output_face_scale(-1 if not shift_pressed else -5)
 
                     elif chr_key == 'z':
@@ -338,11 +360,20 @@ class ConvertSubprocessor(Subprocessor):
                         cfg.toggle_super_resolution_mode()
                     elif chr_key == 'b':
                         cfg.toggle_export_mask_alpha()
+                    elif chr_key == 'n':
+                        cfg.toggle_sharpen_mode()
+
                 else:
-                    if chr_key == 's':
+                    if chr_key == 'y':
+                        cfg.add_sharpen_amount(1 if not shift_pressed else 5)
+                    elif chr_key == 'h':
+                        cfg.add_sharpen_amount(-1 if not shift_pressed else -5)
+                    elif chr_key == 's':
                         cfg.toggle_add_source_image()
                     elif chr_key == 'v':
                         cfg.toggle_super_resolution_mode()
+                    elif chr_key == 'n':
+                        cfg.toggle_sharpen_mode()
 
                 if prev_cfg != cfg:
                     io.log_info (cfg)
@@ -368,16 +399,13 @@ class ConvertSubprocessor(Subprocessor):
             if go_prev_frame:
                 if cur_frame is not None and cur_frame.is_done:
                     cur_frame.image = None
-
-
-            if len(self.frames_done_idxs) > 0:
-                prev_frame = self.frames[self.frames_done_idxs.pop()]
-                self.frames_idxs.insert(0, prev_frame.idx)
-                prev_frame.is_shown = False
-                io.progress_bar_inc(-1)
-
-            if go_prev_frame_overriding_cfg:
-                if cur_frame is not None:
+                if len(self.frames_done_idxs) > 0:
+                    prev_frame = self.frames[self.frames_done_idxs.pop()]
+                    self.frames_idxs.insert(0, prev_frame.idx)
+                    prev_frame.is_shown = False
+                    io.progress_bar_inc(-1)
+
+                    if go_prev_frame_overriding_cfg:
                         if prev_frame.cfg != cur_frame.cfg:
                             prev_frame.cfg = cur_frame.cfg
                             prev_frame.is_done = False
diff --git a/mainscripts/gfx/help_converter_face_avatar.jpg b/mainscripts/gfx/help_converter_face_avatar.jpg
index c9fff23..6d848c3 100644
Binary files a/mainscripts/gfx/help_converter_face_avatar.jpg and b/mainscripts/gfx/help_converter_face_avatar.jpg differ
diff --git a/mainscripts/gfx/help_converter_face_avatar_source.psd b/mainscripts/gfx/help_converter_face_avatar_source.psd
index 98ec516..c892610 100644
Binary files a/mainscripts/gfx/help_converter_face_avatar_source.psd and b/mainscripts/gfx/help_converter_face_avatar_source.psd differ
diff --git a/mainscripts/gfx/help_converter_masked.jpg b/mainscripts/gfx/help_converter_masked.jpg
index 3b268d6..4bd0721 100644
Binary files a/mainscripts/gfx/help_converter_masked.jpg and b/mainscripts/gfx/help_converter_masked.jpg differ
diff --git a/mainscripts/gfx/help_converter_masked_source.psd b/mainscripts/gfx/help_converter_masked_source.psd
index 166ada2..bbca5bc 100644
Binary files a/mainscripts/gfx/help_converter_masked_source.psd and b/mainscripts/gfx/help_converter_masked_source.psd differ
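
Note on the sharpen math in sharpen_func above: both modes are unsharp masking. The box mode builds the kernel K = I + (I - B)*amount, where I is the identity impulse and B a normalized box filter, so cv2.filter2D(img, -1, K) equals img + amount*(img - box_blur(img)). The following standalone sketch (not part of the patch; the test array, sizes, and tolerance are illustrative) checks that identity:

    import cv2
    import numpy as np

    img = np.random.rand(64, 64, 3).astype(np.float32)
    amount = 10
    kernel_size = 5  # odd, as in sharpen_func: 2*radius + 1

    # Kernel exactly as built in sharpen_func: identity + (identity - box) * amount
    kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32)
    kernel[kernel_size // 2, kernel_size // 2] = 1.0
    box_filter = np.ones((kernel_size, kernel_size), dtype=np.float32) / (kernel_size ** 2)
    kernel = kernel + (kernel - box_filter) * amount

    sharpened = cv2.filter2D(img, -1, kernel)

    # Equivalent formulation: add back 'amount' times the high-frequency residual.
    blur = cv2.boxFilter(img, -1, (kernel_size, kernel_size))
    expected = img + (img - blur) * amount

    print(np.allclose(sharpened, expected, atol=1e-4))  # True (both use the same default border mode)

The gaussian branch uses the same identity through cv2.addWeighted: img*(1 + 0.5*amount) - blur*(0.5*amount) = img + 0.5*amount*(img - blur), so for the same amount its effective strength is half that of the box mode, modulo the different blur kernel.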