Mirror of https://github.com/iperov/DeepFaceLab.git, synced 2025-07-06 13:02:15 -07:00

converter: added --final-image-color-degrade-power - degrades colors of the final image to hide face problems. Valid range [0..100].

parent cef6710d24
commit 6f0d38d171

3 changed files with 49 additions and 16 deletions

main.py (26 changes)
@@ -23,7 +23,6 @@ def str2bool(v):

if __name__ == "__main__":
    os_utils.set_process_lowest_prio()

    parser = argparse.ArgumentParser()
    parser.add_argument('--tf-suppress-std', action="store_true", dest="tf_suppress_std", default=False, help="Suppress tensorflow initialization info. May not works on some python builds such as anaconda python 3.6.4. If you can fix it, you are welcome.")
@@ -142,15 +141,22 @@ if __name__ == "__main__":
        except:
            arguments.output_face_scale_modifier = 0

        try:
            arguments.transfercolor = bool ( {"1":True,"0":False}[input("Transfer color from original DST image? [0..1] (default 0) : ").lower()] )
        except:
            arguments.transfercolor = False

        try:
            arguments.final_image_color_degrade_power = int ( input ("Degrade color power of final image [0..100] (default 0) : ") )
        except:
            arguments.final_image_color_degrade_power = 0

        try:
            arguments.alpha = bool ( {"1":True,"0":False}[input("Export png with alpha channel? [0..1] (default 0) : ").lower()] )
        except:
            arguments.alpha = False

        try:
            arguments.transfercolor = bool ( {"1":True,"0":False}[input("Transfer color from original DST image? [0..1] (default 0) : ").lower()] )
        except:
            arguments.transfercolor = False

        arguments.erode_mask_modifier = np.clip ( int(arguments.erode_mask_modifier), -100, 100)
        arguments.blur_mask_modifier = np.clip ( int(arguments.blur_mask_modifier), -100, 200)
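Each interactive prompt above uses the same try/except pattern: the answer is parsed, and empty or invalid input falls back to the default. A minimal stand-alone sketch of that pattern (the helper name ask_int is hypothetical, not part of the commit):

# Hypothetical helper illustrating the prompt-with-default pattern used above; not part of the commit.
def ask_int(message, default=0):
    try:
        return int(input(message))
    except (ValueError, EOFError):
        return default

# Pressing Enter at this prompt yields 0, mirroring final_image_color_degrade_power's default.
power = ask_int("Degrade color power of final image [0..100] (default 0) : ", 0)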
@@ -169,9 +175,10 @@ if __name__ == "__main__":
                        erode_mask_modifier = arguments.erode_mask_modifier,
                        blur_mask_modifier = arguments.blur_mask_modifier,
                        output_face_scale_modifier = arguments.output_face_scale_modifier,
                        force_best_gpu_idx = arguments.force_best_gpu_idx,
                        alpha = arguments.alpha,
                        final_image_color_degrade_power = arguments.final_image_color_degrade_power,
                        transfercolor = arguments.transfercolor,
                        alpha = arguments.alpha,
                        force_best_gpu_idx = arguments.force_best_gpu_idx
                        )

    convert_parser = subparsers.add_parser( "convert", help="Converter")
@@ -186,9 +193,10 @@ if __name__ == "__main__":
    convert_parser.add_argument('--erode-mask-modifier', type=int, dest="erode_mask_modifier", default=0, help="Automatic erode mask modifier. Valid range [-100..100].")
    convert_parser.add_argument('--blur-mask-modifier', type=int, dest="blur_mask_modifier", default=0, help="Automatic blur mask modifier. Valid range [-100..200].")
    convert_parser.add_argument('--output-face-scale-modifier', type=int, dest="output_face_scale_modifier", default=0, help="Output face scale modifier. Valid range [-50..50].")
    convert_parser.add_argument('--debug', action="store_true", dest="debug", default=False, help="Debug converter.")
    convert_parser.add_argument('--alpha', action="store_true", dest="alpha", default=False, help="alpha channel.")
    convert_parser.add_argument('--final-image-color-degrade-power', type=int, dest="final_image_color_degrade_power", default=0, help="Degrades colors of final image to hide face problems. Valid range [0..100].")
    convert_parser.add_argument('--transfercolor', action="store_true", dest="transfercolor", default=False, help="transfer color from dst to merged.")
    convert_parser.add_argument('--alpha', action="store_true", dest="alpha", default=False, help="alpha channel.")
    convert_parser.add_argument('--debug', action="store_true", dest="debug", default=False, help="Debug converter.")
    convert_parser.add_argument('--force-best-gpu-idx', type=int, dest="force_best_gpu_idx", default=-1, help="Force to choose this GPU idx as best.")

    convert_parser.set_defaults(func=process_convert)
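A quick stand-alone check of how the new flag parses (an illustrative sketch, not part of the commit; only the one argument is registered here):

import argparse

# Illustrative sketch: parse the new flag in isolation.
parser = argparse.ArgumentParser()
parser.add_argument('--final-image-color-degrade-power', type=int,
                    dest="final_image_color_degrade_power", default=0)
args = parser.parse_args(['--final-image-color-degrade-power', '50'])
print(args.final_image_color_degrade_power)  # 50; omitting the flag leaves the default 0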
@@ -4,7 +4,6 @@ from facelib import FaceType
import cv2
import numpy as np
from utils import image_utils


class ConverterMasked(ConverterBase):
@@ -20,9 +19,10 @@ class ConverterMasked(ConverterBase):
                 mode='seamless',
                 erode_mask_modifier=0,
                 blur_mask_modifier=0,
                 output_face_scale_modifier=0.0,
                 alpha=False,
                 transfercolor=False,
                 output_face_scale_modifier=0.0,
                 transfercolor=False,
                 final_image_color_degrade_power=0,
                 alpha=False,
                 **in_options):

        super().__init__(predictor)
@@ -38,8 +38,9 @@ class ConverterMasked(ConverterBase):
        self.erode_mask_modifier = erode_mask_modifier
        self.blur_mask_modifier = blur_mask_modifier
        self.output_face_scale = np.clip(1.0 + output_face_scale_modifier*0.01, 0.5, 1.0)
        self.alpha = alpha
        self.transfercolor = transfercolor
        self.final_image_color_degrade_power = np.clip (final_image_color_degrade_power, 0, 100)
        self.alpha = alpha

        if self.erode_mask_modifier != 0 and not self.erode_mask:
            print ("Erode mask modifier not used in this model.")
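Both options are clamped on the way in: the face scale modifier becomes a factor in [0.5, 1.0] (so positive modifiers cannot scale the face above 1.0 with this clamp), and the degrade power is clamped to [0, 100]. A quick check of those mappings, illustrative only:

import numpy as np

# Illustrative check of the clamping above; not part of the commit.
for m in (-50, 0, 25, 50):
    print(m, np.clip(1.0 + m * 0.01, 0.5, 1.0))   # -50 -> 0.5, 0 -> 1.0, 25 -> 1.0, 50 -> 1.0
print(np.clip(150, 0, 100))                        # an out-of-range degrade power becomes 100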
@@ -165,7 +166,8 @@ class ConverterMasked(ConverterBase):
            if self.mode == 'hist-match-bw':
                prd_face_bgr = prd_face_bgr.astype(np.float32)

            out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT )

            if debug:
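The warpAffine call above pastes the predicted face crop back into the full frame: face_output_mat maps frame coordinates to crop coordinates, so WARP_INVERSE_MAP applies it backwards, and BORDER_TRANSPARENT leaves the untouched frame pixels as they are. A self-contained sketch of the same mechanism with made-up sizes and points (not part of the commit):

import cv2
import numpy as np

# Illustrative sketch of inverse-map pasting; sizes and points are made up.
frame = np.zeros((256, 256, 3), np.float32)
face  = np.ones((64, 64, 3), np.float32)               # stand-in for the predicted face crop
mat = cv2.getAffineTransform(
    np.float32([[96, 96], [160, 96], [160, 160]]),     # triangle in frame coordinates
    np.float32([[0, 0], [63, 0], [63, 63]]))            # same triangle in crop coordinates
out = cv2.warpAffine(face, mat, (256, 256), frame.copy(),
                     cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT)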
@@ -205,6 +207,16 @@ class ConverterMasked(ConverterBase):
                l_channel, tmp2_channel, tmp3_channel = cv2.split(lab_bw) #taking lightness channel L from merged fake
                img_LAB = cv2.merge((l_channel,a_channel, b_channel)) #merging light and color
                out_img = color.lab2rgb(img_LAB) #converting LAB to RGB

            if self.final_image_color_degrade_power != 0:
                if debug:
                    debugs += [out_img.copy()]
                out_img_reduced = image_utils.reduce_colors(out_img, 256)
                if self.final_image_color_degrade_power == 100:
                    out_img = out_img_reduced
                else:
                    alpha = self.final_image_color_degrade_power / 100.0
                    out_img = (out_img*(1.0-alpha) + out_img_reduced*alpha)

            if self.alpha:
                new_image = out_img.copy()
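The new option works by quantizing the merged frame to a 256-color palette and cross-fading between the original and the quantized version; at 100 the quantized image is used as-is. A minimal stand-alone sketch of that blend on a dummy image (simple value rounding stands in for reduce_colors here, purely for illustration):

import numpy as np

# Illustrative blend only; the real code calls image_utils.reduce_colors (palette quantization).
img = np.random.rand(8, 8, 3).astype(np.float32)        # stand-in for out_img, values in [0..1]
reduced = np.round(img * 32) / 32                        # crude stand-in for the 256-color reduction
power = 30                                               # --final-image-color-degrade-power
alpha = power / 100.0
degraded = img * (1.0 - alpha) + reduced * alpha         # same cross-fade as the code above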
@@ -216,6 +228,8 @@ class ConverterMasked(ConverterBase):
                out_img = cv2.merge((b_channel,g_channel, r_channel, alpha_channel)) #mergin RGB with alpha
                out_img = out_img.astype(np.float32) / 255.0

            if debug:
                debugs += [out_img.copy()]
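When --alpha is set, the converter appends a fourth channel before writing the result, so downstream tools receive a transparent PNG. A minimal illustration of attaching a mask as an alpha channel (the file name and mask here are made up):

import cv2
import numpy as np

# Illustrative only: attach a mask as the alpha channel and save as PNG; file name is made up.
bgr  = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
mask = np.full((64, 64), 255, np.uint8)        # fully opaque; a real face mask varies per pixel
b, g, r = cv2.split(bgr)
bgra = cv2.merge((b, g, r, mask))              # same 4-channel merge as in the hunk above
cv2.imwrite('merged_with_alpha.png', bgra)     # PNG preserves the alpha channel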
@@ -261,4 +261,15 @@ def warp_by_params (params, img, warp, transform, flip):
        img = cv2.warpAffine( img, params['rmat'], (params['w'], params['w']), borderMode=cv2.BORDER_CONSTANT, flags=cv2.INTER_LANCZOS4 )
    if flip and params['flip']:
        img = img[:,::-1,:]
    return img
    return img

#n_colors = [0..256]
def reduce_colors (img_bgr, n_colors):
    img_rgb = (cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB) * 255.0).astype(np.uint8)
    img_rgb_pil = Image.fromarray(img_rgb)
    img_rgb_pil_p = img_rgb_pil.convert('P', palette=Image.ADAPTIVE, colors=n_colors)

    img_rgb_p = img_rgb_pil_p.convert('RGB')
    img_bgr = cv2.cvtColor( np.array(img_rgb_p, dtype=np.float32) / 255.0, cv2.COLOR_RGB2BGR )

    return img_bgr
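reduce_colors expects a float BGR image in [0..1] and returns the same format after adaptive palette quantization via Pillow. A hypothetical usage (the file paths are made up):

import cv2
import numpy as np
from utils import image_utils

# Hypothetical usage of the new helper; file paths are made up.
img = cv2.imread('some_frame.png').astype(np.float32) / 255.0    # float BGR in [0..1]
reduced = image_utils.reduce_colors(img, 256)                     # adaptive 256-color palette
cv2.imwrite('some_frame_reduced.png', (reduced * 255.0).astype(np.uint8))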