Mirror of https://github.com/iperov/DeepFaceLab.git, synced 2025-07-16 10:03:41 -07:00

Merge pull request #47 from MaksV79/master

Added options --alpha and --transfercolor

Commit ef7fdd49a0. 3 changed files with 46 additions and 8 deletions.
main.py (22 lines changed)

@@ -1,4 +1,4 @@
 import os
 import sys
 import argparse
 from utils import Path_utils
@@ -136,7 +136,17 @@ if __name__ == "__main__":
             arguments.blur_mask_modifier = int ( input ("Choose blur mask modifier [-100..200] (default 0) : ") )
         except:
             arguments.blur_mask_modifier = 0
+
+        try:
+            arguments.alpha = bool ( {"1":True,"0":False}[input("Export png with alpha channel? [0..1] (default 0) : ").lower()] )
+        except:
+            arguments.alpha = False
+
+        try:
+            arguments.transfercolor = bool ( {"1":True,"0":False}[input("Transfer color from original DST image? [0..1] (default 0) : ").lower()] )
+        except:
+            arguments.transfercolor = False
 
         arguments.erode_mask_modifier = np.clip ( int(arguments.erode_mask_modifier), -100, 100)
         arguments.blur_mask_modifier = np.clip ( int(arguments.blur_mask_modifier), -100, 200)
 
@@ -152,7 +162,9 @@ if __name__ == "__main__":
                 masked_hist_match = arguments.masked_hist_match,
                 erode_mask_modifier = arguments.erode_mask_modifier,
                 blur_mask_modifier = arguments.blur_mask_modifier,
-                force_best_gpu_idx = arguments.force_best_gpu_idx
+                force_best_gpu_idx = arguments.force_best_gpu_idx,
+                alpha = arguments.alpha,
+                transfercolor = arguments.transfercolor,
                 )
 
     convert_parser = subparsers.add_parser( "convert", help="Converter")
@@ -167,6 +179,8 @@ if __name__ == "__main__":
     convert_parser.add_argument('--erode-mask-modifier', type=int, dest="erode_mask_modifier", default=0, help="Automatic erode mask modifier. Valid range [-100..100].")
     convert_parser.add_argument('--blur-mask-modifier', type=int, dest="blur_mask_modifier", default=0, help="Automatic blur mask modifier. Valid range [-100..200].")
     convert_parser.add_argument('--debug', action="store_true", dest="debug", default=False, help="Debug converter.")
+    convert_parser.add_argument('--alpha', action="store_true", dest="alpha", default=False, help="alpha channel.")
+    convert_parser.add_argument('--transfercolor', action="store_true", dest="transfercolor", default=False, help="transfer color from dst to merged.")
     convert_parser.add_argument('--force-best-gpu-idx', type=int, dest="force_best_gpu_idx", default=-1, help="Force to choose this GPU idx as best.")
 
     convert_parser.set_defaults(func=process_convert)
@@ -185,4 +199,4 @@ if __name__ == "__main__":
     '''
     import code
     code.interact(local=dict(globals(), **locals()))
     '''
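Both ways of setting the new options reduce to a plain boolean: on the command line through argparse's store_true action, and interactively through the {"1": True, "0": False} lookup that falls back to False on any other answer. Below is a minimal standalone sketch of just the two new switches; the bare parser is illustrative, not the full "convert" subparser from main.py.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--alpha', action="store_true", dest="alpha", default=False,
                    help="Export png with alpha channel.")
parser.add_argument('--transfercolor', action="store_true", dest="transfercolor", default=False,
                    help="Transfer color from the original DST image to the merged result.")

args = parser.parse_args(['--alpha'])
print(args.alpha, args.transfercolor)  # -> True False

# The interactive prompt in main.py maps "1"/"0" answers the same way,
# falling back to False for any other input:
answer = "1"
alpha = {"1": True, "0": False}.get(answer, False)
print(alpha)  # -> True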
ConverterMasked:

@@ -24,7 +24,9 @@ class ConverterMasked(ConverterBase):
                        masked_hist_match = False,
                        mode='seamless',
                        erode_mask_modifier=0,
                        blur_mask_modifier=0,
+                       alpha=False,
+                       transfercolor=False,
                        **in_options):
 
         super().__init__(predictor)
@@ -39,6 +41,8 @@ class ConverterMasked(ConverterBase):
         self.mode = mode
         self.erode_mask_modifier = erode_mask_modifier
         self.blur_mask_modifier = blur_mask_modifier
+        self.alpha = alpha
+        self.transfercolor = transfercolor
 
         if self.erode_mask_modifier != 0 and not self.erode_mask:
             print ("Erode mask modifier not used in this model.")
@@ -186,9 +190,28 @@ class ConverterMasked(ConverterBase):
                 new_out_face_bgr = image_utils.color_hist_match(out_face_bgr, dst_face_bgr )
                 new_out = cv2.warpAffine( new_out_face_bgr, face_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT )
                 out_img = np.clip( img_bgr*(1-img_mask_blurry_aaa) + (new_out*img_mask_blurry_aaa) , 0, 1.0 )
 
+
+        if self.transfercolor:  #transfer color from the original DST image to the fake
+            from skimage import io, color
+            lab_clr = color.rgb2lab(img_bgr)       #original DST, converted to LAB color space
+            lab_bw = color.rgb2lab(out_img)        #merged fake, converted to LAB color space
+            tmp_channel, a_channel, b_channel = cv2.split(lab_clr)     #take color channels a and b from the original DST image
+            l_channel, tmp2_channel, tmp3_channel = cv2.split(lab_bw)  #take lightness channel L from the merged fake
+            img_LAB = cv2.merge((l_channel, a_channel, b_channel))     #merge lightness and color
+            out_img = color.lab2rgb(img_LAB)       #convert LAB back to RGB
+
+        if self.alpha:
+            new_image = out_img.copy()
+            new_image = (new_image*255).astype(np.uint8)               #convert image to uint8
+            b_channel, g_channel, r_channel = cv2.split(new_image)     #split the BGR channels
+            alpha_channel = img_mask_blurry_aaa.copy()                 #use the blurred mask as the alpha channel
+            alpha_channel = (alpha_channel*255).astype(np.uint8)
+            alpha_channel, tmp2, tmp3 = cv2.split(alpha_channel)       #the mask's three channels are identical; keep just one
+            out_img = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))   #merge BGR with the alpha channel
+            out_img = out_img.astype(np.float32) / 255.0
 
         if debug:
             debugs += [out_img.copy()]
 
         return debugs if debug else out_img
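Read in isolation, the two converter additions are simple image operations: --transfercolor keeps the lightness of the merged fake but borrows the colour channels of the original frame, and --alpha attaches the blurred mask as a fourth channel. Here is a hedged, self-contained sketch of both, assuming float images in the 0..1 range with channels in RGB order and a 3-channel blurred mask of the same shape; the helper names transfer_color_lab and merge_alpha are illustrative and do not exist in the repository.

import numpy as np
from skimage import color

def transfer_color_lab(dst_rgb, merged_rgb):
    """Keep the lightness (L) of the merged fake, take the colour (a, b)
    channels from the original DST frame -- the idea behind --transfercolor."""
    lab_dst    = color.rgb2lab(dst_rgb)      # original DST frame in LAB
    lab_merged = color.rgb2lab(merged_rgb)   # merged fake in LAB
    combined = np.dstack((lab_merged[..., 0],   # L from the fake
                          lab_dst[..., 1],      # a from the original
                          lab_dst[..., 2]))     # b from the original
    return color.lab2rgb(combined)

def merge_alpha(merged_rgb, mask):
    """Attach the blurred mask as a fourth (alpha) channel -- the idea behind
    --alpha; the converter does the same on BGR data before the PNG is written."""
    rgb8 = (np.clip(merged_rgb, 0, 1) * 255).astype(np.uint8)
    a8   = (np.clip(mask[..., 0], 0, 1) * 255).astype(np.uint8)  # mask channels are identical
    rgba = np.dstack((rgb8, a8))                                 # H x W x 4
    return rgba.astype(np.float32) / 255.0

# Illustrative usage with random data:
frame = np.random.rand(64, 64, 3).astype(np.float32)  # stand-in for the DST frame
fake  = np.random.rand(64, 64, 3).astype(np.float32)  # stand-in for the merged fake
mask  = np.ones((64, 64, 3), dtype=np.float32)        # stand-in for img_mask_blurry_aaa

recolored  = transfer_color_lab(frame, fake)
with_alpha = merge_alpha(recolored, mask)
print(recolored.shape, with_alpha.shape)  # (64, 64, 3) (64, 64, 4)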
Requirements:

@@ -7,4 +7,5 @@ tensorflow-gpu==1.8.0
 scikit-image
 dlib==19.10.0
 tqdm
 git+https://www.github.com/keras-team/keras-contrib.git
+skimage