Mirror of https://github.com/iperov/DeepFaceLab.git, synced 2025-08-14 10:46:59 -07:00
update == 04.20.2019 == (#242)
* superbly improved FANseg
* added FANseg extractor for src and dst faces to use it in training
* update to 'partial' func
* trained FANSeg_256_full_face.h5; new experimental models: AVATAR, RecycleGAN
* fix for TCC mode cards (Tesla); there was a conflict with plaidML initialization
* update manuals
This commit is contained in:
parent 7be2fd67f5
commit 046649e6be
32 changed files with 1152 additions and 329 deletions
converters/Converter.py
@@ -5,8 +5,9 @@ You can implement your own Converter, check example ConverterMasked.py
 class Converter(object):
     TYPE_FACE = 0 #calls convert_face
-    TYPE_IMAGE = 1 #calls convert_image without landmarks
-    TYPE_IMAGE_WITH_LANDMARKS = 2 #calls convert_image with landmarks
+    TYPE_FACE_AVATAR = 1 #calls convert_face with avatar_operator_face
+    TYPE_IMAGE = 2 #calls convert_image without landmarks
+    TYPE_IMAGE_WITH_LANDMARKS = 3 #calls convert_image with landmarks
 
     #overridable
     def __init__(self, predictor_func, type):
@@ -23,13 +24,13 @@ class Converter(object):
         pass
 
     #overridable
-    def cli_convert_face (self, img_bgr, img_face_landmarks, debug):
+    def cli_convert_face (self, img_bgr, img_face_landmarks, debug, avaperator_face_bgr=None, **kwargs):
         #return float32 image
         #if debug , return tuple ( images of any size and channels, ...)
         return image
 
     #overridable
-    def convert_image (self, img_bgr, img_landmarks, debug):
+    def cli_convert_image (self, img_bgr, img_landmarks, debug):
         #img_landmarks not None, if input image is png with embedded data
         #return float32 image
         #if debug , return tuple ( images of any size and channels, ...)
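
The renumbered TYPE_* constants are a dispatch key: the comment on each one names the method the conversion worker calls for that converter. A minimal sketch of that dispatch, assuming the base class stores the constant as self.type; the apply_converter wrapper and its argument names are illustrative, not part of this commit:

from converters import Converter

def apply_converter(cvt, img_bgr, landmarks=None, avatar_face_bgr=None, debug=False):
    #branch on the converter's declared type, as the comments above intend
    if cvt.type == Converter.TYPE_FACE:
        return cvt.cli_convert_face(img_bgr, landmarks, debug)
    if cvt.type == Converter.TYPE_FACE_AVATAR:
        #avatar conversion additionally receives the avatar operator's face
        return cvt.cli_convert_face(img_bgr, landmarks, debug,
                                    avaperator_face_bgr=avatar_face_bgr)
    if cvt.type in (Converter.TYPE_IMAGE, Converter.TYPE_IMAGE_WITH_LANDMARKS):
        return cvt.cli_convert_image(img_bgr, landmarks, debug)
    raise ValueError("unknown converter type %d" % cvt.type)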
converters/ConverterAvatar.py (new file, 70 lines)
@@ -0,0 +1,70 @@
+import time
+
+import cv2
+import numpy as np
+
+from facelib import FaceType, LandmarksProcessor
+from joblib import SubprocessFunctionCaller
+from utils.pickle_utils import AntiPickler
+
+from .Converter import Converter
+
+class ConverterAvatar(Converter):
+
+    #override
+    def __init__(self, predictor_func,
+                       predictor_input_size=0):
+
+        super().__init__(predictor_func, Converter.TYPE_FACE_AVATAR)
+
+        self.predictor_input_size = predictor_input_size
+
+        #dummy predict and sleep, tensorflow caching kernels. If remove it, conversion speed will be x2 slower
+        predictor_func ( np.zeros ( (predictor_input_size,predictor_input_size,3), dtype=np.float32 ),
+                         np.zeros ( (predictor_input_size,predictor_input_size,1), dtype=np.float32 ) )
+        time.sleep(2)
+
+        predictor_func_host, predictor_func = SubprocessFunctionCaller.make_pair(predictor_func)
+        self.predictor_func_host = AntiPickler(predictor_func_host)
+        self.predictor_func = predictor_func
+
+    #overridable
+    def on_host_tick(self):
+        self.predictor_func_host.obj.process_messages()
+
+    #override
+    def cli_convert_face (self, img_bgr, img_face_landmarks, debug, avaperator_face_bgr=None, **kwargs):
+        if debug:
+            debugs = [img_bgr.copy()]
+
+        img_size = img_bgr.shape[1], img_bgr.shape[0]
+
+        img_face_mask_a = LandmarksProcessor.get_image_hull_mask (img_bgr.shape, img_face_landmarks)
+        img_face_mask_aaa = np.repeat(img_face_mask_a, 3, -1)
+
+        output_size = self.predictor_input_size
+        face_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, output_size, face_type=FaceType.FULL)
+
+        dst_face_mask_a_0 = cv2.warpAffine( img_face_mask_a, face_mat, (output_size, output_size), flags=cv2.INTER_CUBIC )
+
+        predictor_input_dst_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, (self.predictor_input_size,self.predictor_input_size), cv2.INTER_CUBIC )
+        prd_inp_dst_face_mask_a = predictor_input_dst_face_mask_a_0[...,np.newaxis]
+
+        prd_inp_avaperator_face_bgr = cv2.resize (avaperator_face_bgr, (self.predictor_input_size,self.predictor_input_size), cv2.INTER_CUBIC )
+
+        prd_face_bgr = self.predictor_func ( prd_inp_avaperator_face_bgr, prd_inp_dst_face_mask_a )
+
+        out_img = img_bgr.copy()
+        out_img = cv2.warpAffine( prd_face_bgr, face_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT )
+        out_img = np.clip(out_img, 0.0, 1.0)
+
+        if debug:
+            debugs += [out_img.copy()]
+
+        out_img = np.clip( img_bgr*(1-img_face_mask_aaa) + (out_img*img_face_mask_aaa) , 0, 1.0 )
+
+        if debug:
+            debugs += [out_img.copy()]
+
+        return debugs if debug else out_img
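
The heart of cli_convert_face above is the paste-back step: the predicted face is warped back into the full frame with the inverse of the alignment transform, then alpha-blended through the landmarks hull mask. A standalone sketch of just that step, assuming float32 BGR images in [0, 1]; paste_face_back is an illustrative name, not a function from this commit:

import cv2
import numpy as np

def paste_face_back(frame_bgr, face_bgr, face_mat, face_mask_a):
    h, w = frame_bgr.shape[:2]
    out = frame_bgr.copy()
    #WARP_INVERSE_MAP maps the aligned face back into frame coordinates;
    #BORDER_TRANSPARENT leaves pixels outside the warped face untouched
    cv2.warpAffine(face_bgr, face_mat, (w, h), out,
                   cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                   cv2.BORDER_TRANSPARENT)
    mask3 = np.repeat(face_mask_a, 3, -1)  #(H,W,1) hull mask -> (H,W,3)
    return np.clip(frame_bgr * (1 - mask3) + out * mask3, 0.0, 1.0)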
converters/ConverterImage.py
@@ -1,40 +1,50 @@
-from .Converter import Converter
-from facelib import LandmarksProcessor
-from facelib import FaceType
 import time
 
 import cv2
 import numpy as np
 
-'''
-predictor_func:
- input:  [predictor_input_size, predictor_input_size, BGR]
- output: [predictor_input_size, predictor_input_size, BGR]
-'''
+from facelib import FaceType, LandmarksProcessor
+from joblib import SubprocessFunctionCaller
+from utils.pickle_utils import AntiPickler
+
+from .Converter import Converter
 
 class ConverterImage(Converter):
 
     #override
     def __init__(self, predictor_func,
-                       predictor_input_size=0,
-                       output_size=0):
+                       predictor_input_size=0):
 
         super().__init__(predictor_func, Converter.TYPE_IMAGE)
 
         self.predictor_input_size = predictor_input_size
-        self.output_size = output_size
 
-    #override
-    def dummy_predict(self):
-        self.predictor_func ( np.zeros ( (self.predictor_input_size, self.predictor_input_size,3), dtype=np.float32) )
+        #dummy predict and sleep, tensorflow caching kernels. If remove it, conversion speed will be x2 slower
+        predictor_func ( np.zeros ( (predictor_input_size,predictor_input_size,3), dtype=np.float32 ) )
+        time.sleep(2)
+
+        predictor_func_host, predictor_func = SubprocessFunctionCaller.make_pair(predictor_func)
+        self.predictor_func_host = AntiPickler(predictor_func_host)
+        self.predictor_func = predictor_func
+
+    #overridable
+    def on_host_tick(self):
+        self.predictor_func_host.obj.process_messages()
 
     #override
-    def convert_image (self, img_bgr, img_landmarks, debug):
+    def cli_convert_image (self, img_bgr, img_landmarks, debug):
         img_size = img_bgr.shape[1], img_bgr.shape[0]
 
         predictor_input_bgr = cv2.resize ( img_bgr, (self.predictor_input_size, self.predictor_input_size), cv2.INTER_LANCZOS4 )
-        predicted_bgr = self.predictor_func ( predictor_input_bgr )
 
-        output = cv2.resize ( predicted_bgr, (self.output_size, self.output_size), cv2.INTER_LANCZOS4 )
         if debug:
-            return (predictor_input_bgr,output,)
-        return output
+            debugs = [predictor_input_bgr]
+
+        output = self.predictor_func ( predictor_input_bgr )
+
+        if debug:
+            debugs += [out_img.copy()]
+
+        return debugs if debug else output
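
Both converters now pass predictor_func through SubprocessFunctionCaller.make_pair, so the TensorFlow-backed predictor keeps running in the host process while the converter object itself is pickled to CLI worker processes; AntiPickler guards the host-side endpoint from being pickled along with it, and on_host_tick pumps pending calls. A rough sketch of the idea using a plain multiprocessing pipe; this is an assumption about the pattern, not DeepFaceLab's actual joblib implementation:

import multiprocessing as mp

class HostEnd:
    #host side: owns the real function and answers worker requests
    def __init__(self, func, conn):
        self.func, self.conn = func, conn
    def process_messages(self):
        while self.conn.poll():              #drain pending requests
            args = self.conn.recv()
            self.conn.send(self.func(*args)) #run func in the host process

class WorkerEnd:
    #worker side: callable proxy that forwards calls to the host
    def __init__(self, conn):
        self.conn = conn
    def __call__(self, *args):
        self.conn.send(args)
        return self.conn.recv()              #block until the host replies

def make_pair(func):
    host_conn, worker_conn = mp.Pipe()
    return HostEnd(func, host_conn), WorkerEnd(worker_conn)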
converters/ConverterMasked.py
@@ -30,7 +30,8 @@ class ConverterMasked(Converter):
                        base_blur_mask_modifier = 0,
                        default_erode_mask_modifier = 0,
                        default_blur_mask_modifier = 0,
-                       clip_hborder_mask_per = 0):
+                       clip_hborder_mask_per = 0,
+                       force_mask_mode=-1):
 
         super().__init__(predictor_func, Converter.TYPE_FACE)
 
@@ -76,10 +77,13 @@ class ConverterMasked(Converter):
         if self.mode == 'hist-match' or self.mode == 'hist-match-bw' or self.mode == 'seamless-hist-match':
             self.hist_match_threshold = np.clip ( io.input_int("Hist match threshold [0..255] (skip:255) : ", 255), 0, 255)
 
-        if face_type == FaceType.FULL:
-            self.mask_mode = np.clip ( io.input_int ("Mask mode: (1) learned, (2) dst, (3) FAN-prd, (4) FAN-dst , (5) FAN-prd*FAN-dst (6) learned*FAN-prd*FAN-dst (?) help. Default - %d : " % (1) , 1, help_message="If you learned mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images. 'FAN-prd' - using super smooth mask by pretrained FAN-model from predicted face. 'FAN-dst' - using super smooth mask by pretrained FAN-model from dst face. 'FAN-prd*FAN-dst' or 'learned*FAN-prd*FAN-dst' - using multiplied masks."), 1, 6 )
+        if force_mask_mode != -1:
+            self.mask_mode = force_mask_mode
         else:
-            self.mask_mode = np.clip ( io.input_int ("Mask mode: (1) learned, (2) dst . Default - %d : " % (1) , 1), 1, 2 )
+            if face_type == FaceType.FULL:
+                self.mask_mode = np.clip ( io.input_int ("Mask mode: (1) learned, (2) dst, (3) FAN-prd, (4) FAN-dst , (5) FAN-prd*FAN-dst (6) learned*FAN-prd*FAN-dst (?) help. Default - %d : " % (1) , 1, help_message="If you learned mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images. 'FAN-prd' - using super smooth mask by pretrained FAN-model from predicted face. 'FAN-dst' - using super smooth mask by pretrained FAN-model from dst face. 'FAN-prd*FAN-dst' or 'learned*FAN-prd*FAN-dst' - using multiplied masks."), 1, 6 )
+            else:
+                self.mask_mode = np.clip ( io.input_int ("Mask mode: (1) learned, (2) dst . Default - %d : " % (1) , 1), 1, 2 )
 
         if self.mask_mode >= 3 and self.mask_mode <= 6:
             self.fan_seg = None
@@ -118,10 +122,10 @@ class ConverterMasked(Converter):
     #overridable
     def on_cli_initialize(self):
         if (self.mask_mode >= 3 and self.mask_mode <= 6) and self.fan_seg == None:
-            self.fan_seg = FANSegmentator(256, FaceType.toString(FaceType.FULL) )
+            self.fan_seg = FANSegmentator(256, FaceType.toString( self.face_type ) )
 
     #override
-    def cli_convert_face (self, img_bgr, img_face_landmarks, debug):
+    def cli_convert_face (self, img_bgr, img_face_landmarks, debug, **kwargs):
         if debug:
             debugs = [img_bgr.copy()]
 
@@ -171,13 +175,13 @@ class ConverterMasked(Converter):
 
         if self.mask_mode == 3 or self.mask_mode == 5 or self.mask_mode == 6:
             prd_face_bgr_256 = cv2.resize (prd_face_bgr, (256,256) )
-            prd_face_bgr_256_mask = self.fan_seg.extract_from_bgr( prd_face_bgr_256[np.newaxis,...] ) [0]
+            prd_face_bgr_256_mask = self.fan_seg.extract( prd_face_bgr_256 )
             FAN_prd_face_mask_a_0 = cv2.resize (prd_face_bgr_256_mask, (output_size,output_size), cv2.INTER_CUBIC)
 
         if self.mask_mode == 4 or self.mask_mode == 5 or self.mask_mode == 6:
             face_256_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, 256, face_type=FaceType.FULL)
             dst_face_256_bgr = cv2.warpAffine(img_bgr, face_256_mat, (256, 256), flags=cv2.INTER_LANCZOS4 )
-            dst_face_256_mask = self.fan_seg.extract_from_bgr( dst_face_256_bgr[np.newaxis,...] ) [0]
+            dst_face_256_mask = self.fan_seg.extract( dst_face_256_bgr )
             FAN_dst_face_mask_a_0 = cv2.resize (dst_face_256_mask, (output_size,output_size), cv2.INTER_CUBIC)
 
         if self.mask_mode == 3: #FAN-prd
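
Two things to note in this file: force_mask_mode lets callers bypass the interactive mask prompt, and FANSegmentator's API changes from extract_from_bgr(x[np.newaxis,...])[0] to extract(x) on a single image. Mask modes 5 and 6 then combine masks by multiplication, which acts as a soft logical AND on [0, 1] masks: a pixel survives only if every mask keeps it. A hedged sketch of that combination; the diff does this inline, so the combine_masks wrapper and its argument names are illustrative:

import numpy as np

def combine_masks(mask_mode, learned_mask, fan_prd_mask, fan_dst_mask):
    #all masks are float arrays in [0, 1] at the same resolution
    if mask_mode == 5:    #FAN-prd*FAN-dst
        m = fan_prd_mask * fan_dst_mask
    elif mask_mode == 6:  #learned*FAN-prd*FAN-dst
        m = learned_mask * fan_prd_mask * fan_dst_mask
    else:
        raise ValueError(mask_mode)
    return np.clip(m, 0.0, 1.0)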
converters/__init__.py
@@ -1,3 +1,4 @@
 from .Converter import Converter
 from .ConverterMasked import ConverterMasked
 from .ConverterImage import ConverterImage
+from .ConverterAvatar import ConverterAvatar
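
With ConverterAvatar exported from the package, model code can construct it directly. A hedged usage sketch: fake_predictor is a stub standing in for the AVATAR network, with the two-argument (face_bgr, face_mask_a) signature the converter warms up with, and predictor_input_size=64 is an arbitrary illustrative value:

import numpy as np

from converters import ConverterAvatar

def fake_predictor(face_bgr, face_mask_a):
    #a real predictor would run the AVATAR model on the operator face and mask
    return face_bgr

converter = ConverterAvatar(fake_predictor, predictor_input_size=64)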