Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-07-07 05:22:06 -07:00)
Converter: added new mask modes: FAN-prd, FAN-dst
This commit is contained in:
parent 9849bcc1e5
commit c4f41a7e76
5 changed files with 11426 additions and 11214 deletions

@@ -2,6 +2,7 @@ import traceback
from .Converter import Converter
from facelib import LandmarksProcessor
from facelib import FaceType
from facelib import FANSegmentator
import cv2
import numpy as np
from utils import image_utils

@@ -66,7 +67,13 @@ class ConverterMasked(Converter):
        if self.mode == 'hist-match' or self.mode == 'hist-match-bw' or self.mode == 'seamless-hist-match':
            self.hist_match_threshold = np.clip ( io.input_int("Hist match threshold [0..255] (skip:255) : ", 255), 0, 255)

        self.use_predicted_mask = io.input_bool("Use predicted mask? (y/n skip:y) : ", True)

        if face_type == FaceType.FULL:
            self.mask_mode = io.input_int ("Mask mode: (1) learned, (2) dst, (3) FAN-prd, (4) FAN-dst (?) help. Default - %d : " % (1) , 1, help_message="If you learned mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images. 'FAN-prd' - using super smooth mask by pretrained FAN-model from predicted face. 'FAN-dst' - using super smooth mask by pretrained FAN-model from dst face.")
        else:
            self.mask_mode = io.input_int ("Mask mode: (1) learned, (2) dst . Default - %d : " % (1) , 1)

        if self.mask_mode == 3 or self.mask_mode == 4:
            self.fan_seg = None

        if self.mode != 'raw':
            self.erode_mask_modifier = base_erode_mask_modifier + np.clip ( io.input_int ("Choose erode mask modifier [-200..200] (skip:%d) : " % (default_erode_mask_modifier), default_erode_mask_modifier), -200, 200)
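
For reference, the four choices offered above map to the following mask sources. This is a compact restatement of the help message; the dictionary below is illustrative and not part of the commit:

MASK_MODES = {
    1: "learned - mask predicted by the trained model (use this if a mask was learned)",
    2: "dst     - raw, shaky mask taken from the dst aligned images",
    3: "FAN-prd - smooth mask from the pretrained FAN model, computed on the predicted face",
    4: "FAN-dst - smooth mask from the pretrained FAN model, computed on the dst face",
}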

@@ -93,6 +100,8 @@ class ConverterMasked(Converter):

    #override
    def convert_face (self, img_bgr, img_face_landmarks, debug):
        if (self.mask_mode == 3 or self.mask_mode == 4) and self.fan_seg == None:
            self.fan_seg = FANSegmentator(256, FaceType.toString(FaceType.FULL) )

        if self.over_res != 1:
            img_bgr = cv2.resize ( img_bgr, ( img_bgr.shape[1]*self.over_res, img_bgr.shape[0]*self.over_res ) )
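
Note that the segmentator is not built in __init__; it is created on the first convert_face call, so the FAN model is only loaded when mode 3 or 4 was actually selected. A minimal reduction of that pattern, assuming only the constructor call visible in the diff (the holder class and method names are illustrative):

from facelib import FaceType, FANSegmentator

class LazyFanSeg:
    def __init__(self, mask_mode):
        self.mask_mode = mask_mode
        self.fan_seg = None  # heavy model is not loaded up front

    def get(self):
        # Build the segmentator only when a FAN-based mode (3 or 4) actually needs it.
        if self.mask_mode in (3, 4) and self.fan_seg is None:
            self.fan_seg = FANSegmentator(256, FaceType.toString(FaceType.FULL))
        return self.fan_seg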

@@ -120,8 +129,17 @@ class ConverterMasked(Converter):
        prd_face_bgr = np.clip (predicted_bgra[:,:,0:3], 0, 1.0 )
        prd_face_mask_a_0 = np.clip (predicted_bgra[:,:,3], 0.0, 1.0)

        if not self.use_predicted_mask:
            if self.mask_mode == 2: #dst
                prd_face_mask_a_0 = predictor_input_mask_a_0
            elif self.mask_mode == 3: #FAN-prd
                prd_face_bgr_256 = cv2.resize (prd_face_bgr, (256,256) )
                prd_face_bgr_256_mask = self.fan_seg.extract_from_bgr( np.expand_dims(prd_face_bgr_256,0) ) [0]
                prd_face_mask_a_0 = cv2.resize (prd_face_bgr_256_mask, (self.predictor_input_size, self.predictor_input_size))
            elif self.mask_mode == 4: #FAN-dst
                face_256_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, 256, face_type=FaceType.FULL)
                dst_face_256_bgr = cv2.warpAffine(img_bgr, face_256_mat, (256, 256), flags=cv2.INTER_LANCZOS4 )
                dst_face_256_mask = self.fan_seg.extract_from_bgr( np.expand_dims(dst_face_256_bgr,0) ) [0]
                prd_face_mask_a_0 = cv2.resize (dst_face_256_mask, (self.predictor_input_size, self.predictor_input_size))

        prd_face_mask_a_0[ prd_face_mask_a_0 < 0.001 ] = 0.0
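
Put together, the FAN-dst branch aligns the destination face to the 256x256 crop the FAN model expects, segments it, and resizes the mask down to the predictor's resolution. A standalone sketch of that pipeline using only the calls visible in the diff (the function name and the optional fan_seg argument are illustrative):

import cv2
import numpy as np
from facelib import LandmarksProcessor, FaceType, FANSegmentator

def fan_dst_mask(img_bgr, img_face_landmarks, predictor_input_size, fan_seg=None):
    if fan_seg is None:
        fan_seg = FANSegmentator(256, FaceType.toString(FaceType.FULL))

    # Warp the full-face region of the source frame to a 256x256 crop.
    mat = LandmarksProcessor.get_transform_mat(img_face_landmarks, 256, face_type=FaceType.FULL)
    dst_face_256_bgr = cv2.warpAffine(img_bgr, mat, (256, 256), flags=cv2.INTER_LANCZOS4)

    # The segmentator consumes a batch, so add and strip a leading batch dimension.
    mask_256 = fan_seg.extract_from_bgr(np.expand_dims(dst_face_256_bgr, 0))[0]

    # Resize the mask to the resolution of the predictor output it replaces.
    return cv2.resize(mask_256, (predictor_input_size, predictor_input_size))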

Binary file not shown.

doc/manual_ru_source.xml (22563)
File diff suppressed because one or more lines are too long

facelib/FANSeg_256_full_face.h5 (BIN, new file)
Binary file not shown.

@@ -98,7 +98,6 @@ class FANSegmentator(object):
        def func(input):
            input_len = len(input)

            x = input[input_len-1]
            for i in range(input_len-1, -1, -1):
                x = upscale( min(ngf* (2**i) *4, ngf*8 *4 ) )(x)
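
The loop above walks the encoder outputs from the deepest level to the shallowest, attaching one upscale block per level; min(ngf*(2**i)*4, ngf*8*4) caps the filter count at the deepest levels. A quick check of the filter progression that formula produces (ngf=64 and five levels are assumptions for illustration):

ngf = 64      # assumed base filter count, for illustration only
levels = 5    # assumed number of encoder levels

for i in range(levels - 1, -1, -1):
    filters = min(ngf * (2 ** i) * 4, ngf * 8 * 4)
    print(i, filters)
# prints 2048, 2048, 1024, 512, 256: the deepest levels are clamped to ngf*8*4,
# while the shallowest shrinks to ngf*4.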