Mirror of https://github.com/iperov/DeepFaceLab.git
Synced 2025-07-06 13:02:15 -07:00
Added new face type : head

Now you can replace the whole head. Example: https://www.youtube.com/watch?v=xr5FHd0AdlQ

Requirements: post-processing skill in Adobe After Effects or DaVinci Resolve.

Usage:
1) Find suitable dst footage with a uniform background behind the head.
2) Use the "extract head" script.
3) Gather a rich src head set from a single scene (same hair color and haircut).
4) Mask the whole head for src and dst using the XSeg editor.
5) Train XSeg.
6) Apply the trained XSeg mask to the src and dst head sets.
7) Train SAEHD using the 'head' face_type as a regular deepfake model with the DF archi. A pretrained model can be used for head. The minimum recommended resolution for head is 224.
8) Extract multiple tracks using the Merger:
   a. raw-rgb
   b. XSeg-prd mask
   c. XSeg-dst mask
9) In After Effects or DaVinci Resolve:
   a. Hide the source head using the XSeg-prd mask: content-aware fill, clone stamp, background retraction, or another technique.
   b. Overlay the new head using the XSeg-dst mask (a minimal compositing sketch follows below).

Warning: a head faceset can be used for whole_face or lower face types of training only with XSeg masking.

XSegEditor: added a 'view trained XSeg mask' button, so you can see which frames should be masked to improve mask quality.
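The compositing in step 9 is meant to be done in After Effects or DaVinci Resolve; the sketch below only illustrates the idea of combining the three Merger tracks with numpy/OpenCV. File names and the simple alpha blend are illustrative assumptions, not part of this commit.

    # Minimal sketch of step 9b: overlay the merged head onto a cleaned plate
    # using the exported XSeg-dst mask. Paths are placeholders.
    import cv2
    import numpy as np

    frame_bg   = cv2.imread('cleaned_background_0001.png')                      # plate with the src head removed (step 9a)
    frame_head = cv2.imread('merged_raw_rgb_0001.png')                          # Merger raw-rgb track
    mask       = cv2.imread('merged_xseg_dst_0001.png', cv2.IMREAD_GRAYSCALE)   # Merger XSeg-dst mask track

    alpha = (mask.astype(np.float32) / 255.0)[..., None]                        # HxWx1 in [0..1]
    comp  = frame_head.astype(np.float32)*alpha + frame_bg.astype(np.float32)*(1.0 - alpha)
    cv2.imwrite('composite_0001.png', comp.astype(np.uint8))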
This commit is contained in:
  parent 383d4d3736
  commit 2b7364005d

21 changed files with 506 additions and 413 deletions
@@ -1,13 +1,16 @@
 import pickle
 import struct
+import traceback

 import cv2
 import numpy as np

+from core import imagelib
+from core.imagelib import SegIEPolys
 from core.interact import interact as io
 from core.structex import *
 from facelib import FaceType
-from core.imagelib import SegIEPolys

 class DFLJPG(object):
     def __init__(self, filename):

@@ -148,7 +151,7 @@ class DFLJPG(object):
             return inst
         except Exception as e:
-            print (e)
+            io.log_err (f'Exception occured while DFLJPG.load : {traceback.format_exc()}')
             return None

     def has_data(self):

@@ -272,7 +275,6 @@ class DFLJPG(object):
         if len(img.shape) == 2:
             img = img[...,None]
-
         return img.astype(np.float32) / 255.0

@@ -281,13 +283,20 @@ class DFLJPG(object):
             self.dfl_dict['xseg_mask'] = None
             return

-        ret, buf = cv2.imencode( '.png', np.clip( mask_a*255, 0, 255 ).astype(np.uint8) )
+        mask_a = imagelib.normalize_channels(mask_a, 1)
+        img_data = np.clip( mask_a*255, 0, 255 ).astype(np.uint8)
+
+        data_max_len = 4096
+
+        ret, buf = cv2.imencode('.png', img_data)
+
+        if not ret or len(buf) > data_max_len:
+            for jpeg_quality in range(100,-1,-1):
+                ret, buf = cv2.imencode( '.jpg', img_data, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality] )
+                if ret and len(buf) <= data_max_len:
+                    break

         if not ret:
-            raise Exception("unable to generate PNG data for set_xseg_mask")
+            raise Exception("set_xseg_mask: unable to generate image data for set_xseg_mask")

         self.dfl_dict['xseg_mask'] = buf
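For context, the new set_xseg_mask logic keeps the stored mask small: it encodes to lossless PNG first and, if the buffer exceeds 4096 bytes, falls back to JPEG at decreasing quality until it fits. A standalone sketch of the same idea (the helper name is an assumption for illustration):

    import cv2
    import numpy as np

    def encode_mask(mask_a, data_max_len=4096):
        # mask_a: float mask in [0..1]; returns an encoded buffer, preferring PNG
        img_data = np.clip(mask_a*255, 0, 255).astype(np.uint8)
        ret, buf = cv2.imencode('.png', img_data)
        if not ret or len(buf) > data_max_len:
            for jpeg_quality in range(100, -1, -1):   # degrade quality until the buffer fits
                ret, buf = cv2.imencode('.jpg', img_data, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality])
                if ret and len(buf) <= data_max_len:
                    break
        if not ret:
            raise Exception('unable to encode mask')
        return buf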
@@ -38,6 +38,13 @@ DeepFaceLab is used by such popular youtube channels as

 <img src="doc/replace_the_face.png" align="center">

+</td></tr>
+<tr><td align="center" width="9999">
+
+## Replace the head
+
+
+
 </td></tr>
 <tr><td align="center" width="9999">
@@ -19,3 +19,4 @@ class QIconDB():
         QIconDB.right = QIcon ( str(icon_path / 'right.png') )
         QIconDB.pt_edit_mode = QIcon ( str(icon_path / 'pt_edit_mode.png') )
         QIconDB.view_baked = QIcon ( str(icon_path / 'view_baked.png') )
+        QIconDB.view_xseg = QIcon ( str(icon_path / 'view_xseg.png') )
@@ -30,6 +30,11 @@ class QStringDB():
                                           'zh' : '查看遮罩通道',
                                         }[lang]

+        QStringDB.btn_view_xseg_mask_tip = { 'en' : 'View trained XSeg mask',
+                                             'ru' : 'Посмотреть тренированную XSeg маску',
+                                             'zh' : '查看导入后的XSeg遮罩',
+                                           }[lang]
+
         QStringDB.btn_poly_type_include_tip = { 'en' : 'Poly include mode',
                                                 'ru' : 'Режим полигонов - включение',
                                                 'zh' : '包含选区模式',
@@ -18,6 +18,7 @@ from PyQt5.QtWidgets import *

 from core import pathex
 from core.cv2ex import *
+from core import imagelib
 from core.imagelib import SegIEPoly, SegIEPolys, SegIEPolyType, sd
 from core.qtex import *
 from DFLIMG import *

@@ -33,6 +34,7 @@ class OpMode(IntEnum):
     DRAW_PTS = 1
     EDIT_PTS = 2
     VIEW_BAKED = 3
+    VIEW_XSEG_MASK = 4

 class PTEditMode(IntEnum):
     MOVE = 0

@@ -244,11 +246,17 @@ class QCanvasControlsRightBar(QFrame):
         btn_view_baked_mask.setDefaultAction(self.btn_view_baked_mask_act)
         btn_view_baked_mask.setIconSize(QUIConfig.icon_q_size)

+        btn_view_xseg_mask = QToolButton()
+        self.btn_view_xseg_mask_act = QActionEx( QIconDB.view_xseg, QStringDB.btn_view_xseg_mask_tip, shortcut='5', shortcut_in_tooltip=True, is_checkable=True)
+        btn_view_xseg_mask.setDefaultAction(self.btn_view_xseg_mask_act)
+        btn_view_xseg_mask.setIconSize(QUIConfig.icon_q_size)
+
         self.btn_poly_color_act_grp = QActionGroup (self)
         self.btn_poly_color_act_grp.addAction(self.btn_poly_color_red_act)
         self.btn_poly_color_act_grp.addAction(self.btn_poly_color_green_act)
         self.btn_poly_color_act_grp.addAction(self.btn_poly_color_blue_act)
         self.btn_poly_color_act_grp.addAction(self.btn_view_baked_mask_act)
+        self.btn_poly_color_act_grp.addAction(self.btn_view_xseg_mask_act)
         self.btn_poly_color_act_grp.setExclusive(True)
         #==============================================

@@ -257,6 +265,7 @@ class QCanvasControlsRightBar(QFrame):
         controls_bar_frame1_l.addWidget ( btn_poly_color_green )
         controls_bar_frame1_l.addWidget ( btn_poly_color_blue )
         controls_bar_frame1_l.addWidget ( btn_view_baked_mask )
+        controls_bar_frame1_l.addWidget ( btn_view_xseg_mask )
         controls_bar_frame1 = QFrame()
         controls_bar_frame1.setFrameShape(QFrame.StyledPanel)
         controls_bar_frame1.setSizePolicy (QSizePolicy.Fixed, QSizePolicy.Fixed)

@@ -274,12 +283,13 @@ class QCanvasOperator(QWidget):
         super().__init__()
         self.cbar = cbar

-        self.set_cbar_disabled(initialize=False)
+        self.set_cbar_disabled()

         self.cbar.btn_poly_color_red_act.triggered.connect ( lambda : self.set_color_scheme_id(0) )
         self.cbar.btn_poly_color_green_act.triggered.connect ( lambda : self.set_color_scheme_id(1) )
         self.cbar.btn_poly_color_blue_act.triggered.connect ( lambda : self.set_color_scheme_id(2) )
-        self.cbar.btn_view_baked_mask_act.toggled.connect ( self.set_view_baked_mask )
+        self.cbar.btn_view_baked_mask_act.toggled.connect ( lambda : self.set_op_mode(OpMode.VIEW_BAKED) )
+        self.cbar.btn_view_xseg_mask_act.toggled.connect ( self.set_view_xseg_mask )

         self.cbar.btn_poly_type_include_act.triggered.connect ( lambda : self.set_poly_include_type(SegIEPolyType.INCLUDE) )
         self.cbar.btn_poly_type_exclude_act.triggered.connect ( lambda : self.set_poly_include_type(SegIEPolyType.EXCLUDE) )
@@ -298,10 +308,19 @@ class QCanvasOperator(QWidget):

         self.qp = QPainter()
         self.initialized = False
+        self.last_state = None

-    def initialize(self, q_img, img_look_pt=None, view_scale=None, ie_polys=None, canvas_config=None ):
+    def initialize(self, q_img, img_look_pt=None, view_scale=None, ie_polys=None, xseg_mask=None, canvas_config=None ):
         self.q_img = q_img
         self.img_pixmap = QPixmap.fromImage(q_img)
+
+        self.xseg_mask_pixmap = None
+        if xseg_mask is not None:
+            w,h = QSize_to_np ( q_img.size() )
+            xseg_mask = cv2.resize(xseg_mask, (w,h), cv2.INTER_CUBIC)
+            xseg_mask = (imagelib.normalize_channels(xseg_mask, 1) * 255).astype(np.uint8)
+            self.xseg_mask_pixmap = QPixmap.fromImage(QImage_from_np(xseg_mask))
+
         self.img_size = QSize_to_np (self.img_pixmap.size())

         self.img_look_pt = img_look_pt

@@ -315,44 +334,48 @@ class QCanvasOperator(QWidget):
             canvas_config = CanvasConfig()
         self.canvas_config = canvas_config

+        # UI init
+        self.set_cbar_disabled()
+        self.cbar.btn_poly_color_act_grp.setDisabled(False)
+        self.cbar.btn_poly_type_act_grp.setDisabled(False)
+
+        # Initial vars
         self.current_cursor = None

         self.mouse_hull_poly = None
         self.mouse_wire_poly = None

         self.drag_type = DragType.NONE
-        self.op_mode = None
-        self.pt_edit_mode = None
-
-        if not hasattr(self, 'color_scheme_id' ):
-            self.color_scheme_id = 1
-        self.set_color_scheme_id(self.color_scheme_id)

+        # Initial state
         self.set_op_mode(OpMode.NONE)
+        self.set_color_scheme_id(1)
+        self.set_poly_include_type(SegIEPolyType.INCLUDE)
         self.set_pt_edit_mode(PTEditMode.MOVE)
-        self.set_view_baked_mask(False)
-
-        self.set_cbar_disabled(initialize=True)
-
-        if not hasattr(self, 'poly_include_type' ):
-            self.poly_include_type = SegIEPolyType.INCLUDE
-        self.set_poly_include_type(self.poly_include_type)

+        # Apply last state
+        if self.last_state is not None:
+            self.set_color_scheme_id(self.last_state.color_scheme_id)
+            if self.last_state.op_mode is not None:
+                self.set_op_mode(self.last_state.op_mode)
+
+        self.initialized = True

         self.setMouseTracking(True)
         self.update_cursor()
         self.update()
-        self.initialized = True

     def finalize(self):
         if self.initialized:
+            self.last_state = sn(op_mode = self.op_mode if self.op_mode in [OpMode.VIEW_BAKED, OpMode.VIEW_XSEG_MASK] else None,
+                                 color_scheme_id = self.color_scheme_id,
+                               )
+
             self.img_pixmap = None
             self.update_cursor(is_finalize=True)
             self.setMouseTracking(False)
             self.setFocusPolicy(Qt.NoFocus)
-            self.set_cbar_disabled(initialize=False)
+            self.set_cbar_disabled()
             self.initialized = False
             self.update()
@@ -445,16 +468,18 @@ class QCanvasOperator(QWidget):
     # ====================================== SETTERS =====================================
     # ====================================================================================
     # ====================================================================================

     def set_op_mode(self, op_mode, op_poly=None):
-        if op_mode != self.op_mode:
+        if not hasattr(self,'op_mode'):
+            self.op_mode = None
+            self.op_poly = None
+
+        if self.op_mode != op_mode:
+            # Finalize prev mode
             if self.op_mode == OpMode.NONE:
                 self.cbar.btn_poly_type_act_grp.setDisabled(True)
             elif self.op_mode == OpMode.DRAW_PTS:
                 self.cbar.btn_undo_pt_act.setDisabled(True)
                 self.cbar.btn_redo_pt_act.setDisabled(True)

                 if self.op_poly.get_pts_count() < 3:
                     # Remove unfinished poly
                     self.ie_polys.remove_poly(self.op_poly)

@@ -463,59 +488,69 @@ class QCanvasOperator(QWidget):
                 self.cbar.btn_delete_poly_act.setDisabled(True)
                 # Reset pt_edit_move when exit from EDIT_PTS
                 self.set_pt_edit_mode(PTEditMode.MOVE)
+            elif self.op_mode == OpMode.VIEW_BAKED:
+                self.cbar.btn_view_baked_mask_act.setChecked(False)
+            elif self.op_mode == OpMode.VIEW_XSEG_MASK:
+                self.cbar.btn_view_xseg_mask_act.setChecked(False)

             self.op_mode = op_mode

-            if self.op_mode == OpMode.NONE:
+            # Initialize new mode
+            if op_mode == OpMode.NONE:
                 self.cbar.btn_poly_type_act_grp.setDisabled(False)
-            elif self.op_mode == OpMode.DRAW_PTS:
+            elif op_mode == OpMode.DRAW_PTS:
                 self.cbar.btn_undo_pt_act.setDisabled(False)
                 self.cbar.btn_redo_pt_act.setDisabled(False)
-            elif self.op_mode == OpMode.EDIT_PTS:
+            elif op_mode == OpMode.EDIT_PTS:
                 self.cbar.btn_pt_edit_mode_act.setDisabled(False)
                 self.cbar.btn_delete_poly_act.setDisabled(False)
+            elif op_mode == OpMode.VIEW_BAKED:
+                self.cbar.btn_view_baked_mask_act.setChecked(True )
+                n = QImage_to_np ( self.q_img ).astype(np.float32) / 255.0
+                h,w,c = n.shape
+                mask = np.zeros( (h,w,1), dtype=np.float32 )
+                self.ie_polys.overlay_mask(mask)
+                n = (mask*255).astype(np.uint8)
+                self.img_baked_pixmap = QPixmap.fromImage(QImage_from_np(n))
+            elif op_mode == OpMode.VIEW_XSEG_MASK:
+                self.cbar.btn_view_xseg_mask_act.setChecked(True)

-            if self.op_mode in [OpMode.DRAW_PTS, OpMode.EDIT_PTS]:
+            if op_mode in [OpMode.DRAW_PTS, OpMode.EDIT_PTS]:
                 self.mouse_op_poly_pt_id = None
                 self.mouse_op_poly_edge_id = None
                 self.mouse_op_poly_edge_id_pt = None

-            self.set_op_poly(op_poly)
+            #
+            self.op_poly = op_poly
+            if op_poly is not None:
+                self.update_mouse_info()

             self.update_cursor()
             self.update()

-    def set_op_poly(self, op_poly):
-        self.op_poly = op_poly
-        if op_poly is not None:
-            self.update_mouse_info()
-        self.update()

     def set_pt_edit_mode(self, pt_edit_mode):
-        if self.pt_edit_mode != pt_edit_mode:
+        if not hasattr(self, 'pt_edit_mode') or self.pt_edit_mode != pt_edit_mode:
             self.pt_edit_mode = pt_edit_mode
             self.update_cursor()
             self.update()

         self.cbar.btn_pt_edit_mode_act.setChecked( self.pt_edit_mode == PTEditMode.ADD_DEL )

-    def set_cbar_disabled(self, initialize):
+    def set_cbar_disabled(self):
         self.cbar.btn_delete_poly_act.setDisabled(True)
         self.cbar.btn_undo_pt_act.setDisabled(True)
         self.cbar.btn_redo_pt_act.setDisabled(True)
         self.cbar.btn_pt_edit_mode_act.setDisabled(True)
-        if initialize:
-            self.cbar.btn_poly_color_act_grp.setDisabled(False)
-            self.cbar.btn_poly_type_act_grp.setDisabled(False)
-        else:
-            self.cbar.btn_poly_color_act_grp.setDisabled(True)
-            self.cbar.btn_poly_type_act_grp.setDisabled(True)
+        self.cbar.btn_poly_color_act_grp.setDisabled(True)
+        self.cbar.btn_poly_type_act_grp.setDisabled(True)

     def set_color_scheme_id(self, id):
-        if self.color_scheme_id != id:
+        if self.op_mode == OpMode.VIEW_BAKED:
+            self.set_op_mode(OpMode.NONE)
+
+        if not hasattr(self, 'color_scheme_id') or self.color_scheme_id != id:
             self.color_scheme_id = id
             self.update_cursor()
             self.update()

         if self.color_scheme_id == 0:
             self.cbar.btn_poly_color_red_act.setChecked( True )
         elif self.color_scheme_id == 1:
@@ -524,33 +559,33 @@ class QCanvasOperator(QWidget):
             self.cbar.btn_poly_color_blue_act.setChecked( True )

     def set_poly_include_type(self, poly_include_type):
-        if self.op_mode in [OpMode.NONE, OpMode.EDIT_PTS]:
-            if self.poly_include_type != poly_include_type:
-                self.poly_include_type = poly_include_type
-                self.update()
+        if not hasattr(self, 'poly_include_type' ) or \
+           ( self.poly_include_type != poly_include_type and \
+             self.op_mode in [OpMode.NONE, OpMode.EDIT_PTS] ):
+            self.poly_include_type = poly_include_type
+            self.update()

         self.cbar.btn_poly_type_include_act.setChecked(self.poly_include_type == SegIEPolyType.INCLUDE)
         self.cbar.btn_poly_type_exclude_act.setChecked(self.poly_include_type == SegIEPolyType.EXCLUDE)

-    def set_view_baked_mask(self, is_checked):
+    def set_view_xseg_mask(self, is_checked):
         if is_checked:
-            self.set_op_mode(OpMode.VIEW_BAKED)
-
-            n = QImage_to_np ( self.q_img ).astype(np.float32) / 255.0
-            h,w,c = n.shape
-
-            mask = np.zeros( (h,w,1), dtype=np.float32 )
-            self.ie_polys.overlay_mask(mask)
-
-            n = (mask*255).astype(np.uint8)
-
-            self.img_baked_pixmap = QPixmap.fromImage(QImage_from_np(n))
+            self.set_op_mode(OpMode.VIEW_XSEG_MASK)
+
+            #n = QImage_to_np ( self.q_img ).astype(np.float32) / 255.0
+            #h,w,c = n.shape
+
+            #mask = np.zeros( (h,w,1), dtype=np.float32 )
+            #self.ie_polys.overlay_mask(mask)
+
+            #n = (mask*255).astype(np.uint8)
+
+            #self.img_baked_pixmap = QPixmap.fromImage(QImage_from_np(n))
         else:
             self.set_op_mode(OpMode.NONE)

-        self.cbar.btn_view_baked_mask_act.setChecked(is_checked )
+        self.cbar.btn_view_xseg_mask_act.setChecked(is_checked )

     # ====================================================================================
     # ====================================================================================
@@ -764,7 +799,6 @@ class QCanvasOperator(QWidget):
                 # other cases -> unselect poly
                 self.set_op_mode(OpMode.NONE)

-
         elif btn == Qt.MiddleButton:
             if self.drag_type == DragType.NONE:
                 # Start image drag

@@ -773,6 +807,7 @@ class QCanvasOperator(QWidget):
                 self.drag_img_look_pt = self.get_img_look_pt()
                 self.update_cursor()

+
     def mouseReleaseEvent(self, ev):
         super().mouseReleaseEvent(ev)
         if not self.initialized:

@@ -855,6 +890,11 @@ class QCanvasOperator(QWidget):
                 src_rect = QRect(0, 0, *self.img_size)
                 dst_rect = self.img_to_cli_rect( src_rect )
                 qp.drawPixmap(dst_rect, self.img_baked_pixmap, src_rect)
+        elif self.op_mode == OpMode.VIEW_XSEG_MASK:
+            if self.xseg_mask_pixmap is not None:
+                src_rect = QRect(0, 0, *self.img_size)
+                dst_rect = self.img_to_cli_rect( src_rect )
+                qp.drawPixmap(dst_rect, self.xseg_mask_pixmap, src_rect)
         else:
             if self.img_pixmap is not None:
                 src_rect = QRect(0, 0, *self.img_size)
@@ -980,6 +1020,7 @@ class QCanvas(QFrame):
                               btn_poly_color_green_act = self.canvas_control_right_bar.btn_poly_color_green_act,
                               btn_poly_color_blue_act = self.canvas_control_right_bar.btn_poly_color_blue_act,
                               btn_view_baked_mask_act = self.canvas_control_right_bar.btn_view_baked_mask_act,
+                              btn_view_xseg_mask_act = self.canvas_control_right_bar.btn_view_xseg_mask_act,
                               btn_poly_color_act_grp = self.canvas_control_right_bar.btn_poly_color_act_grp,

                               btn_poly_type_include_act = self.canvas_control_left_bar.btn_poly_type_include_act,

@@ -1124,9 +1165,9 @@ class MainWindow(QXMainWindow):
             if img is None:
                 img = QImage_from_np(cv2_imread(image_path))
                 if img is None:
-                    raise Exception(f'Unable to load {image_path}')
+                    io.log_err(f'Unable to load {image_path}')
         except:
-            io.log_err(f"{traceback.format_exc()}")
+            img = None

         return img

@@ -1143,25 +1184,32 @@ class MainWindow(QXMainWindow):
             return False

         dflimg = DFLIMG.load(image_path)
-        ie_polys = dflimg.get_seg_ie_polys()
-        q_img = self.load_QImage(image_path)
-
-        self.canvas.op.initialize ( q_img, ie_polys=ie_polys )
+        if not dflimg or not dflimg.has_data():
+            return False
+
+        ie_polys = dflimg.get_seg_ie_polys()
+        xseg_mask = dflimg.get_xseg_mask()
+        q_img = self.load_QImage(image_path)
+        if q_img is None:
+            return False
+
+        self.canvas.op.initialize ( q_img, ie_polys=ie_polys, xseg_mask=xseg_mask )

         self.filename_label.setText(str(image_path.name))

         return True

     def canvas_finalize(self, image_path):
-        dflimg = DFLIMG.load(image_path)
-
-        ie_polys = dflimg.get_seg_ie_polys()
-        new_ie_polys = self.canvas.op.get_ie_polys()
+        if image_path.exists():
+            dflimg = DFLIMG.load(image_path)
+            ie_polys = dflimg.get_seg_ie_polys()
+            new_ie_polys = self.canvas.op.get_ie_polys()

             if not new_ie_polys.identical(ie_polys):
                 self.image_paths_has_ie_polys[image_path] = new_ie_polys.has_polys()
                 dflimg.set_seg_ie_polys( new_ie_polys )
                 dflimg.save()

         self.canvas.op.finalize()
         self.filename_label.setText("")

@@ -1183,8 +1231,9 @@ class MainWindow(QXMainWindow):
             if len(self.image_paths) == 0:
                 break

-            if self.canvas_initialize(self.image_paths[0], len(self.image_paths_done) != 0 and only_has_polys):
+            ret = self.canvas_initialize(self.image_paths[0], len(self.image_paths_done) != 0 and only_has_polys)
+
+            if ret or len(self.image_paths_done) == 0:
                 break

         self.update_cached_images()
BIN  XSegEditor/gfx/icons/view_xseg.png   (new file; binary file not shown; size: 9.5 KiB)
BIN  facelib/3DFAN.npy                    (new file; binary file not shown)
@@ -13,8 +13,9 @@ from core.leras import nn
 ported from https://github.com/1adrianb/face-alignment
 """
 class FANExtractor(object):
-    def __init__ (self, place_model_on_cpu=False):
-        model_path = Path(__file__).parent / "FAN.npy"
+    def __init__ (self, landmarks_3D=False, place_model_on_cpu=False):
+        model_path = Path(__file__).parent / ( "2DFAN.npy" if not landmarks_3D else "3DFAN.npy")
         if not model_path.exists():
             raise Exception("Unable to load FANExtractor model")
@@ -2,16 +2,15 @@ from enum import IntEnum

 class FaceType(IntEnum):
     #enumerating in order "next contains prev"
-    MOUTH = -1
     HALF = 0
     MID_FULL = 1
     FULL = 2
     FULL_NO_ALIGN = 3
     WHOLE_FACE = 4
-    HEAD = 5
-    HEAD_NO_ALIGN = 6
+    HEAD = 10
+    HEAD_NO_ALIGN = 20

-    MARK_ONLY = 10, #no align at all, just embedded faceinfo
+    MARK_ONLY = 100, #no align at all, just embedded faceinfo

     @staticmethod
     def fromString (s):

@@ -24,23 +23,15 @@ class FaceType(IntEnum):
     def toString (face_type):
         return to_string_dict[face_type]

-from_string_dict = {'mouth': FaceType.MOUTH,
-                    'half_face': FaceType.HALF,
-                    'midfull_face': FaceType.MID_FULL,
-                    'full_face': FaceType.FULL,
-                    'whole_face': FaceType.WHOLE_FACE,
-                    'head' : FaceType.HEAD,
-                    'mark_only' : FaceType.MARK_ONLY,
-                    'full_face_no_align' : FaceType.FULL_NO_ALIGN,
-                    'head_no_align' : FaceType.HEAD_NO_ALIGN,
-                   }
-to_string_dict = { FaceType.MOUTH : 'mouth',
-                   FaceType.HALF : 'half_face',
+to_string_dict = { FaceType.HALF : 'half_face',
                    FaceType.MID_FULL : 'midfull_face',
                    FaceType.FULL : 'full_face',
+                   FaceType.FULL_NO_ALIGN : 'full_face_no_align',
                    FaceType.WHOLE_FACE : 'whole_face',
                    FaceType.HEAD : 'head',
+                   FaceType.HEAD_NO_ALIGN : 'head_no_align',
+
                    FaceType.MARK_ONLY :'mark_only',
-                   FaceType.FULL_NO_ALIGN : 'full_face_no_align',
-                   FaceType.HEAD_NO_ALIGN : 'head_no_align'
                  }
+
+from_string_dict = { to_string_dict[x] : x for x in to_string_dict.keys() }
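Deriving from_string_dict from to_string_dict keeps the two mappings consistent, and the renumbering (HEAD = 10, HEAD_NO_ALIGN = 20, MARK_ONLY = 100) preserves the "next contains prev" ordering while leaving room for future types. A quick round-trip check (illustrative only, assuming the facelib package is importable):

    from facelib import FaceType

    for name in ('half_face', 'whole_face', 'head', 'head_no_align', 'mark_only'):
        assert FaceType.toString(FaceType.fromString(name)) == name

    # ordering comparisons such as face_type >= FaceType.HEAD rely on this numbering
    assert FaceType.WHOLE_FACE < FaceType.HEAD < FaceType.HEAD_NO_ALIGN < FaceType.MARK_ONLY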
@@ -134,86 +134,85 @@ landmarks_68_pt = { "mouth": (48,68),
                     "nose": (27, 36), # missed one point
                     "jaw": (0, 17) }

 landmarks_68_3D = np.array( [
 [-73.393523 , -29.801432 , 47.667532 ], #00
 [-72.775014 , -10.949766 , 45.909403 ], #01
 [-70.533638 , 7.929818 , 44.842580 ], #02
 [-66.850058 , 26.074280 , 43.141114 ], #03
 [-59.790187 , 42.564390 , 38.635298 ], #04
 [-48.368973 , 56.481080 , 30.750622 ], #05
 [-34.121101 , 67.246992 , 18.456453 ], #06
 [-17.875411 , 75.056892 , 3.609035 ], #07
 [0.098749 , 77.061286 , -0.881698 ], #08
 [17.477031 , 74.758448 , 5.181201 ], #09
 [32.648966 , 66.929021 , 19.176563 ], #10
 [46.372358 , 56.311389 , 30.770570 ], #11
 [57.343480 , 42.419126 , 37.628629 ], #12
 [64.388482 , 25.455880 , 40.886309 ], #13
 [68.212038 , 6.990805 , 42.281449 ], #14
 [70.486405 , -11.666193 , 44.142567 ], #15
 [71.375822 , -30.365191 , 47.140426 ], #16
 [-61.119406 , -49.361602 , 14.254422 ], #17
 [-51.287588 , -58.769795 , 7.268147 ], #18
 [-37.804800 , -61.996155 , 0.442051 ], #19
 [-24.022754 , -61.033399 , -6.606501 ], #20
 [-11.635713 , -56.686759 , -11.967398 ], #21
 [12.056636 , -57.391033 , -12.051204 ], #22
 [25.106256 , -61.902186 , -7.315098 ], #23
 [38.338588 , -62.777713 , -1.022953 ], #24
 [51.191007 , -59.302347 , 5.349435 ], #25
 [60.053851 , -50.190255 , 11.615746 ], #26
 [0.653940 , -42.193790 , -13.380835 ], #27
 [0.804809 , -30.993721 , -21.150853 ], #28
 [0.992204 , -19.944596 , -29.284036 ], #29
 [1.226783 , -8.414541 , -36.948060 ], #00
 [-14.772472 , 2.598255 , -20.132003 ], #01
 [-7.180239 , 4.751589 , -23.536684 ], #02
 [0.555920 , 6.562900 , -25.944448 ], #03
 [8.272499 , 4.661005 , -23.695741 ], #04
 [15.214351 , 2.643046 , -20.858157 ], #05
 [-46.047290 , -37.471411 , 7.037989 ], #06
 [-37.674688 , -42.730510 , 3.021217 ], #07
 [-27.883856 , -42.711517 , 1.353629 ], #08
 [-19.648268 , -36.754742 , -0.111088 ], #09
 [-28.272965 , -35.134493 , -0.147273 ], #10
 [-38.082418 , -34.919043 , 1.476612 ], #11
 [19.265868 , -37.032306 , -0.665746 ], #12
 [27.894191 , -43.342445 , 0.247660 ], #13
 [37.437529 , -43.110822 , 1.696435 ], #14
 [45.170805 , -38.086515 , 4.894163 ], #15
 [38.196454 , -35.532024 , 0.282961 ], #16
 [28.764989 , -35.484289 , -1.172675 ], #17
 [-28.916267 , 28.612716 , -2.240310 ], #18
 [-17.533194 , 22.172187 , -15.934335 ], #19
 [-6.684590 , 19.029051 , -22.611355 ], #20
 [0.381001 , 20.721118 , -23.748437 ], #21
 [8.375443 , 19.035460 , -22.721995 ], #22
 [18.876618 , 22.394109 , -15.610679 ], #23
 [28.794412 , 28.079924 , -3.217393 ], #24
 [19.057574 , 36.298248 , -14.987997 ], #25
 [8.956375 , 39.634575 , -22.554245 ], #26
 [0.381549 , 40.395647 , -23.591626 ], #27
 [-7.428895 , 39.836405 , -22.406106 ], #28
 [-18.160634 , 36.677899 , -15.121907 ], #29
 [-24.377490 , 28.677771 , -4.785684 ], #30
 [-6.897633 , 25.475976 , -20.893742 ], #31
 [0.340663 , 26.014269 , -22.220479 ], #32
 [8.444722 , 25.326198 , -21.025520 ], #33
 [24.474473 , 28.323008 , -5.712776 ], #34
 [8.449166 , 30.596216 , -20.671489 ], #35
 [0.205322 , 31.408738 , -21.903670 ], #36
-[-7.198266 , 30.844876 , -20.328022 ] ], dtype=np.float32)
+[-7.198266 , 30.844876 , -20.328022 ] #37
+], dtype=np.float32)

 FaceType_to_padding_remove_align = {
-    FaceType.MOUTH: (0.25, False),
     FaceType.HALF: (0.0, False),
     FaceType.MID_FULL: (0.0675, False),
     FaceType.FULL: (0.2109375, False),
     FaceType.FULL_NO_ALIGN: (0.2109375, True),
     FaceType.WHOLE_FACE: (0.40, False),
-    FaceType.HEAD: (1.0, False),
-    FaceType.HEAD_NO_ALIGN: (1.0, True),
+    FaceType.HEAD: (0.70, False),
+    FaceType.HEAD_NO_ALIGN: (0.70, True),
 }

 def convert_98_to_68(lmrks):
@@ -279,10 +278,7 @@ def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):


     # estimate landmarks transform from global space to local aligned space with bounds [0..1]
-    if face_type == FaceType.MOUTH:
-        mat = umeyama(image_landmarks[48:68], mouth_center_landmarks_2D, True)[0:2]
-    else:
-        mat = umeyama( np.concatenate ( [ image_landmarks[17:49] , image_landmarks[54:55] ] ) , landmarks_2D_new, True)[0:2]
+    mat = umeyama( np.concatenate ( [ image_landmarks[17:49] , image_landmarks[54:55] ] ) , landmarks_2D_new, True)[0:2]

     # get corner points in global space
     g_p = transform_points ( np.float32([(0,0),(1,0),(1,1),(0,1),(0.5,0.5) ]) , mat, True)

@@ -299,13 +295,33 @@ def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
     mod = (1.0 / scale)* ( npla.norm(g_p[0]-g_p[2])*(padding*np.sqrt(2.0) + 0.5) )

     if face_type == FaceType.WHOLE_FACE:
-        # adjust center for WHOLE_FACE, 7% below in order to cover more forehead
+        # adjust vertical offset for WHOLE_FACE, 7% below in order to cover more forehead
         vec = (g_p[0]-g_p[3]).astype(np.float32)
         vec_len = npla.norm(vec)
         vec /= vec_len

         g_c += vec*vec_len*0.07

+    elif face_type == FaceType.HEAD:
+        mat = umeyama( np.concatenate ( [ image_landmarks[17:49] , image_landmarks[54:55] ] ) , landmarks_2D_new, True)[0:2]
+
+        # assuming image_landmarks are 3D_Landmarks extracted for HEAD,
+        # adjust horizontal offset according to estimated yaw
+        yaw = estimate_averaged_yaw(transform_points (image_landmarks, mat, False))
+
+        hvec = (g_p[0]-g_p[1]).astype(np.float32)
+        hvec_len = npla.norm(hvec)
+        hvec /= hvec_len
+
+        yaw *= np.abs(math.tanh(yaw*2)) # Damp near zero
+
+        g_c -= hvec * (yaw * hvec_len / 2.0)
+
+        # adjust vertical offset for HEAD, 50% below
+        vvec = (g_p[0]-g_p[3]).astype(np.float32)
+        vvec_len = npla.norm(vvec)
+        vvec /= vvec_len
+        g_c += vvec*vvec_len*0.50
+
     # calc 3 points in global space to estimate 2d affine transform
     if not remove_align:
         l_t = np.array( [ g_c - tb_diag_vec*mod,
@@ -700,9 +716,17 @@ def calc_face_pitch(landmarks):
     b = landmarks[8][1]
     return float(b-t)

+def estimate_averaged_yaw(landmarks):
+    # Works much better than solvePnP if landmarks from "3DFAN"
+    if not isinstance(landmarks, np.ndarray):
+        landmarks = np.array (landmarks)
+    l = ( (landmarks[27][0]-landmarks[0][0]) + (landmarks[28][0]-landmarks[1][0]) + (landmarks[29][0]-landmarks[2][0]) ) / 3.0
+    r = ( (landmarks[16][0]-landmarks[27][0]) + (landmarks[15][0]-landmarks[28][0]) + (landmarks[14][0]-landmarks[29][0]) ) / 3.0
+    return float(r-l)
+
 def estimate_pitch_yaw_roll(aligned_landmarks, size=256):
     """
-    returns pitch,yaw,roll [-pi...+pi]
+    returns pitch,yaw,roll [-pi/2...+pi/2]
     """
     shape = (size,size)
     focal_length = shape[1]

@@ -712,16 +736,18 @@ def estimate_pitch_yaw_roll(aligned_landmarks, size=256):
                               [0, focal_length, camera_center[1]],
                               [0, 0, 1]], dtype=np.float32)

-    (_, rotation_vector, translation_vector) = cv2.solvePnP(
-        landmarks_68_3D,
-        aligned_landmarks.astype(np.float32),
+    (_, rotation_vector, _) = cv2.solvePnP(
+        np.concatenate( (landmarks_68_3D[:27], landmarks_68_3D[30:36]) , axis=0) ,
+        np.concatenate( (aligned_landmarks[:27], aligned_landmarks[30:36]) , axis=0).astype(np.float32),
         camera_matrix,
         np.zeros((4, 1)) )

     pitch, yaw, roll = mathlib.rotationMatrixToEulerAngles( cv2.Rodrigues(rotation_vector)[0] )
-    pitch = np.clip ( pitch, -math.pi, math.pi )
-    yaw = np.clip ( yaw , -math.pi, math.pi )
-    roll = np.clip ( roll, -math.pi, math.pi )
+
+    half_pi = math.pi / 2.0
+    pitch = np.clip ( pitch, -half_pi, half_pi )
+    yaw = np.clip ( yaw , -half_pi, half_pi )
+    roll = np.clip ( roll, -half_pi, half_pi )

     return -pitch, yaw, roll
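In the HEAD branch of get_transform_mat, the crop is shifted horizontally by a fraction of the box width proportional to the averaged yaw, and the tanh factor damps the shift near frontal poses so near-frontal heads stay centered. A small numeric illustration of that damping (the helper and the sample yaw values are assumptions, not code from the commit):

    import numpy as np

    def damped_shift(yaw, hvec_len=1.0):
        # same shape as the HEAD branch: damp near zero, shift up to about half the box width
        yaw = yaw * np.abs(np.tanh(yaw * 2))
        return yaw * hvec_len / 2.0

    for yaw in (0.05, 0.2, 0.5, 1.0):
        print(f'yaw={yaw:.2f} -> shift={damped_shift(yaw):.4f}')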
@@ -71,7 +71,9 @@ class ExtractSubprocessor(Subprocessor):
                 self.rects_extractor = facelib.S3FDExtractor(place_model_on_cpu=place_model_on_cpu)

             if self.type == 'all' or 'landmarks' in self.type:
-                self.landmarks_extractor = facelib.FANExtractor(place_model_on_cpu=place_model_on_cpu)
+                # for head type, extract "3D landmarks"
+                self.landmarks_extractor = facelib.FANExtractor(landmarks_3D=self.face_type >= FaceType.HEAD,
+                                                                place_model_on_cpu=place_model_on_cpu)

             self.cached_image = (None, None)

@@ -690,7 +692,7 @@ def main(detector=None,
          ):
     face_type = FaceType.fromString(face_type)

-    image_size = 512
+    image_size = 512 if face_type < FaceType.HEAD else 768

     if not input_path.exists():
         io.log_err ('Input directory not found. Please ensure it exists.')
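Head extraction changes two extractor defaults at once: the aligned image size grows from 512 to 768, and the FAN landmark model switches to the 3D weights. A trivial restatement of that selection (a sketch, assuming the facelib package is importable):

    from facelib import FaceType

    face_type    = FaceType.fromString('head')
    image_size   = 512 if face_type < FaceType.HEAD else 768   # as in Extractor.main
    landmarks_3D = face_type >= FaceType.HEAD                  # FANExtractor then loads 3DFAN.npy
    print(image_size, landmarks_3D)                            # 768 True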
@@ -84,14 +84,19 @@ class InteractiveMergerSubprocessor(Subprocessor):
             filepath = frame_info.filepath

             if len(frame_info.landmarks_list) == 0:
-                self.log_info (f'no faces found for {filepath.name}, copying without faces')
-
-                img_bgr = cv2_imread(filepath)
-                imagelib.normalize_channels(img_bgr, 3)
+
+                if cfg.mode == 'raw-predict':
+                    h,w,c = self.predictor_input_shape
+                    img_bgr = np.zeros( (h,w,3), dtype=np.uint8)
+                    img_mask = np.zeros( (h,w,1), dtype=np.uint8)
+                else:
+                    self.log_info (f'no faces found for {filepath.name}, copying without faces')
+                    img_bgr = cv2_imread(filepath)
+                    imagelib.normalize_channels(img_bgr, 3)
+                    h,w,c = img_bgr.shape
+                    img_mask = np.zeros( (h,w,1), dtype=img_bgr.dtype)

                 cv2_imwrite (pf.output_filepath, img_bgr)
-                h,w,c = img_bgr.shape
-
-                img_mask = np.zeros( (h,w,1), dtype=img_bgr.dtype)
                 cv2_imwrite (pf.output_mask_filepath, img_mask)

                 if pf.need_return_image:

@@ -300,6 +305,7 @@ class InteractiveMergerSubprocessor(Subprocessor):
         '3' : lambda cfg,shift_pressed: cfg.set_mode(3),
         '4' : lambda cfg,shift_pressed: cfg.set_mode(4),
         '5' : lambda cfg,shift_pressed: cfg.set_mode(5),
+        '6' : lambda cfg,shift_pressed: cfg.set_mode(6),
         'q' : lambda cfg,shift_pressed: cfg.add_hist_match_threshold(1 if not shift_pressed else 5),
         'a' : lambda cfg,shift_pressed: cfg.add_hist_match_threshold(-1 if not shift_pressed else -5),
         'w' : lambda cfg,shift_pressed: cfg.add_erode_mask_modifier(1 if not shift_pressed else 5),
@@ -17,8 +17,6 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,
     img_size = img_bgr.shape[1], img_bgr.shape[0]
     img_face_mask_a = LandmarksProcessor.get_image_hull_mask (img_bgr.shape, img_face_landmarks)

-    if cfg.mode == 'original':
-        return img_bgr, img_face_mask_a

     out_img = img_bgr.copy()
     out_merging_mask_a = None

@@ -46,16 +44,9 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,
     predictor_input_bgr = cv2.resize (dst_face_bgr, (input_size,input_size) )

     predicted = predictor_func (predictor_input_bgr)
-    if isinstance(predicted, tuple):
-        #merger return bgr,mask
-        prd_face_bgr = np.clip (predicted[0], 0, 1.0)
-        prd_face_mask_a_0 = np.clip (predicted[1], 0, 1.0)
-        predictor_masked = True
-    else:
-        #merger return bgr only, using dst mask
-        prd_face_bgr = np.clip (predicted, 0, 1.0 )
-        prd_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, (input_size,input_size) )
-        predictor_masked = False
+    prd_face_bgr = np.clip (predicted[0], 0, 1.0)
+    prd_face_mask_a_0 = np.clip (predicted[1], 0, 1.0)
+    prd_face_dst_mask_a_0 = np.clip (predicted[2], 0, 1.0)

     if cfg.super_resolution_power != 0:
         prd_face_bgr_enhanced = face_enhancer_func(prd_face_bgr, is_tanh=True, preserve_size=False)
@ -64,89 +55,100 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,
|
||||||
prd_face_bgr = np.clip(prd_face_bgr, 0, 1)
|
prd_face_bgr = np.clip(prd_face_bgr, 0, 1)
|
||||||
|
|
||||||
if cfg.super_resolution_power != 0:
|
if cfg.super_resolution_power != 0:
|
||||||
if predictor_masked:
|
prd_face_mask_a_0 = cv2.resize (prd_face_mask_a_0, (output_size, output_size), cv2.INTER_CUBIC)
|
||||||
prd_face_mask_a_0 = cv2.resize (prd_face_mask_a_0, (output_size, output_size), cv2.INTER_CUBIC)
|
prd_face_dst_mask_a_0 = cv2.resize (prd_face_dst_mask_a_0, (output_size, output_size), cv2.INTER_CUBIC)
|
||||||
else:
|
|
||||||
prd_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, (output_size, output_size), cv2.INTER_CUBIC)
|
|
||||||
|
|
||||||
if cfg.mask_mode == 2: #dst
|
if cfg.mask_mode == 1: #dst
|
||||||
prd_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, (output_size,output_size), cv2.INTER_CUBIC)
|
wrk_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, (output_size,output_size), cv2.INTER_CUBIC)
|
||||||
elif cfg.mask_mode >= 3 and cfg.mask_mode <= 6: #XSeg modes
|
elif cfg.mask_mode == 2: #learned-prd
|
||||||
if cfg.mask_mode == 3 or cfg.mask_mode == 5 or cfg.mask_mode == 6:
|
wrk_face_mask_a_0 = prd_face_mask_a_0
|
||||||
|
elif cfg.mask_mode == 3: #learned-dst
|
||||||
|
wrk_face_mask_a_0 = prd_face_dst_mask_a_0
|
||||||
|
elif cfg.mask_mode == 4: #learned-prd*learned-dst
|
||||||
|
wrk_face_mask_a_0 = prd_face_mask_a_0*prd_face_dst_mask_a_0
|
||||||
|
elif cfg.mask_mode >= 5 and cfg.mask_mode <= 8: #XSeg modes
|
||||||
|
if cfg.mask_mode == 5 or cfg.mask_mode == 7 or cfg.mask_mode == 8:
|
||||||
# obtain XSeg-prd
|
# obtain XSeg-prd
|
||||||
prd_face_xseg_bgr = cv2.resize (prd_face_bgr, (xseg_input_size,)*2, cv2.INTER_CUBIC)
|
prd_face_xseg_bgr = cv2.resize (prd_face_bgr, (xseg_input_size,)*2, cv2.INTER_CUBIC)
|
||||||
prd_face_xseg_mask = xseg_256_extract_func(prd_face_xseg_bgr)
|
prd_face_xseg_mask = xseg_256_extract_func(prd_face_xseg_bgr)
|
||||||
X_prd_face_mask_a_0 = cv2.resize ( prd_face_xseg_mask, (output_size, output_size), cv2.INTER_CUBIC)
|
X_prd_face_mask_a_0 = cv2.resize ( prd_face_xseg_mask, (output_size, output_size), cv2.INTER_CUBIC)
|
||||||
|
|
||||||
if cfg.mask_mode >= 4 and cfg.mask_mode <= 6:
|
if cfg.mask_mode >= 6 and cfg.mask_mode <= 8:
|
||||||
# obtain XSeg-dst
xseg_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, xseg_input_size, face_type=cfg.face_type)
dst_face_xseg_bgr = cv2.warpAffine(img_bgr, xseg_mat, (xseg_input_size,)*2, flags=cv2.INTER_CUBIC )
dst_face_xseg_mask = xseg_256_extract_func(dst_face_xseg_bgr)
X_dst_face_mask_a_0 = cv2.resize (dst_face_xseg_mask, (output_size,output_size), cv2.INTER_CUBIC)

-if cfg.mask_mode == 3: #'XSeg-prd',
-    prd_face_mask_a_0 = X_prd_face_mask_a_0
-elif cfg.mask_mode == 4: #'XSeg-dst',
-    prd_face_mask_a_0 = X_dst_face_mask_a_0
-elif cfg.mask_mode == 5: #'XSeg-prd*XSeg-dst',
-    prd_face_mask_a_0 = X_prd_face_mask_a_0 * X_dst_face_mask_a_0
-elif cfg.mask_mode == 6: #learned*XSeg-prd*XSeg-dst'
-    prd_face_mask_a_0 = prd_face_mask_a_0 * X_prd_face_mask_a_0 * X_dst_face_mask_a_0
+if cfg.mask_mode == 5: #'XSeg-prd'
+    wrk_face_mask_a_0 = X_prd_face_mask_a_0
+elif cfg.mask_mode == 6: #'XSeg-dst'
+    wrk_face_mask_a_0 = X_dst_face_mask_a_0
+elif cfg.mask_mode == 7: #'XSeg-prd*XSeg-dst'
+    wrk_face_mask_a_0 = X_prd_face_mask_a_0 * X_dst_face_mask_a_0
+elif cfg.mask_mode == 8: #learned-prd*learned-dst*XSeg-prd*XSeg-dst
+    wrk_face_mask_a_0 = prd_face_mask_a_0 * prd_face_dst_mask_a_0 * X_prd_face_mask_a_0 * X_dst_face_mask_a_0

-prd_face_mask_a_0[ prd_face_mask_a_0 < (1.0/255.0) ] = 0.0 # get rid of noise
+wrk_face_mask_a_0[ wrk_face_mask_a_0 < (1.0/255.0) ] = 0.0 # get rid of noise

# resize to mask_subres_size
-if prd_face_mask_a_0.shape[0] != mask_subres_size:
-    prd_face_mask_a_0 = cv2.resize (prd_face_mask_a_0, (mask_subres_size, mask_subres_size), cv2.INTER_CUBIC)
+if wrk_face_mask_a_0.shape[0] != mask_subres_size:
+    wrk_face_mask_a_0 = cv2.resize (wrk_face_mask_a_0, (mask_subres_size, mask_subres_size), cv2.INTER_CUBIC)

# process mask in local predicted space
if 'raw' not in cfg.mode:
    # add zero pad
-    prd_face_mask_a_0 = np.pad (prd_face_mask_a_0, input_size)
+    wrk_face_mask_a_0 = np.pad (wrk_face_mask_a_0, input_size)

    ero = cfg.erode_mask_modifier
    blur = cfg.blur_mask_modifier

    if ero > 0:
-        prd_face_mask_a_0 = cv2.erode(prd_face_mask_a_0, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ero,ero)), iterations = 1 )
+        wrk_face_mask_a_0 = cv2.erode(wrk_face_mask_a_0, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ero,ero)), iterations = 1 )
    elif ero < 0:
-        prd_face_mask_a_0 = cv2.dilate(prd_face_mask_a_0, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(-ero,-ero)), iterations = 1 )
+        wrk_face_mask_a_0 = cv2.dilate(wrk_face_mask_a_0, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(-ero,-ero)), iterations = 1 )

    # clip eroded/dilated mask in actual predict area
    # pad with half blur size in order to accuratelly fade to zero at the boundary
    clip_size = input_size + blur // 2

-    prd_face_mask_a_0[:clip_size,:] = 0
-    prd_face_mask_a_0[-clip_size:,:] = 0
-    prd_face_mask_a_0[:,:clip_size] = 0
-    prd_face_mask_a_0[:,-clip_size:] = 0
+    wrk_face_mask_a_0[:clip_size,:] = 0
+    wrk_face_mask_a_0[-clip_size:,:] = 0
+    wrk_face_mask_a_0[:,:clip_size] = 0
+    wrk_face_mask_a_0[:,-clip_size:] = 0

    if blur > 0:
        blur = blur + (1-blur % 2)
-        prd_face_mask_a_0 = cv2.GaussianBlur(prd_face_mask_a_0, (blur, blur) , 0)
+        wrk_face_mask_a_0 = cv2.GaussianBlur(wrk_face_mask_a_0, (blur, blur) , 0)

-    prd_face_mask_a_0 = prd_face_mask_a_0[input_size:-input_size,input_size:-input_size]
+    wrk_face_mask_a_0 = wrk_face_mask_a_0[input_size:-input_size,input_size:-input_size]

-    prd_face_mask_a_0 = np.clip(prd_face_mask_a_0, 0, 1)
+    wrk_face_mask_a_0 = np.clip(wrk_face_mask_a_0, 0, 1)

-img_face_mask_a = cv2.warpAffine( prd_face_mask_a_0, face_mask_output_mat, img_size, np.zeros(img_bgr.shape[0:2], dtype=np.float32), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC )[...,None]
+img_face_mask_a = cv2.warpAffine( wrk_face_mask_a_0, face_mask_output_mat, img_size, np.zeros(img_bgr.shape[0:2], dtype=np.float32), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC )[...,None]
img_face_mask_a = np.clip (img_face_mask_a, 0.0, 1.0)

img_face_mask_a [ img_face_mask_a < (1.0/255.0) ] = 0.0 # get rid of noise

-if prd_face_mask_a_0.shape[0] != output_size:
-    prd_face_mask_a_0 = cv2.resize (prd_face_mask_a_0, (output_size,output_size), cv2.INTER_CUBIC)
+if wrk_face_mask_a_0.shape[0] != output_size:
+    wrk_face_mask_a_0 = cv2.resize (wrk_face_mask_a_0, (output_size,output_size), cv2.INTER_CUBIC)

-prd_face_mask_a = prd_face_mask_a_0[...,None]
-prd_face_mask_area_a = prd_face_mask_a.copy()
-prd_face_mask_area_a[prd_face_mask_area_a>0] = 1.0
+wrk_face_mask_a = wrk_face_mask_a_0[...,None]
+wrk_face_mask_area_a = wrk_face_mask_a.copy()
+wrk_face_mask_area_a[wrk_face_mask_area_a>0] = 1.0

-if 'raw' in cfg.mode:
+if cfg.mode == 'original':
+    return img_bgr, img_face_mask_a

+elif 'raw' in cfg.mode:
    if cfg.mode == 'raw-rgb':
        out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
        out_merging_mask_a = img_face_mask_a

+    elif cfg.mode == 'raw-predict':
+        out_img = prd_face_bgr
+        out_merging_mask_a = wrk_face_mask_a

    out_img = np.clip (out_img, 0.0, 1.0 )
else:
    #averaging [lenx, leny, maskx, masky] by grayscale gradients of upscaled mask
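The erode/blur handling above is easier to follow in isolation. Below is a minimal standalone sketch of the same post-processing chain (pad, erode or dilate, zero the border, Gaussian blur, crop); mask, ero, blur and pad are stand-in arguments for illustration, not the merger's actual variables.

import cv2
import numpy as np

def postprocess_mask(mask, ero=0, blur=0, pad=128):
    # mask: float32 HxW in [0,1]; pad mirrors the merger's zero padding
    m = np.pad(mask, pad)

    if ero > 0:
        m = cv2.erode(m, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (ero, ero)), iterations=1)
    elif ero < 0:
        m = cv2.dilate(m, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (-ero, -ero)), iterations=1)

    # zero the border so the blur fades to zero at the boundary
    clip = pad + blur // 2
    m[:clip, :] = 0
    m[-clip:, :] = 0
    m[:, :clip] = 0
    m[:, -clip:] = 0

    if blur > 0:
        k = blur + (1 - blur % 2)          # kernel size must be odd
        m = cv2.GaussianBlur(m, (k, k), 0)

    m = m[pad:-pad, pad:-pad]              # remove the padding again
    return np.clip(m, 0, 1)

demo = np.zeros((256, 256), np.float32)
cv2.circle(demo, (128, 128), 80, 1.0, -1)        # filled circle as a toy mask
soft = postprocess_mask(demo, ero=10, blur=30)   # shrink a little, then feather

A positive erode modifier shrinks the mask before feathering, a negative one grows it; the border zeroing is what makes the blur fade to zero instead of bleeding past the padded area.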
@@ -165,8 +167,8 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,

if 'seamless' not in cfg.mode and cfg.color_transfer_mode != 0:
    if cfg.color_transfer_mode == 1: #rct
-        prd_face_bgr = imagelib.reinhard_color_transfer ( np.clip( prd_face_bgr*prd_face_mask_area_a*255, 0, 255).astype(np.uint8),
-                                                          np.clip( dst_face_bgr*prd_face_mask_area_a*255, 0, 255).astype(np.uint8), )
+        prd_face_bgr = imagelib.reinhard_color_transfer ( np.clip( prd_face_bgr*wrk_face_mask_area_a*255, 0, 255).astype(np.uint8),
+                                                          np.clip( dst_face_bgr*wrk_face_mask_area_a*255, 0, 255).astype(np.uint8), )

        prd_face_bgr = np.clip( prd_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
    elif cfg.color_transfer_mode == 2: #lct

@@ -174,22 +176,22 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,
    elif cfg.color_transfer_mode == 3: #mkl
        prd_face_bgr = imagelib.color_transfer_mkl (prd_face_bgr, dst_face_bgr)
    elif cfg.color_transfer_mode == 4: #mkl-m
-        prd_face_bgr = imagelib.color_transfer_mkl (prd_face_bgr*prd_face_mask_area_a, dst_face_bgr*prd_face_mask_area_a)
+        prd_face_bgr = imagelib.color_transfer_mkl (prd_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a)
    elif cfg.color_transfer_mode == 5: #idt
        prd_face_bgr = imagelib.color_transfer_idt (prd_face_bgr, dst_face_bgr)
    elif cfg.color_transfer_mode == 6: #idt-m
-        prd_face_bgr = imagelib.color_transfer_idt (prd_face_bgr*prd_face_mask_area_a, dst_face_bgr*prd_face_mask_area_a)
+        prd_face_bgr = imagelib.color_transfer_idt (prd_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a)
    elif cfg.color_transfer_mode == 7: #sot-m
-        prd_face_bgr = imagelib.color_transfer_sot (prd_face_bgr*prd_face_mask_area_a, dst_face_bgr*prd_face_mask_area_a)
+        prd_face_bgr = imagelib.color_transfer_sot (prd_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a)
        prd_face_bgr = np.clip (prd_face_bgr, 0.0, 1.0)
    elif cfg.color_transfer_mode == 8: #mix-m
-        prd_face_bgr = imagelib.color_transfer_mix (prd_face_bgr*prd_face_mask_area_a, dst_face_bgr*prd_face_mask_area_a)
+        prd_face_bgr = imagelib.color_transfer_mix (prd_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a)

if cfg.mode == 'hist-match':
    hist_mask_a = np.ones ( prd_face_bgr.shape[:2] + (1,) , dtype=np.float32)

    if cfg.masked_hist_match:
-        hist_mask_a *= prd_face_mask_area_a
+        hist_mask_a *= wrk_face_mask_area_a

    white = (1.0-hist_mask_a)* np.ones ( prd_face_bgr.shape[:2] + (1,) , dtype=np.float32)

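For readers unfamiliar with the rct option used above: it is a Reinhard-style statistics transfer. The sketch below is a generic OpenCV/numpy version of that idea (per-channel mean/std matching in LAB space); it is not the imagelib implementation the merger calls, and it assumes plain uint8 BGR inputs.

import cv2
import numpy as np

def reinhard_transfer(src_bgr, ref_bgr):
    # Match per-channel mean/std of src to ref in LAB space (Reinhard et al. 2001).
    src = cv2.cvtColor(src_bgr, cv2.COLOR_BGR2LAB).astype(np.float32)
    ref = cv2.cvtColor(ref_bgr, cv2.COLOR_BGR2LAB).astype(np.float32)

    s_mean, s_std = src.reshape(-1, 3).mean(0), src.reshape(-1, 3).std(0) + 1e-6
    r_mean, r_std = ref.reshape(-1, 3).mean(0), ref.reshape(-1, 3).std(0)

    out = (src - s_mean) / s_std * r_std + r_mean
    out = np.clip(out, 0, 255).astype(np.uint8)
    return cv2.cvtColor(out, cv2.COLOR_LAB2BGR)

pred_face = np.full((64, 64, 3), 200, np.uint8)   # dummy bright crop
dst_face  = np.full((64, 64, 3),  90, np.uint8)   # dummy dark crop
print(reinhard_transfer(pred_face, dst_face).mean())

Masking before the transfer, as the merger does, simply restricts the statistics to the face area so background pixels do not skew the color match.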
@@ -240,24 +242,24 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,

if 'seamless' in cfg.mode and cfg.color_transfer_mode != 0:
    if cfg.color_transfer_mode == 1:
-        out_face_bgr = imagelib.reinhard_color_transfer ( np.clip(out_face_bgr*prd_face_mask_area_a*255, 0, 255).astype(np.uint8),
-                                                          np.clip(dst_face_bgr*prd_face_mask_area_a*255, 0, 255).astype(np.uint8) )
+        out_face_bgr = imagelib.reinhard_color_transfer ( np.clip(out_face_bgr*wrk_face_mask_area_a*255, 0, 255).astype(np.uint8),
+                                                          np.clip(dst_face_bgr*wrk_face_mask_area_a*255, 0, 255).astype(np.uint8) )
        out_face_bgr = np.clip( out_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
    elif cfg.color_transfer_mode == 2: #lct
        out_face_bgr = imagelib.linear_color_transfer (out_face_bgr, dst_face_bgr)
    elif cfg.color_transfer_mode == 3: #mkl
        out_face_bgr = imagelib.color_transfer_mkl (out_face_bgr, dst_face_bgr)
    elif cfg.color_transfer_mode == 4: #mkl-m
-        out_face_bgr = imagelib.color_transfer_mkl (out_face_bgr*prd_face_mask_area_a, dst_face_bgr*prd_face_mask_area_a)
+        out_face_bgr = imagelib.color_transfer_mkl (out_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a)
    elif cfg.color_transfer_mode == 5: #idt
        out_face_bgr = imagelib.color_transfer_idt (out_face_bgr, dst_face_bgr)
    elif cfg.color_transfer_mode == 6: #idt-m
-        out_face_bgr = imagelib.color_transfer_idt (out_face_bgr*prd_face_mask_area_a, dst_face_bgr*prd_face_mask_area_a)
+        out_face_bgr = imagelib.color_transfer_idt (out_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a)
    elif cfg.color_transfer_mode == 7: #sot-m
-        out_face_bgr = imagelib.color_transfer_sot (out_face_bgr*prd_face_mask_area_a, dst_face_bgr*prd_face_mask_area_a)
+        out_face_bgr = imagelib.color_transfer_sot (out_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a)
        out_face_bgr = np.clip (out_face_bgr, 0.0, 1.0)
    elif cfg.color_transfer_mode == 8: #mix-m
-        out_face_bgr = imagelib.color_transfer_mix (out_face_bgr*prd_face_mask_area_a, dst_face_bgr*prd_face_mask_area_a)
+        out_face_bgr = imagelib.color_transfer_mix (out_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a)

if cfg.mode == 'seamless-hist-match':
    out_face_bgr = imagelib.color_hist_match(out_face_bgr, dst_face_bgr, cfg.hist_match_threshold)

@@ -76,19 +76,19 @@ mode_dict = {0:'original',
             2:'hist-match',
             3:'seamless',
             4:'seamless-hist-match',
-            5:'raw-rgb',}
+            5:'raw-rgb',
+            6:'raw-predict'}

-mode_str_dict = {}
+mode_str_dict = { mode_dict[key] : key for key in mode_dict.keys() }

-for key in mode_dict.keys():
-    mode_str_dict[ mode_dict[key] ] = key
-
-mask_mode_dict = {1:'learned',
-                  2:'dst',
-                  3:'XSeg-prd',
-                  4:'XSeg-dst',
-                  5:'XSeg-prd*XSeg-dst',
-                  6:'learned*XSeg-prd*XSeg-dst'
+mask_mode_dict = {1:'dst',
+                  2:'learned-prd',
+                  3:'learned-dst',
+                  4:'learned-prd*learned-dst',
+                  5:'XSeg-prd',
+                  6:'XSeg-dst',
+                  7:'XSeg-prd*XSeg-dst',
+                  8:'learned-prd*learned-dst*XSeg-prd*XSeg-dst'
                  }

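To see how the remapped tables resolve user input after this change, the snippet below rebuilds them and performs two lookups. The dictionaries are copied from the hunk above except entry 1 of mode_dict, which is not visible here and is assumed to be 'overlay' from the default shown a few hunks below.

mode_dict = {0:'original', 1:'overlay', 2:'hist-match', 3:'seamless',
             4:'seamless-hist-match', 5:'raw-rgb', 6:'raw-predict'}

# invert name -> number, as the new one-liner does
mode_str_dict = { mode_dict[key] : key for key in mode_dict.keys() }

mask_mode_dict = {1:'dst', 2:'learned-prd', 3:'learned-dst',
                  4:'learned-prd*learned-dst', 5:'XSeg-prd', 6:'XSeg-dst',
                  7:'XSeg-prd*XSeg-dst', 8:'learned-prd*learned-dst*XSeg-prd*XSeg-dst'}

print(mode_str_dict['raw-predict'])   # 6, the newly added raw output mode
print(mask_mode_dict[4])              # 'learned-prd*learned-dst', the new default mask_mode

Note that the old mask_mode numbers no longer mean the same thing: XSeg-prd moves from 3 to 5, XSeg-dst from 4 to 6, and so on, which is why the merger code earlier in this diff switches its comparisons from 3..6 to 5..8.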
@@ -102,7 +102,7 @@ class MergerConfigMasked(MergerConfig):
                 mode='overlay',
                 masked_hist_match=True,
                 hist_match_threshold = 238,
-                mask_mode = 1,
+                mask_mode = 4,
                 erode_mask_modifier = 0,
                 blur_mask_modifier = 0,
                 motion_blur_power = 0,

@@ -118,7 +118,7 @@ class MergerConfigMasked(MergerConfig):
        super().__init__(type=MergerConfig.TYPE_MASKED, **kwargs)

        self.face_type = face_type
-       if self.face_type not in [FaceType.HALF, FaceType.MID_FULL, FaceType.FULL, FaceType.WHOLE_FACE ]:
+       if self.face_type not in [FaceType.HALF, FaceType.MID_FULL, FaceType.FULL, FaceType.WHOLE_FACE, FaceType.HEAD ]:
            raise ValueError("MergerConfigMasked does not support this type of face.")

        self.default_mode = default_mode

@@ -274,8 +274,8 @@ class MergerConfigMasked(MergerConfig):

        if 'raw' not in self.mode:
            r += f"""color_transfer_mode: {ctm_dict[self.color_transfer_mode]}\n"""
+       r += super().to_string(filename)

-       r += super().to_string(filename)
        r += f"""super_resolution_power: {self.super_resolution_power}\n"""

        if 'raw' not in self.mode:

Binary file not shown.
Before: 307 KiB | After: 310 KiB
Binary file not shown.
@@ -308,8 +308,7 @@ class QModel(ModelBase):
            face = nn.to_data_format(face[None,...], self.model_data_format, "NHWC")

            bgr, mask_dst_dstm, mask_src_dstm = [ nn.to_data_format(x, "NHWC", self.model_data_format).astype(np.float32) for x in self.AE_merge (face) ]
-           mask = mask_dst_dstm[0] * mask_src_dstm[0]
-           return bgr[0], mask[...,0]
+           return bgr[0], mask_src_dstm[0][...,0], mask_dst_dstm[0][...,0]

        #override
        def get_MergerConfig(self):
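The predictor now hands both masks to the merger separately instead of pre-multiplying them, so the merger can offer learned-prd and learned-dst as independent mask modes. A caller that still wants the old single mask can rebuild it in one line; the arrays below are dummies for illustration, not the model's real tensors.

import numpy as np

# stand-ins for what the predictor now returns
bgr      = np.zeros((96, 96, 3), dtype=np.float32)
src_mask = np.random.rand(96, 96).astype(np.float32)   # mask predicted for the source identity
dst_mask = np.random.rand(96, 96).astype(np.float32)   # mask predicted for the destination face

# the previous single-mask behaviour was simply the product of the two
combined = src_mask * dst_mask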
@@ -60,7 +60,7 @@ class SAEHDModel(ModelBase):
        resolution = io.input_int("Resolution", default_resolution, add_info="64-512", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16.")
        resolution = np.clip ( (resolution // 16) * 16, 64, 512)
        self.options['resolution'] = resolution
-       self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf'], help_message="Half / mid face / full face / whole face. Half face has better resolution, but covers less area of cheeks. Mid face is 30% wider than half face. 'Whole face' covers full area of face include forehead, but requires manual merge in Adobe After Effects.").lower()
+       self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf','head'], help_message="Half / mid face / full face / whole face / head. Half face has better resolution, but covers less area of cheeks. Mid face is 30% wider than half face. 'Whole face' covers full area of face include forehead. 'head' covers full head, but requires XSeg for src and dst faceset.").lower()
        self.options['archi'] = io.input_str ("AE architecture", default_archi, ['df','liae','dfhd','liaehd','dfuhd','liaeuhd'], help_message="'df' keeps faces more natural.\n'liae' can fix overly different face shapes.\n'hd' are experimental versions.").lower()

        default_d_dims = 48 if self.options['archi'] == 'dfhd' else 64

@@ -84,7 +84,7 @@ class SAEHDModel(ModelBase):
        self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2

        if self.is_first_run() or ask_override:
-           if self.options['face_type'] == 'wf':
+           if self.options['face_type'] == 'wf' or self.options['face_type'] == 'head':
                self.options['masked_training'] = io.input_bool ("Masked training", default_masked_training, help_message="This option is available only for 'whole_face' type. Masked training clips training area to full_face mask, thus network will train the faces properly. When the face is trained enough, disable this option to train all area of the frame. Merge with 'raw-rgb' mode, then use Adobe After Effects to manually mask and compose whole face include forehead.")

            self.options['eyes_prio'] = io.input_bool ("Eyes priority", default_eyes_prio, help_message='Helps to fix eye problems during training like "alien eyes" and wrong eyes direction ( especially on HD architectures ) by forcing the neural network to train eyes with higher priority. before/after https://i.imgur.com/YQHOuSR.jpg ')

@@ -127,7 +127,8 @@ class SAEHDModel(ModelBase):
        self.face_type = {'h' : FaceType.HALF,
                          'mf' : FaceType.MID_FULL,
                          'f' : FaceType.FULL,
-                         'wf' : FaceType.WHOLE_FACE}[ self.options['face_type'] ]
+                         'wf' : FaceType.WHOLE_FACE,
+                         'head' : FaceType.HEAD}[ self.options['face_type'] ]

        eyes_prio = self.options['eyes_prio']
        archi = self.options['archi']
@@ -602,7 +603,9 @@ class SAEHDModel(ModelBase):

            st_m = []
            for i in range(n_samples):
-               ar = S[i]*target_srcm[i], SS[i], D[i]*target_dstm[i], DD[i]*DDM[i], SD[i]*(DDM[i]*SDM[i])
+               SD_mask = DDM[i]*SDM[i] if self.face_type < FaceType.HEAD else SDM[i]
+
+               ar = S[i]*target_srcm[i], SS[i], D[i]*target_dstm[i], DD[i]*DDM[i], SD[i]*SD_mask
                st_m.append ( np.concatenate ( ar, axis=1) )

            result += [ ('SAEHD masked', np.concatenate (st_m, axis=0 )), ]

@@ -642,7 +645,8 @@ class SAEHDModel(ModelBase):

            st_m = []
            for i in range(n_samples):
-               ar = D[i]*target_dstm[i], SD[i]*(DDM[i]*SDM[i])
+               SD_mask = DDM[i]*SDM[i] if self.face_type < FaceType.HEAD else SDM[i]
+               ar = D[i]*target_dstm[i], SD[i]*SD_mask
                st_m.append ( np.concatenate ( ar, axis=1) )
            result += [ ('SAEHD masked pred', np.concatenate (st_m, axis=0 )), ]
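Both preview hunks above rely on the face-type constants being ordered, so that self.face_type < FaceType.HEAD holds for every type up to whole_face: below head the swapped preview is masked by the product of the destination and source-over-destination masks, while head previews use the source mask alone. A self-contained sketch of that selection follows; FaceTypeStub is a stand-in with illustrative values, the real constants live in facelib.FaceType.

from enum import IntEnum
import numpy as np

class FaceTypeStub(IntEnum):   # stand-in: only the ordering matters here
    HALF = 0
    MID_FULL = 1
    FULL = 2
    WHOLE_FACE = 3
    HEAD = 4

def preview_sd_mask(face_type, ddm_i, sdm_i):
    # below HEAD: intersect dst-dst and src-dst masks; for HEAD: source mask alone
    return ddm_i * sdm_i if face_type < FaceTypeStub.HEAD else sdm_i

ddm = np.full((128, 128, 1), 0.5, dtype=np.float32)
sdm = np.ones((128, 128, 1), dtype=np.float32)
print(preview_sd_mask(FaceTypeStub.WHOLE_FACE, ddm, sdm).max())  # 0.5
print(preview_sd_mask(FaceTypeStub.HEAD, ddm, sdm).max())        # 1.0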
@@ -653,8 +657,7 @@ class SAEHDModel(ModelBase):

            bgr, mask_dst_dstm, mask_src_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format).astype(np.float32) for x in self.AE_merge (face) ]

-           mask = mask_dst_dstm[0] * mask_src_dstm[0]
-           return bgr[0], mask[...,0]
+           return bgr[0], mask_src_dstm[0][...,0], mask_dst_dstm[0][...,0]

        #override
        def get_MergerConfig(self):
@@ -22,15 +22,14 @@ class XSegModel(ModelBase):

        ask_override = self.ask_override()

+       if not self.is_first_run() and ask_override:
+           if io.input_bool(f"Restart training?", False, help_message="Reset model weights and start training from scratch."):
+               self.set_iter(0)
+
        default_face_type = self.options['face_type'] = self.load_or_def_option('face_type', 'wf')

-       if not self.is_first_run() and ask_override:
-           self.restart_training = io.input_bool(f"Restart training?", False, help_message="Reset model weights and start training from scratch.")
-       else:
-           self.restart_training = False
-
        if self.is_first_run():
-           self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf'], help_message="Half / mid face / full face / whole face. Choose the same as your deepfake model.").lower()
+           self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf','head'], help_message="Half / mid face / full face / whole face / head. Choose the same as your deepfake model.").lower()


        #override
@@ -45,13 +44,12 @@ class XSegModel(ModelBase):

        self.resolution = resolution = 256

-       if self.restart_training:
-           self.set_iter(0)
-
        self.face_type = {'h' : FaceType.HALF,
                          'mf' : FaceType.MID_FULL,
                          'f' : FaceType.FULL,
-                         'wf' : FaceType.WHOLE_FACE}[ self.options['face_type'] ]
+                         'wf' : FaceType.WHOLE_FACE,
+                         'head' : FaceType.HEAD}[ self.options['face_type'] ]

        place_model_on_cpu = len(devices) == 0
        models_opt_device = '/CPU:0' if place_model_on_cpu else '/GPU:0'
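The reworked restart prompt resets the iteration counter at option time instead of storing a restart_training flag and acting on it later during initialization. A tiny stand-alone sketch of that pattern; TinyModel and ask_restart are illustrative stand-ins, not DeepFaceLab classes.

class TinyModel:
    """Minimal stand-in for ModelBase; only tracks an iteration counter."""
    def __init__(self, iter_=1000):
        self.iter = iter_
    def set_iter(self, n):
        self.iter = n

def ask_restart(model, is_first_run, ask_override, answer):
    # After the change the prompt resets the counter right away,
    # so no restart flag has to survive until on_initialize().
    if not is_first_run and ask_override and answer:
        model.set_iter(0)

m = TinyModel()
ask_restart(m, is_first_run=False, ask_override=True, answer=True)
print(m.iter)  # 0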
@@ -127,13 +127,7 @@ class SampleProcessor(object):
                if face_type is None:
                    raise ValueError("face_type must be defined for face samples")

-               if face_type > sample.face_type:
-                   raise Exception ('sample %s type %s does not match model requirement %s. Consider extract necessary type of faces.' % (sample.filename, sample.face_type, face_type) )
-
-
                if sample_type == SPST.FACE_MASK:
-
-
                    if face_mask_type == SPFMT.FULL_FACE:
                        img = get_full_face_mask()
                    elif face_mask_type == SPFMT.EYES: