mirror of https://github.com/iperov/DeepFaceLab.git, synced 2025-07-14 17:13:42 -07:00
added option to converter --output-face-scale-modifier
parent 2576a411a5
commit 64c3e57f1c
3 changed files with 29 additions and 19 deletions
@@ -35,8 +35,8 @@ landmarks_68_pt = { "mouth": (48,68),
                     "left_eye": (42, 48),
                     "nose": (27, 36), # missed one point
                     "jaw": (0, 17) }

-def get_transform_mat (image_landmarks, output_size, face_type):
+def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
     if not isinstance(image_landmarks, np.ndarray):
         image_landmarks = np.array (image_landmarks)
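The new scale parameter defaults to 1.0, so the resulting matrix is unchanged for existing call sites. A minimal sketch of that property (landmarks stands for a 68-point array and FaceType.FULL is assumed from the surrounding DeepFaceLab code; neither is part of this hunk):

    import numpy as np

    # Sketch: with the default scale the result should match the old
    # three-argument call, so callers that do not pass scale are unaffected.
    mat_default  = get_transform_mat(landmarks, 128, FaceType.FULL)
    mat_explicit = get_transform_mat(landmarks, 128, FaceType.FULL, scale=1.0)
    assert np.allclose(mat_default, mat_explicit)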
@@ -63,13 +63,15 @@ def get_transform_mat (image_landmarks, output_size, face_type):
         padding = (output_size / 64) * 24
     else:
         raise ValueError ('wrong face_type')

     mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2]
     mat = mat * (output_size - 2 * padding)
-    mat[:,2] += padding
+    mat[:,2] += padding
+    mat *= (1 / scale)
+    mat[:,2] += -output_size*( ( (1 / scale) - 1.0 ) / 2 )

     return mat

 def transform_points(points, mat, invert=False):
     if invert:
         mat = cv2.invertAffineTransform (mat)
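For a non-default scale, the added lines multiply the matrix by 1/scale and then adjust the translation column (mat[:,2]) so the crop stays centred: a source point that previously mapped to the centre of the output still maps to the centre, while the landmarks now span a region of width (output_size - 2*padding)/scale. So scale > 1.0 makes the crop cover more of the source frame around the face, and scale < 1.0 covers less. A sketch of how the converter's --output-face-scale-modifier might feed this parameter (the converter-side wiring is presumably in one of the other two changed files and is not shown here; the percentage-to-scale mapping, img, landmarks and FaceType.FULL are assumptions for illustration only):

    import cv2
    import numpy as np

    # Hypothetical mapping from the CLI percentage modifier to the scale
    # argument; the actual conversion factor is not visible in this diff.
    output_face_scale_modifier = 5
    scale = 1.0 + 0.01 * output_face_scale_modifier

    output_size = 128
    # mat maps source-image coordinates to the output crop; warpAffine
    # then extracts the re-scaled, still centred, aligned face.
    mat = get_transform_mat(landmarks, output_size, FaceType.FULL, scale=scale)
    face = cv2.warpAffine(img, mat, (output_size, output_size), flags=cv2.INTER_LANCZOS4)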