mirror of https://github.com/iperov/DeepFaceLab.git
synced 2025-08-22 22:34:25 -07:00

commit 865f9ebd25 (parent 3e0ed0abb8)
Fix in-progress

2 changed files with 270 additions and 197 deletions
@@ -10,116 +10,149 @@ from facelib import FaceType
 import math

 mean_face_x = np.array([
     0.000213256, 0.0752622, 0.18113, 0.29077, 0.393397, 0.586856, 0.689483, 0.799124,
     0.904991, 0.98004, 0.490127, 0.490127, 0.490127, 0.490127, 0.36688, 0.426036,
     0.490127, 0.554217, 0.613373, 0.121737, 0.187122, 0.265825, 0.334606, 0.260918,
     0.182743, 0.645647, 0.714428, 0.793132, 0.858516, 0.79751, 0.719335, 0.254149,
     0.340985, 0.428858, 0.490127, 0.551395, 0.639268, 0.726104, 0.642159, 0.556721,
     0.490127, 0.423532, 0.338094, 0.290379, 0.428096, 0.490127, 0.552157, 0.689874,
     0.553364, 0.490127, 0.42689])

 mean_face_y = np.array([
     0.106454, 0.038915, 0.0187482, 0.0344891, 0.0773906, 0.0773906, 0.0344891,
     0.0187482, 0.038915, 0.106454, 0.203352, 0.307009, 0.409805, 0.515625, 0.587326,
     0.609345, 0.628106, 0.609345, 0.587326, 0.216423, 0.178758, 0.179852, 0.231733,
     0.245099, 0.244077, 0.231733, 0.179852, 0.178758, 0.216423, 0.244077, 0.245099,
     0.780233, 0.745405, 0.727388, 0.742578, 0.727388, 0.745405, 0.780233, 0.864805,
     0.902192, 0.909281, 0.902192, 0.864805, 0.784792, 0.778746, 0.785343, 0.778746,
     0.784792, 0.824182, 0.831803, 0.824182])

 landmarks_2D = np.stack([mean_face_x, mean_face_y], axis=1)

 # 68 point landmark definitions
 landmarks_68_pt = {"mouth": (48, 68),
                    "right_eyebrow": (17, 22),
                    "left_eyebrow": (22, 27),
                    "right_eye": (36, 42),
                    "left_eye": (42, 48),
                    "nose": (27, 36),  # missed one point
                    "jaw": (0, 17)}

 landmarks_68_3D = np.array([
     [-73.393523, -29.801432, 47.667532],
     [-72.775014, -10.949766, 45.909403],
     [-70.533638, 7.929818, 44.842580],
     [-66.850058, 26.074280, 43.141114],
     [-59.790187, 42.564390, 38.635298],
     [-48.368973, 56.481080, 30.750622],
     [-34.121101, 67.246992, 18.456453],
     [-17.875411, 75.056892, 3.609035],
     [0.098749, 77.061286, -0.881698],
     [17.477031, 74.758448, 5.181201],
     [32.648966, 66.929021, 19.176563],
     [46.372358, 56.311389, 30.770570],
     [57.343480, 42.419126, 37.628629],
     [64.388482, 25.455880, 40.886309],
     [68.212038, 6.990805, 42.281449],
     [70.486405, -11.666193, 44.142567],
     [71.375822, -30.365191, 47.140426],
     [-61.119406, -49.361602, 14.254422],
     [-51.287588, -58.769795, 7.268147],
     [-37.804800, -61.996155, 0.442051],
     [-24.022754, -61.033399, -6.606501],
     [-11.635713, -56.686759, -11.967398],
     [12.056636, -57.391033, -12.051204],
     [25.106256, -61.902186, -7.315098],
     [38.338588, -62.777713, -1.022953],
     [51.191007, -59.302347, 5.349435],
     [60.053851, -50.190255, 11.615746],
     [0.653940, -42.193790, -13.380835],
     [0.804809, -30.993721, -21.150853],
     [0.992204, -19.944596, -29.284036],
     [1.226783, -8.414541, -36.948060],
     [-14.772472, 2.598255, -20.132003],
     [-7.180239, 4.751589, -23.536684],
     [0.555920, 6.562900, -25.944448],
     [8.272499, 4.661005, -23.695741],
     [15.214351, 2.643046, -20.858157],
     [-46.047290, -37.471411, 7.037989],
     [-37.674688, -42.730510, 3.021217],
     [-27.883856, -42.711517, 1.353629],
     [-19.648268, -36.754742, -0.111088],
     [-28.272965, -35.134493, -0.147273],
     [-38.082418, -34.919043, 1.476612],
     [19.265868, -37.032306, -0.665746],
     [27.894191, -43.342445, 0.247660],
     [37.437529, -43.110822, 1.696435],
     [45.170805, -38.086515, 4.894163],
     [38.196454, -35.532024, 0.282961],
     [28.764989, -35.484289, -1.172675],
     [-28.916267, 28.612716, -2.240310],
     [-17.533194, 22.172187, -15.934335],
     [-6.684590, 19.029051, -22.611355],
     [0.381001, 20.721118, -23.748437],
     [8.375443, 19.035460, -22.721995],
     [18.876618, 22.394109, -15.610679],
     [28.794412, 28.079924, -3.217393],
     [19.057574, 36.298248, -14.987997],
     [8.956375, 39.634575, -22.554245],
     [0.381549, 40.395647, -23.591626],
     [-7.428895, 39.836405, -22.406106],
     [-18.160634, 36.677899, -15.121907],
     [-24.377490, 28.677771, -4.785684],
     [-6.897633, 25.475976, -20.893742],
     [0.340663, 26.014269, -22.220479],
     [8.444722, 25.326198, -21.025520],
     [24.474473, 28.323008, -5.712776],
     [8.449166, 30.596216, -20.671489],
     [0.205322, 31.408738, -21.903670],
     [-7.198266, 30.844876, -20.328022]], dtype=np.float32)


 def transform_points(points, mat, invert=False):
     if invert:
         mat = cv2.invertAffineTransform(mat)
     points = np.expand_dims(points, axis=1)
     points = cv2.transform(points, mat, points.shape)
     points = np.squeeze(points)
     return points


-def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
+def get_translation_scale_tan_rotation_of_mat(mat):
+    # TODO
+    # extracting rotation, scale values from 2d transformation matrix
+    # https://math.stackexchange.com/questions/13150/extracting-rotation-scale-values-from-2d-transformation-matrix/13165#13165
+    a, b, tx = mat[0, :]
+    c, d, ty = mat[1, :]
+
+    sx = np.sign(a) * math.sqrt(a ** 2 + b ** 2)
+    sy = np.sign(d) * math.sqrt(c ** 2 + d ** 2)
+
+    tan_psi = -b / a
+    return {
+        'tx': tx,
+        'ty': ty,
+        'sx': sx,
+        'sy': sy,
+        'tan_psi': tan_psi
+    }
+
+
+def get_scale_of_mat(mat):
+    # TODO
+    return np.mean(np.sqrt(np.sum(np.square(mat[:, :2]), axis=1)))
+
+
+def calc_image_size_for_unscaled(image_landmarks, face_type, scale=1.0):
+    # TODO
+    mat = get_transform_mat(image_landmarks, 1, face_type, scale=scale)
+    scale = get_scale_of_mat(mat)
+    return int(1 / scale)
+
+
+def get_transform_mat(image_landmarks, output_size, face_type, scale=1.0):
     if not isinstance(image_landmarks, np.ndarray):
         image_landmarks = np.array(image_landmarks)

     """
     if face_type == FaceType.AVATAR:
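Note on the new helpers: get_translation_scale_tan_rotation_of_mat and get_scale_of_mat decompose a 2x3 affine matrix into translation, per-axis scale and the tangent of the rotation angle (the row norms of the 2x2 part give the scale). A minimal sketch of the same decomposition, checked against a transform built with cv2.getRotationMatrix2D; the numbers below are illustrative only and not part of the commit:

    import math
    import cv2
    import numpy as np

    # Build a known transform: rotate 10 degrees around (128, 128), scale by 0.5.
    mat = cv2.getRotationMatrix2D((128, 128), 10.0, 0.5)

    a, b, tx = mat[0, :]
    c, d, ty = mat[1, :]

    sx = np.sign(a) * math.sqrt(a ** 2 + b ** 2)   # ~0.5: recovers the scale
    sy = np.sign(d) * math.sqrt(c ** 2 + d ** 2)   # ~0.5 as well for a similarity transform
    tan_psi = -b / a                               # tangent of the rotation angle (sign depends on convention)

    mean_scale = np.mean(np.sqrt(np.sum(np.square(mat[:, :2]), axis=1)))  # what get_scale_of_mat computes
    print(sx, sy, math.degrees(math.atan(tan_psi)), mean_scale)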
@@ -145,35 +178,49 @@ def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
     elif face_type == FaceType.FULL:
         padding = (output_size / 64) * 12
     elif face_type == FaceType.HEAD:
-        padding = (output_size / 64) * 24
+        padding = (output_size / 64) * 18
     else:
         raise ValueError('wrong face_type: ', face_type)

     mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2]

+    # TODO
+    if output_size != 1:
+        print(
+            f'PREPAD - get_translation_scale_tan_rotation_of_mat: {get_translation_scale_tan_rotation_of_mat(mat)}')
     mat = mat * (output_size - 2 * padding)
     mat[:, 2] += padding
     mat *= (1 / scale)
     mat[:, 2] += -output_size * (((1 / scale) - 1.0) / 2)

+    # TODO
+    if output_size != 1:
+        print(
+            f'POSTPAD - get_translation_scale_tan_rotation_of_mat: {get_translation_scale_tan_rotation_of_mat(mat)}')
+    else:
+        print(
+            f'CALC SCALE - get_translation_scale_tan_rotation_of_mat: {get_translation_scale_tan_rotation_of_mat(mat)}')

     if remove_align:
         bbox = transform_points([(0, 0), (0, output_size - 1), (output_size - 1, output_size - 1),
                                  (output_size - 1, 0)], mat, True)
         area = mathlib.polygon_area(bbox[:, 0], bbox[:, 1])
         side = math.sqrt(area) / 2
         center = transform_points([(output_size / 2, output_size / 2)], mat, True)

         pts1 = np.float32([center + [-side, -side], center + [side, -side], center + [-side, side]])
         pts2 = np.float32([[0, 0], [output_size - 1, 0], [0, output_size - 1]])
         mat = cv2.getAffineTransform(pts1, pts2)

     return mat


 def get_image_hull_mask(image_shape, image_landmarks, ie_polys=None):
     if len(image_landmarks) != 68:
         raise Exception('get_image_hull_mask works only with 68 landmarks')
     int_lmrks = np.array(image_landmarks.copy(), dtype=np.int)

     hull_mask = np.zeros(image_shape[0:2] + (1,), dtype=np.float32)

     # #nose
     ml_pnt = (int_lmrks[36] + int_lmrks[0]) // 2
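For context, get_transform_mat is what turns 68 detected landmarks into the affine matrix used to crop an aligned face. A rough usage sketch; the synthetic landmarks, the 128px output size, and the import path are assumptions for illustration, not code from this commit:

    import cv2
    import numpy as np
    from facelib import FaceType, LandmarksProcessor   # import path assumed

    # Synthetic 68-point landmarks: the 17 jaw points are dummies, points 17..67
    # are the mean face scaled into a 256px frame so umeyama gets sane input.
    lmrks = np.zeros((68, 2), np.float32)
    lmrks[17:] = LandmarksProcessor.landmarks_2D * 160 + 48

    frame = np.zeros((256, 256, 3), np.uint8)   # stand-in for a real video frame
    mat = LandmarksProcessor.get_transform_mat(lmrks, 128, FaceType.FULL, scale=1.0)
    aligned = cv2.warpAffine(frame, mat, (128, 128))
    print(mat.shape, aligned.shape)             # (2, 3) (128, 128, 3)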
@@ -214,81 +261,87 @@ def get_image_hull_mask (image_shape, image_landmarks, ie_polys=None):

     return hull_mask


 def get_image_eye_mask(image_shape, image_landmarks):
     if len(image_landmarks) != 68:
         raise Exception('get_image_eye_mask works only with 68 landmarks')

     hull_mask = np.zeros(image_shape[0:2] + (1,), dtype=np.float32)

     cv2.fillConvexPoly(hull_mask, cv2.convexHull(image_landmarks[36:42]), (1,))
     cv2.fillConvexPoly(hull_mask, cv2.convexHull(image_landmarks[42:48]), (1,))

     return hull_mask


 def blur_image_hull_mask(hull_mask):
     maxregion = np.argwhere(hull_mask == 1.0)
     miny, minx = maxregion.min(axis=0)[:2]
     maxy, maxx = maxregion.max(axis=0)[:2]
     lenx = maxx - minx;
     leny = maxy - miny;
     masky = int(minx + (lenx // 2))
     maskx = int(miny + (leny // 2))
     lowest_len = min(lenx, leny)
     ero = int(lowest_len * 0.085)
     blur = int(lowest_len * 0.10)

     hull_mask = cv2.erode(hull_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (ero, ero)),
                           iterations=1)
     hull_mask = cv2.blur(hull_mask, (blur, blur))
     hull_mask = np.expand_dims(hull_mask, -1)

     return hull_mask


 mirror_idxs = [
     [0, 16],
     [1, 15],
     [2, 14],
     [3, 13],
     [4, 12],
     [5, 11],
     [6, 10],
     [7, 9],

     [17, 26],
     [18, 25],
     [19, 24],
     [20, 23],
     [21, 22],

     [36, 45],
     [37, 44],
     [38, 43],
     [39, 42],
     [40, 47],
     [41, 46],

     [31, 35],
     [32, 34],

     [50, 52],
     [49, 53],
     [48, 54],
     [59, 55],
     [58, 56],
     [67, 65],
     [60, 64],
     [61, 63]]


 def mirror_landmarks(landmarks, val):
     result = landmarks.copy()

     for idx in mirror_idxs:
         result[idx] = result[idx[::-1]]

     result[:, 0] = val - result[:, 0] - 1
     return result


 def draw_landmarks(image, image_landmarks, color=(0, 255, 0), transparent_mask=False,
                    ie_polys=None):
     if len(image_landmarks) != 68:
         raise Exception('get_image_eye_mask works only with 68 landmarks')

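The hull-mask helpers above all follow the same pattern: rasterize a polygon into a float mask, then shrink and feather it so a pasted face can blend smoothly into the frame. A self-contained toy version of the erode-then-blur step; the polygon and sizes below are made up for illustration:

    import cv2
    import numpy as np

    mask = np.zeros((100, 100), np.float32)
    poly = np.array([[20, 20], [80, 25], [75, 85], [25, 80]], np.int32)
    cv2.fillConvexPoly(mask, poly, 1.0)

    lowest_len = 60                      # stands in for min(lenx, leny) above
    ero = int(lowest_len * 0.085)        # 5 px erosion
    blur = int(lowest_len * 0.10)        # 6 px box blur

    mask = cv2.erode(mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (ero, ero)), iterations=1)
    mask = cv2.blur(mask, (blur, blur))  # edges now ramp smoothly from 1.0 to 0.0
    print(mask.min(), mask.max())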
@@ -303,47 +356,59 @@ def draw_landmarks (image, image_landmarks, color=(0,255,0), transparent_mask=Fa
     nose = int_lmrks[slice(*landmarks_68_pt["nose"])]

     # open shapes
     cv2.polylines(image, tuple(np.array([v]) for v in (
         right_eyebrow, jaw, left_eyebrow, np.concatenate((nose, [nose[-6]])))),
                   False, color, lineType=cv2.LINE_AA)
     # closed shapes
     cv2.polylines(image, tuple(np.array([v]) for v in (right_eye, left_eye, mouth)),
                   True, color, lineType=cv2.LINE_AA)
     # the rest of the cicles
     for x, y in np.concatenate((right_eyebrow, left_eyebrow, mouth, right_eye, left_eye, nose),
                                axis=0):
         cv2.circle(image, (x, y), 1, color, 1, lineType=cv2.LINE_AA)
     # jaw big circles
     for x, y in jaw:
         cv2.circle(image, (x, y), 2, color, lineType=cv2.LINE_AA)

     if transparent_mask:
         mask = get_image_hull_mask(image.shape, image_landmarks, ie_polys)
         image[...] = (image * (1 - mask) + image * mask / 2)[...]


 def draw_rect_landmarks(image, rect, image_landmarks, face_size, face_type, transparent_mask=False,
                         ie_polys=None, landmarks_color=(0, 255, 0)):
     draw_landmarks(image, image_landmarks, color=landmarks_color, transparent_mask=transparent_mask,
                    ie_polys=ie_polys)
     imagelib.draw_rect(image, rect, (255, 0, 0), 2)

     image_to_face_mat = get_transform_mat(image_landmarks, face_size, face_type)
     points = transform_points(
         [(0, 0), (0, face_size - 1), (face_size - 1, face_size - 1), (face_size - 1, 0)],
         image_to_face_mat, True)
     imagelib.draw_polygon(image, points, (0, 0, 255), 2)


 def calc_face_pitch(landmarks):
     if not isinstance(landmarks, np.ndarray):
         landmarks = np.array(landmarks)
     t = ((landmarks[6][1] - landmarks[8][1]) + (landmarks[10][1] - landmarks[8][1])) / 2.0
     b = landmarks[8][1]
     return float(b - t)


 def calc_face_yaw(landmarks):
     if not isinstance(landmarks, np.ndarray):
         landmarks = np.array(landmarks)
     l = ((landmarks[27][0] - landmarks[0][0]) + (landmarks[28][0] - landmarks[1][0]) + (
             landmarks[29][0] - landmarks[2][0])) / 3.0
     r = ((landmarks[16][0] - landmarks[27][0]) + (landmarks[15][0] - landmarks[28][0]) + (
             landmarks[14][0] - landmarks[29][0])) / 3.0
     return float(r - l)


 # returns pitch,yaw,roll [-1...+1]
 def estimate_pitch_yaw_roll(aligned_256px_landmarks):
     shape = (256, 256)
     focal_length = shape[1]
     camera_center = (shape[1] / 2, shape[0] / 2)
     camera_matrix = np.array(
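calc_face_pitch and calc_face_yaw are cheap screen-space proxies rather than true angles: the yaw proxy compares the horizontal distance from the nose bridge to each side of the jaw. A quick sanity check with invented, left-right symmetric coordinates (import path assumed), where the proxy should come out near zero:

    import numpy as np
    from facelib import LandmarksProcessor   # import path assumed

    # Frontal stand-in: nose bridge at x=128, jaw points mirrored around it.
    lmrks = np.zeros((68, 2), np.float32)
    lmrks[[27, 28, 29], 0] = 128                      # nose bridge
    lmrks[[0, 1, 2], 0] = [48, 50, 52]                # one side of the jaw
    lmrks[[16, 15, 14], 0] = [208, 206, 204]          # the other side, mirrored

    print(LandmarksProcessor.calc_face_yaw(lmrks))    # ~0.0 for this symmetric layout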
@@ -355,10 +420,10 @@ def estimate_pitch_yaw_roll(aligned_256px_landmarks):
         landmarks_68_3D,
         aligned_256px_landmarks.astype(np.float32),
         camera_matrix,
         np.zeros((4, 1)))

     pitch, yaw, roll = mathlib.rotationMatrixToEulerAngles(cv2.Rodrigues(rotation_vector)[0])
     pitch = np.clip(pitch / 1.30, -1.0, 1.0)
     yaw = np.clip(yaw / 1.11, -1.0, 1.0)
     roll = np.clip(roll / 3.15, -1.0, 1.0)
     return -pitch, yaw, roll
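estimate_pitch_yaw_roll is the usual solvePnP pipeline: fit the 3D reference landmarks (landmarks_68_3D) to the 2D points, turn the resulting rotation vector into a matrix with cv2.Rodrigues, then convert that to Euler angles. The hunks that follow belong to the second changed file, the ExtractSubprocessor code. A generic stand-alone sketch of the pose step, using a common Euler conversion only as a stand-in for the repo's mathlib.rotationMatrixToEulerAngles:

    import math
    import cv2
    import numpy as np

    def rotation_matrix_to_euler(R):
        # Common x-y-z Euler extraction; a stand-in, not the repo's implementation.
        sy = math.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
        if sy > 1e-6:
            return (math.atan2(R[2, 1], R[2, 2]),
                    math.atan2(-R[2, 0], sy),
                    math.atan2(R[1, 0], R[0, 0]))
        # gimbal-lock fallback
        return (math.atan2(-R[1, 2], R[1, 1]),
                math.atan2(-R[2, 0], sy),
                0.0)

    rotation_vector = np.array([[0.10], [0.20], [0.05]])   # e.g. the output of cv2.solvePnP
    R, _ = cv2.Rodrigues(rotation_vector)
    pitch, yaw, roll = rotation_matrix_to_euler(R)
    print(pitch, yaw, roll)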
@@ -48,9 +48,10 @@ class ExtractSubprocessor(Subprocessor):
         self.final_output_path = Path(client_dict['final_output_dir']) if 'final_output_dir' in client_dict.keys() else None
         self.debug_dir = client_dict['debug_dir']
         self.image_size = client_dict['image_size']
+        self.log_info(f'on_initialize: {client_dict}')


         #transfer and set stdin in order to work code.interact in debug subprocess
         stdin_fd = client_dict['stdin_fd']
         if stdin_fd is not None and DEBUG:
@@ -111,7 +112,7 @@ class ExtractSubprocessor(Subprocessor):

     #override
     def process_data(self, data):
-        filename_path = Path( data.filename )
+        filename_path = Path(data.filename)

         filename_path_str = str(filename_path)
         if self.cached_image[0] == filename_path_str:
@@ -168,7 +169,7 @@ class ExtractSubprocessor(Subprocessor):
             elif rot == 270:
                 rotated_image = image.swapaxes( 0,1 )[::-1,:,:]

-            rects = data.rects = self.e.extract (rotated_image, is_bgr=True)
+            rects = data.rects = self.e.extract(rotated_image, is_bgr=True)
             if len(rects) != 0:
                 break

@@ -185,7 +186,7 @@ class ExtractSubprocessor(Subprocessor):
             elif data.rects_rotation == 270:
                 rotated_image = image.swapaxes( 0,1 )[::-1,:,:]

-            data.landmarks = self.e.extract (rotated_image, data.rects, self.second_pass_e if (src_dflimg is None and data.landmarks_accurate) else None, is_bgr=True)
+            data.landmarks = self.e.extract(rotated_image, data.rects, self.second_pass_e if (src_dflimg is None and data.landmarks_accurate) else None, is_bgr=True)
             if data.rects_rotation != 0:
                 for i, (rect, lmrks) in enumerate(zip(data.rects, data.landmarks)):
                     new_rect, new_lmrks = rect, lmrks
@@ -220,7 +221,8 @@ class ExtractSubprocessor(Subprocessor):
             debug_image = image.copy()

             if src_dflimg is not None and len(rects) != 1:
-                #if re-extracting from dflimg and more than 1 or zero faces detected - dont process and just copy it
+                # if re-extracting from dflimg and more than 1 or zero faces detected:
+                # don't process and just copy it
                 print("src_dflimg is not None and len(rects) != 1", str(filename_path) )
                 output_file = str(self.final_output_path / filename_path.name)
                 if str(filename_path) != str(output_file):
@@ -230,7 +232,7 @@ class ExtractSubprocessor(Subprocessor):
             face_idx = 0
             for rect, image_landmarks in zip( rects, landmarks ):
                 if src_dflimg is not None and face_idx > 1:
-                    #cannot extract more than 1 face from dflimg
+                    # cannot extract more than 1 face from dflimg
                     break

                 if image_landmarks is None:
@@ -238,30 +240,36 @@ class ExtractSubprocessor(Subprocessor):

                 rect = np.array(rect)
                 rect_area = mathlib.polygon_area(np.array(rect[[0, 2, 2, 0]]), np.array(rect[[1, 1, 3, 3]]))
-                if self.image_size == 0:
-                    self.image_size = int(math.sqrt(rect_area))

+                # `self.image_size` is the output size for the entire process,
+                # we don't want to overwrite it
+                face_image_size = self.image_size
+                # TODO
+                self.log_info(f'BEFORE if face_image_size==0: {face_image_size}')
+                if face_image_size == 0:
+                    face_image_size = LandmarksProcessor.calc_image_size_for_unscaled(image_landmarks, 1, self.face_type)
+                # TODO
+                self.log_info(f'AFTER if face_image_size==0: {face_image_size}')

                 if self.face_type == FaceType.MARK_ONLY:
                     image_to_face_mat = None
                     face_image = image
                     face_image_landmarks = image_landmarks
                 else:
-                    image_to_face_mat = LandmarksProcessor.get_transform_mat (image_landmarks, self.image_size, self.face_type)
-                    face_image = cv2.warpAffine(image, image_to_face_mat, (self.image_size, self.image_size), cv2.INTER_LANCZOS4)
-                    face_image_landmarks = LandmarksProcessor.transform_points (image_landmarks, image_to_face_mat)
-                    landmarks_bbox = LandmarksProcessor.transform_points ( [ (0,0), (0,self.image_size-1), (self.image_size-1, self.image_size-1), (self.image_size-1,0) ], image_to_face_mat, True)
+                    image_to_face_mat = LandmarksProcessor.get_transform_mat(image_landmarks, face_image_size, self.face_type)
+                    face_image = cv2.warpAffine(image, image_to_face_mat, (face_image_size, face_image_size), cv2.INTER_LANCZOS4)
+                    # TODO
+                    self.log_info(f'warpAffine size: {face_image.shape[[1, 0]]}')
+                    face_image_landmarks = LandmarksProcessor.transform_points(image_landmarks, image_to_face_mat)
+                    landmarks_bbox = LandmarksProcessor.transform_points([(0,0), (0, face_image_size-1), (face_image_size-1, face_image_size-1), (face_image_size-1,0) ], image_to_face_mat, True)

                     landmarks_area = mathlib.polygon_area(landmarks_bbox[:,0], landmarks_bbox[:,1] )

-                    if landmarks_area > 4*rect_area: #get rid of faces which umeyama-landmark-area > 4*detector-rect-area
+                    if self.face_type is not FaceType.HEAD and landmarks_area > 4*rect_area: #get rid of faces which umeyama-landmark-area > 4*detector-rect-area
                         continue

                 if self.debug_dir is not None:
-                    LandmarksProcessor.draw_rect_landmarks (debug_image, rect, image_landmarks, self.image_size, self.face_type, transparent_mask=True)
+                    LandmarksProcessor.draw_rect_landmarks(debug_image, rect, image_landmarks, face_image_size, self.face_type, transparent_mask=True)

                 if filename_path.suffix == '.jpg':
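One small caveat on the new logging line in the last hunk: face_image.shape is a plain Python tuple, so indexing it with a list (shape[[1, 0]]) raises a TypeError instead of returning (width, height). A tuple-friendly equivalent, shown here only as a sketch with a stand-in array:

    import numpy as np

    face_image = np.zeros((128, 256, 3), np.uint8)      # stand-in for the warped face
    w, h = face_image.shape[1], face_image.shape[0]
    print(f'warpAffine size: {(w, h)}')                 # (256, 128)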