removing smooth_rect option

Colombo 2020-02-18 10:28:01 +04:00
parent 01376fd17c
commit 4f928074b9
6 changed files with 119 additions and 180 deletions
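
For orientation before the diff: the smooth_rect machinery (the split get_transform_mat_data / get_transform_mat_by_data pair and the frame-averaging get_averaged_transform_mat) is removed from the active code, get_transform_mat becomes a single function again, and the old helpers survive only inside a commented-out block at the end of the file. A minimal usage sketch of the retained entry point, assuming the repo's facelib package layout; img and landmarks are placeholders, not part of this commit:

# Hedged usage sketch, not part of the commit.
import cv2
import numpy as np
from facelib import FaceType, LandmarksProcessor

img = np.zeros((720, 1280, 3), dtype=np.uint8)               # placeholder frame
landmarks = np.random.rand(68, 2).astype(np.float32) * 512   # placeholder 68-point landmarks

output_size = 256
mat = LandmarksProcessor.get_transform_mat(landmarks, output_size, FaceType.FULL, scale=1.0)  # 2x3 affine
face = cv2.warpAffine(img, mat, (output_size, output_size), flags=cv2.INTER_LANCZOS4)         # aligned crop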


@@ -249,9 +249,7 @@ def transform_points(points, mat, invert=False):
    points = np.squeeze(points)
    return points

def get_transform_mat_data (image_landmarks, face_type, scale=1.0):
def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array (image_landmarks)
@@ -269,112 +267,9 @@ def get_transform_mat_data (image_landmarks, face_type, scale=1.0):
    bt_diag_vec /= npla.norm(bt_diag_vec)

    # calc modifier of diagonal vectors for scale and padding value
    padding, _ = FaceType_to_padding_remove_align.get(face_type, 0.0)
    mod = (1.0 / scale)* ( npla.norm(l_p[0]-l_p[2])*(padding*np.sqrt(2.0) + 0.5) )

    return l_c, tb_diag_vec, bt_diag_vec, mod

def get_transform_mat_by_data (l_c, tb_diag_vec, bt_diag_vec, mod, output_size, face_type):
    _, remove_align = FaceType_to_padding_remove_align.get(face_type, 0.0)

    # calc 3 points in global space to estimate 2d affine transform
    if not remove_align:
        l_t = np.array( [ np.round( l_c - tb_diag_vec*mod ),
                          np.round( l_c + bt_diag_vec*mod ),
                          np.round( l_c + tb_diag_vec*mod ) ] )
    else:
        # remove_align - face will be centered in the frame but not aligned
        l_t = np.array( [ np.round( l_c - tb_diag_vec*mod ),
                          np.round( l_c + bt_diag_vec*mod ),
                          np.round( l_c + tb_diag_vec*mod ),
                          np.round( l_c - bt_diag_vec*mod ),
                        ] )

        # get area of face square in global space
        area = mathlib.polygon_area(l_t[:,0], l_t[:,1] )

        # calc side of square
        side = np.float32(math.sqrt(area) / 2)

        # calc 3 points with unrotated square
        l_t = np.array( [ np.round( l_c + [-side,-side] ),
                          np.round( l_c + [ side,-side] ),
                          np.round( l_c + [ side, side] ) ] )

    # calc affine transform from 3 global space points to 3 local space points size of 'output_size'
    pts2 = np.float32(( (0,0),(output_size,0),(output_size,output_size) ))
    mat = cv2.getAffineTransform(l_t,pts2)
    return mat
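
In the remove_align branch just above, the rotated face quad is replaced by an axis-aligned square of equal area: polygon_area gives the quad's area, sqrt(area) the equivalent side length, and sqrt(area)/2 the half-side used to offset the three corners from the centre l_c. A small self-contained check of that step, with shoelace() standing in for the repo's mathlib.polygon_area (assumed to compute the same quantity):

# Hedged sketch of the equal-area, axis-aligned square used when remove_align is set.
import numpy as np

def shoelace(x, y):
    # standard shoelace formula for the area of a simple polygon
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

l_c = np.array([100.0, 100.0])                                            # face centre (placeholder)
l_t = np.array([[ 60.,  70.], [130.,  60.], [140., 130.], [70., 140.]])   # rotated face quad (placeholder)
side = np.float32(np.sqrt(shoelace(l_t[:, 0], l_t[:, 1])) / 2)            # half-side of the equal-area square
l_t = np.array([l_c + [-side, -side],
                l_c + [ side, -side],
                l_c + [ side,  side]])                                    # three corners suffice for an affine fit
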
def get_averaged_transform_mat (img_landmarks,
                                img_landmarks_prev,
                                img_landmarks_next,
                                average_frame_count,
                                average_center_frame_count,
                                output_size, face_type, scale=1.0):

    l_c_list = []
    tb_diag_vec_list = []
    bt_diag_vec_list = []
    mod_list = []

    count = max(average_frame_count,average_center_frame_count)
    for i in range ( -count, count+1, 1 ):
        if i < 0:
            lmrks = img_landmarks_prev[i] if -i < len(img_landmarks_prev) else None
        elif i > 0:
            lmrks = img_landmarks_next[i] if i < len(img_landmarks_next) else None
        else:
            lmrks = img_landmarks

        if lmrks is None:
            continue

        l_c, tb_diag_vec, bt_diag_vec, mod = get_transform_mat_data (lmrks, face_type, scale=scale)

        if i >= -average_frame_count and i <= average_frame_count:
            tb_diag_vec_list.append(tb_diag_vec)
            bt_diag_vec_list.append(bt_diag_vec)
            mod_list.append(mod)

        if i >= -average_center_frame_count and i <= average_center_frame_count:
            l_c_list.append(l_c)

    tb_diag_vec = np.mean( np.array(tb_diag_vec_list), axis=0 )
    bt_diag_vec = np.mean( np.array(bt_diag_vec_list), axis=0 )
    mod = np.mean( np.array(mod_list), axis=0 )
    l_c = np.mean( np.array(l_c_list), axis=0 )

    return get_transform_mat_by_data (l_c, tb_diag_vec, bt_diag_vec, mod, output_size, face_type)

def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
    l_c, tb_diag_vec, bt_diag_vec, mod = get_transform_mat_data (image_landmarks, face_type, scale=scale)

    return get_transform_mat_by_data (l_c, tb_diag_vec, bt_diag_vec, mod, output_size, face_type)

"""
def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array (image_landmarks)

    # get face padding value for FaceType
    padding, remove_align = FaceType_to_padding_remove_align.get(face_type, 0.0)

    # estimate landmarks transform from global space to local aligned space with bounds [0..1]
    mat = umeyama( np.concatenate ( [ image_landmarks[17:49] , image_landmarks[54:55] ] ) , landmarks_2D_new, True)[0:2]

    # get corner points in global space
    l_p = transform_points ( np.float32([(0,0),(1,0),(1,1),(0,1),(0.5,0.5)]) , mat, True)
    l_c = l_p[4]

    # calc diagonal vectors between corners in global space
    tb_diag_vec = (l_p[2]-l_p[0]).astype(np.float32)
    tb_diag_vec /= npla.norm(tb_diag_vec)
    bt_diag_vec = (l_p[1]-l_p[3]).astype(np.float32)
    bt_diag_vec /= npla.norm(bt_diag_vec)

    # calc modifier of diagonal vectors for scale and padding value
    mod = (1.0 / scale)* ( npla.norm(l_p[0]-l_p[2])*(padding*np.sqrt(2.0) + 0.5) )

    # calc 3 points in global space to estimate 2d affine transform
    if not remove_align:
        l_t = np.array( [ np.round( l_c - tb_diag_vec*mod ),
@@ -402,9 +297,8 @@ def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
    # calc affine transform from 3 global space points to 3 local space points size of 'output_size'
    pts2 = np.float32(( (0,0),(output_size,0),(output_size,output_size) ))
    mat = cv2.getAffineTransform(l_t,pts2)
    return mat
"""

def expand_eyebrows(lmrks, eyebrows_expand_mod=1.0):
    if len(lmrks) != 68:
        raise Exception('works only with 68 landmarks')
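
The transform construction above ends with a 3-point affine solve: three corners of the padded face square in image space (top-left, top-right, bottom-right) are mapped onto (0,0), (output_size,0) and (output_size,output_size), and cv2.getAffineTransform returns the 2x3 matrix that cv2.warpAffine later applies. A standalone numeric sketch of that final step, with made-up source points:

# Hedged sketch of the 3-point affine solve; source points are invented for illustration.
import cv2
import numpy as np

output_size = 256
l_t  = np.float32([[ 60.,  70.],     # top-left corner of the (possibly rotated) face square
                   [180.,  90.],     # top-right corner
                   [160., 210.]])    # bottom-right corner
pts2 = np.float32([(0, 0), (output_size, 0), (output_size, output_size)])
mat  = cv2.getAffineTransform(l_t, pts2)   # 2x3 matrix, ready for cv2.warpAffine
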
@@ -826,4 +720,102 @@ def estimate_pitch_yaw_roll(aligned_256px_landmarks):
# x_diff = transform_points( [ np.float32( (0,0) ), np.float32((x_diff,0)) ], mat, True)
# x_diff = x_diff[1]-x_diff[0]
#
# mat = cv2.getAffineTransform( l_t+y_diff+x_diff ,pts2)
# mat = cv2.getAffineTransform( l_t+y_diff+x_diff ,pts2)
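
The quoted block below preserves the removed smooth_rect path for reference only. If it were ever re-enabled, the caller would pass landmark arrays for neighbouring frames rather than a single frame; a hedged sketch of that call shape, where landmarks_per_frame, i and FaceType.FULL are placeholders rather than names from this commit:

# Hedged sketch: driving the commented-out averaging helper, were it restored.
prev_lmrks = landmarks_per_frame[max(0, i - 10):i]    # preceding frames, oldest first
next_lmrks = landmarks_per_frame[i + 1:i + 11]        # following frames, nearest first

mat = get_averaged_transform_mat(landmarks_per_frame[i],
                                 prev_lmrks, next_lmrks,
                                 average_frame_count=5,          # +-5 frames for vectors / scale
                                 average_center_frame_count=3,   # +-3 frames for the centre point
                                 output_size=256,
                                 face_type=FaceType.FULL)
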
"""
def get_averaged_transform_mat (img_landmarks,
                                img_landmarks_prev,
                                img_landmarks_next,
                                average_frame_count,
                                average_center_frame_count,
                                output_size, face_type, scale=1.0):

    l_c_list = []
    tb_diag_vec_list = []
    bt_diag_vec_list = []
    mod_list = []

    count = max(average_frame_count,average_center_frame_count)
    for i in range ( -count, count+1, 1 ):
        if i < 0:
            lmrks = img_landmarks_prev[i] if -i < len(img_landmarks_prev) else None
        elif i > 0:
            lmrks = img_landmarks_next[i] if i < len(img_landmarks_next) else None
        else:
            lmrks = img_landmarks

        if lmrks is None:
            continue

        l_c, tb_diag_vec, bt_diag_vec, mod = get_transform_mat_data (lmrks, face_type, scale=scale)

        if i >= -average_frame_count and i <= average_frame_count:
            tb_diag_vec_list.append(tb_diag_vec)
            bt_diag_vec_list.append(bt_diag_vec)
            mod_list.append(mod)

        if i >= -average_center_frame_count and i <= average_center_frame_count:
            l_c_list.append(l_c)

    tb_diag_vec = np.mean( np.array(tb_diag_vec_list), axis=0 )
    bt_diag_vec = np.mean( np.array(bt_diag_vec_list), axis=0 )
    mod = np.mean( np.array(mod_list), axis=0 )
    l_c = np.mean( np.array(l_c_list), axis=0 )

    return get_transform_mat_by_data (l_c, tb_diag_vec, bt_diag_vec, mod, output_size, face_type)

def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array (image_landmarks)

    # get face padding value for FaceType
    padding, remove_align = FaceType_to_padding_remove_align.get(face_type, 0.0)

    # estimate landmarks transform from global space to local aligned space with bounds [0..1]
    mat = umeyama( np.concatenate ( [ image_landmarks[17:49] , image_landmarks[54:55] ] ) , landmarks_2D_new, True)[0:2]

    # get corner points in global space
    l_p = transform_points ( np.float32([(0,0),(1,0),(1,1),(0,1),(0.5,0.5)]) , mat, True)
    l_c = l_p[4]

    # calc diagonal vectors between corners in global space
    tb_diag_vec = (l_p[2]-l_p[0]).astype(np.float32)
    tb_diag_vec /= npla.norm(tb_diag_vec)
    bt_diag_vec = (l_p[1]-l_p[3]).astype(np.float32)
    bt_diag_vec /= npla.norm(bt_diag_vec)

    # calc modifier of diagonal vectors for scale and padding value
    mod = (1.0 / scale)* ( npla.norm(l_p[0]-l_p[2])*(padding*np.sqrt(2.0) + 0.5) )

    # calc 3 points in global space to estimate 2d affine transform
    if not remove_align:
        l_t = np.array( [ np.round( l_c - tb_diag_vec*mod ),
                          np.round( l_c + bt_diag_vec*mod ),
                          np.round( l_c + tb_diag_vec*mod ) ] )
    else:
        # remove_align - face will be centered in the frame but not aligned
        l_t = np.array( [ np.round( l_c - tb_diag_vec*mod ),
                          np.round( l_c + bt_diag_vec*mod ),
                          np.round( l_c + tb_diag_vec*mod ),
                          np.round( l_c - bt_diag_vec*mod ),
                        ] )

        # get area of face square in global space
        area = mathlib.polygon_area(l_t[:,0], l_t[:,1] )

        # calc side of square
        side = np.float32(math.sqrt(area) / 2)

        # calc 3 points with unrotated square
        l_t = np.array( [ np.round( l_c + [-side,-side] ),
                          np.round( l_c + [ side,-side] ),
                          np.round( l_c + [ side, side] ) ] )

    # calc affine transform from 3 global space points to 3 local space points size of 'output_size'
    pts2 = np.float32(( (0,0),(output_size,0),(output_size,output_size) ))
    mat = cv2.getAffineTransform(l_t,pts2)
    return mat
"""