Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-08-22 22:34:25 -07:00)

adds in facial mesh code, eos library for next prebuilt

commit 4c062bd783 (parent 322333cb6f)
23 changed files with 88317 additions and 0 deletions
@@ -125,6 +125,7 @@ install:
- cmd: call .\DeepFaceLab%ARCH%\_internal\setenv.bat
- cmd: python -m pip install Flask==1.1.1
- cmd: python -m pip install flask-socketio==4.2.1
- cmd: python -m pip install eos-py==1.1.2
#- cmd: dir .\DeepFaceLab%ARCH%\_internal
#- cmd: dir .\DeepFaceLab%ARCH%\_internal\DeepFaceLab
- cmd: dir .\DeepFaceLab%ARCH%\_internal\python-3.6.8\Lib\site-packages
280   facelib/FacialMesh.py   Normal file
@@ -0,0 +1,280 @@
import os

import cv2
# noinspection PyUnresolvedReferences
import eos
import numpy as np

"""
3D facial mesh fitting helpers, adapted from the eos python demo:
https://github.com/patrikhuber/eos/blob/master/python/demo.py

Estimates the camera pose and fits the shape model of a 3D Morphable Model
to 68 ibug facial landmarks. In addition to the simple model fit, this uses
expression blendshapes and contour fitting, and the fitting can be iterated.

The 68 ibug landmarks are converted to mesh vertex indices using the
LandmarkMapper (ibug_to_sfm.txt).
"""

EOS_DIR = os.path.join(os.path.dirname(__file__), 'eos')
EOS_MODEL = os.path.join(EOS_DIR, 'sfm_shape_3448.bin')
EOS_BLENDSHAPES = os.path.join(EOS_DIR, 'expression_blendshapes_3448.bin')
EOS_MAPPER = os.path.join(EOS_DIR, 'ibug_to_sfm.txt')
EOS_EDGE_TOPO = os.path.join(EOS_DIR, 'sfm_3448_edge_topology.json')
EOS_CONTOURS = os.path.join(EOS_DIR, 'sfm_model_contours.json')

def get_mesh_mask(image_shape, image_landmarks, ie_polys=None):
    """
    Gets a full-face mask by aligning a 3D mesh to the facial landmarks
    :param image_shape: (height, width, channels) of the source image
    :param image_landmarks: 68 ibug-style (x, y) landmarks
    :param ie_polys: optional include/exclude polygons overlaid on the mask
    :return: float32 mask of shape (height, width, 1), values in [0, 1]
    """
    mesh, pose = _predict_3d_mesh(image_landmarks, image_shape)

    projected = _project_points(mesh, pose, image_shape)
    points = _center_and_reduce_to_2d(projected, image_shape)

    return _create_mask(points, mesh.tvi, image_shape, ie_polys)


def get_mesh_landmarks(landmarks, image):
    """
    Purely for testing: returns the projected mesh points, the extracted
    isomap texture and the mesh mask for an image.
    :param landmarks: 68 ibug-style (x, y) landmarks
    :param image: source image (BGR)
    :return: (projected 2D mesh points, isomap, mask)
    """
    mesh, pose = _predict_3d_mesh(landmarks, image.shape)

    projected = _project_points(mesh, pose, image.shape)
    points = _center_and_reduce_to_2d(projected, image.shape)

    isomap = _get_texture(mesh, pose, image)

    mask = _create_mask(points, mesh.tvi, image.shape)

    return points, isomap, mask

def _format_landmarks_for_eos(landmarks):
    """
    Converts (x, y) landmark coordinates into eos.core.Landmark objects,
    keyed by their 1-based ibug index ("1" to "68").
    :param landmarks: iterable of 68 (x, y) coordinates
    :return: list of eos.core.Landmark
    """
    eos_landmarks = []
    ibug_index = 1  # count from 1 to 68 for all ibug landmarks
    for coords in landmarks:
        eos_landmarks.append(eos.core.Landmark(str(ibug_index), [coords[0], coords[1]]))
        ibug_index = ibug_index + 1
    return eos_landmarks


def _predict_3d_mesh(landmarks, image_shape):
    """
    Fits the 3D Morphable Model (with expression blendshapes) to the landmarks.
    :param landmarks: 68 ibug-style (x, y) landmarks
    :param image_shape: (height, width, channels) of the source image
    :return: (mesh, pose) as returned by eos.fitting.fit_shape_and_pose
    """
    image_height, image_width, _ = image_shape
    model = eos.morphablemodel.load_model(EOS_MODEL)

    # The expression blendshapes:
    blendshapes = eos.morphablemodel.load_blendshapes(EOS_BLENDSHAPES)

    # Create a MorphableModel with expressions from the loaded neutral model and blendshapes:
    morphablemodel_with_expressions = eos.morphablemodel.MorphableModel(model.get_shape_model(), blendshapes,
                                                                        color_model=eos.morphablemodel.PcaModel(),
                                                                        vertex_definitions=None,
                                                                        texture_coordinates=model.get_texture_coordinates())

    # The landmark mapper is used to map 2D landmark points (e.g. from the ibug scheme) to vertex ids:
    landmark_mapper = eos.core.LandmarkMapper(EOS_MAPPER)

    # The edge topology is used to speed up computation of the occluding face contour fitting:
    edge_topology = eos.morphablemodel.load_edge_topology(EOS_EDGE_TOPO)

    # These two are used to fit the front-facing contour to the ibug contour landmarks:
    contour_landmarks = eos.fitting.ContourLandmarks.load(EOS_MAPPER)
    model_contour = eos.fitting.ModelContour.load(EOS_CONTOURS)

    # Format the landmarks for eos:
    eos_landmarks = _format_landmarks_for_eos(landmarks)

    # Fit the model, get back a mesh and the pose:
    (mesh, pose, shape_coeffs, blendshape_coeffs) = eos.fitting.fit_shape_and_pose(morphablemodel_with_expressions,
                                                                                   eos_landmarks, landmark_mapper,
                                                                                   image_width, image_height,
                                                                                   edge_topology, contour_landmarks,
                                                                                   model_contour)
    # The result can be saved as *.obj / *.isomap.png
    return mesh, pose

def _get_pitch_yaw_roll(pose):
    pitch, yaw, roll = pose.get_rotation_euler_angles()
    return pitch, yaw, roll


# Extract the texture from the image using given mesh and camera parameters:
def _get_texture(mesh, pose, image, resolution=512):
    return eos.render.extract_texture(mesh, pose, image, isomap_resolution=resolution)


# based on https://github.com/patrikhuber/eos/issues/140#issuecomment-314775288
def _get_opencv_viewport(image_shape):
    height, width, _ = image_shape
    return np.array([0, height, width, -height])


def _get_viewport_matrix(image_shape):
    viewport = _get_opencv_viewport(image_shape)
    viewport_matrix = np.zeros((4, 4))
    viewport_matrix[0, 0] = 0.5 * viewport[2]
    viewport_matrix[3, 0] = 0.5 * viewport[2] + viewport[0]
    viewport_matrix[1, 1] = 0.5 * viewport[3]
    viewport_matrix[3, 1] = 0.5 * viewport[3] + viewport[1]
    viewport_matrix[2, 2] = 0.5
    viewport_matrix[3, 2] = 0.5
    return viewport_matrix

def _project_points(mesh, pose, image_shape):
    """
    Projects the mesh vertices back into 2D image space
    :param mesh: fitted eos mesh
    :param pose: fitted pose (rendering parameters) returned by eos
    :param image_shape: (height, width, channels) of the source image
    :return: array of shape (N, 4) of projected homogeneous coordinates
    """
    # project through pose
    points = np.asarray(mesh.vertices)
    vpm = _get_viewport_matrix(image_shape)
    projection = pose.get_projection()
    modelview = pose.get_modelview()

    points = np.concatenate([points, np.ones((points.shape[0], 1), dtype=points.dtype)], axis=1)
    return np.asarray([vpm.dot(projection).dot(modelview).dot(point) for point in points])


def _center_and_reduce_to_2d(points, image_shape):
    """
    Centers the points on the image and reduces the homogeneous 4D coordinates to 2D
    :param points: (N, 4) projected homogeneous points
    :param image_shape: (height, width, channels) of the source image
    :return: (N, 2) points in image coordinates
    """
    height, width, _ = image_shape
    return points[:, :2] + [width / 2, height / 2]

def _create_mask(points, tvi, image_shape, ie_polys=None):
    """
    Creates a mask using the mesh vertices and their triangular face indices
    :param points: The mesh vertices, projected in 2D, shape (N, 2)
    :param tvi: the triangular vertex indices, shape (N, 3)
    :param image_shape: height, width, channels of image
    :param ie_polys: optional include/exclude polygons overlaid on the mask
    :return: mask of points covered by mesh
    """
    mask = np.zeros((image_shape[:2] + (1,)), dtype=np.uint8)
    triangles = points[tvi]
    mouth = points[MOUTH_SFM_LANDMARKS]

    triangles = triangles[_is_triangle_ccw(triangles)]  # filter out the backfaces

    np.rint(triangles, out=triangles)
    triangles = triangles.astype(np.int32)

    np.rint(mouth, out=mouth)
    mouth = mouth.astype(np.int32)

    cv2.fillPoly(mask, triangles, (255,))
    cv2.fillPoly(mask, [mouth], (255,))

    contours, hierarchy = cv2.findContours(np.copy(mask), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(mask, contours, 0, (255,), thickness=-1)

    mask = mask.astype(np.float32) / 255

    if ie_polys is not None:
        ie_polys.overlay_mask(mask)

    return mask


def _is_triangle_ccw(triangle_vertices):
    """
    Returns the boolean mask for an array of triangle vertices, testing whether each face is clockwise
    (facing away) or counter-clockwise (facing towards the camera). Compares the slopes of the first
    two segments.
    :param triangle_vertices: numpy array of shape (N, 3, 2)
    :return: numpy boolean mask of shape (N, )
    """
    vert_0_x, vert_0_y = triangle_vertices[:, 0, 0], triangle_vertices[:, 0, 1]
    vert_1_x, vert_1_y = triangle_vertices[:, 1, 0], triangle_vertices[:, 1, 1]
    vert_2_x, vert_2_y = triangle_vertices[:, 2, 0], triangle_vertices[:, 2, 1]

    return ((vert_1_y - vert_0_y) * (vert_2_x - vert_1_x)) > ((vert_2_y - vert_1_y) * (vert_1_x - vert_0_x))

""" The mesh landmarks surrounding the mouth (unfilled in mesh) """
|
||||
MOUTH_SFM_LANDMARKS = [
|
||||
398,
|
||||
3446,
|
||||
408,
|
||||
3253,
|
||||
406,
|
||||
3164,
|
||||
404,
|
||||
3115,
|
||||
402,
|
||||
3257,
|
||||
399,
|
||||
3374,
|
||||
442,
|
||||
3376,
|
||||
813,
|
||||
3260,
|
||||
815,
|
||||
3119,
|
||||
817,
|
||||
3167,
|
||||
819,
|
||||
3256,
|
||||
821,
|
||||
3447,
|
||||
812,
|
||||
3427,
|
||||
823,
|
||||
3332,
|
||||
826,
|
||||
3157,
|
||||
828,
|
||||
3212,
|
||||
830,
|
||||
3382,
|
||||
832,
|
||||
3391,
|
||||
423,
|
||||
3388,
|
||||
420,
|
||||
3381,
|
||||
418,
|
||||
3211,
|
||||
416,
|
||||
3155,
|
||||
414,
|
||||
3331,
|
||||
410,
|
||||
3426,
|
||||
]
|
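For orientation, a minimal usage sketch of the new module. The image path and landmark file below are hypothetical placeholders; in the repository the landmarks come from the existing LandmarksExtractor pipeline, as the test under facelib/test shows.

    import cv2
    import numpy as np

    from facelib.FacialMesh import get_mesh_mask, get_mesh_landmarks

    # Hypothetical inputs: a BGR image and its 68 ibug-style (x, y) landmarks.
    image = cv2.imread('face.jpg')             # placeholder path
    landmarks = np.load('face_landmarks.npy')  # placeholder, shape (68, 2)

    # Full-face mask aligned to the fitted 3D mesh: float32 in [0, 1], shape (h, w, 1).
    mask = get_mesh_mask(image.shape, landmarks)

    # Test/debug variant: projected mesh points, isomap texture and mask.
    points, isomap, mask = get_mesh_landmarks(landmarks, image)

    cv2.imshow('mesh mask', mask * (image.astype(np.float32) / 255.0))
    cv2.waitKey(0)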
24   facelib/eos/bfm2009_model_contours.json   Executable file
@@ -0,0 +1,24 @@
{
    "model_contour": {
        "right_contour": [
            27302,
            46587,
            45866,
            2445,
            1260,
            721,
            316,
            27431
        ],
        "left_contour": [
            27689,
            16312,
            15943,
            15450,
            14313,
            50480,
            49788,
            27818
        ]
    }
}
30   facelib/eos/bfm2017-1_bfm_nomouth_model_contours.json   Executable file
@@ -0,0 +1,30 @@
{
    "model_contour": {
        "right_contour": [
            22451,
            22463,
            22231,
            21961,
            21591,
            21737,
            23037,
            43151,
            44368,
            45617,
            46999
        ],
        "left_contour": [
            31991,
            32002,
            32272,
            32795,
            33069,
            33088,
            32329,
            52325,
            50233,
            49299,
            48560
        ]
    }
}
BIN   facelib/eos/expression_blendshapes_3448.bin   Executable file
Binary file not shown.
53   facelib/eos/ibug_to_bfm2009.txt   Executable file
|
@ -0,0 +1,53 @@
|
|||
# Mapping from the 68-point ibug annotations to the BFM (2009) (3DMM vertex indices).
|
||||
# The numbers in brackets are MPEG-4 facial feature point numbers.
|
||||
|
||||
[landmark_mappings] # A mapping from input landmarks (ibug, lhs) to output landmarks (BFM, rhs)
|
||||
# 1 to 8 are the right contour landmarks
|
||||
# chin bottom (2.1, MPEG point not marked in the BFM)
|
||||
# 10 to 17 are the left contour landmarks
|
||||
18 = 38792 # right eyebrow outer-corner (4.6)
|
||||
20 = 40087 # right eyebrow middle, vertical middle (4.4, the MPEG point is on top of the brow though)
|
||||
22 = 40514 # right eyebrow inner-corner (4.2)
|
||||
23 = 41091 # left eyebrow inner-corner (4.1)
|
||||
25 = 41511 # left eyebrow middle (4.3, the MPEG point is on top of the brow though)
|
||||
27 = 42825 # left eyebrow outer-corner (4.5)
|
||||
31 = 8319 # nose-tip (9.3)
|
||||
34 = 8334 # nose-lip junction (9.15)
|
||||
37 = 2088 # right eye outer-corner (3.12)
|
||||
40 = 5959 # right eye inner-corner (3.8)
|
||||
43 = 10603 # left eye inner-corner (3.11)
|
||||
46 = 14472 # left eye outer-corner (3.7)
|
||||
49 = 5006 # right mouth corner (8.4)
|
||||
52 = 8344 # upper lip middle top (8.1)
|
||||
55 = 11714 # left mouth corner (8.3)
|
||||
58 = 8374 # lower lip middle bottom (8.2)
|
||||
#61 # right inner corner of the mouth (2.5)
|
||||
#62 # upper lip right bottom outer (2.7)
|
||||
63 = 8354 # upper lip middle bottom (2.2)
|
||||
#64 # upper lip left bottom outer (2.6)
|
||||
#65 # left inner corner of the mouth (2.4)
|
||||
#66 # lower lip left top outer (2.8)
|
||||
67 = 8366 # lower lip middle top (2.3)
|
||||
#68 # lower lip right top outer (2.9)
|
||||
|
||||
|
||||
# Definitions of which 2D landmarks make up the right and left face contours:
|
||||
[contour_landmarks]
|
||||
right = [ 1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8
|
||||
]
|
||||
left = [ 10,
|
||||
11,
|
||||
12,
|
||||
13,
|
||||
14,
|
||||
15,
|
||||
16,
|
||||
17
|
||||
]
|
60   facelib/eos/ibug_to_bfm2017-1_bfm_nomouth.txt   Executable file
|
@ -0,0 +1,60 @@
|
|||
# Mapping from the 68-point ibug annotations to the BFM2017 head model (vertex indices).
|
||||
|
||||
[landmark_mappings] # A mapping from input landmarks (ibug, lhs) to output landmarks (BFM, rhs)
|
||||
# 1 to 8 are the right contour landmarks
|
||||
9 = 47846 # chin bottom
|
||||
# 10 to 17 are the left contour landmarks
|
||||
#18 = # right eyebrow outer-corner
|
||||
#20 = # right eyebrow middle, vertical middle
|
||||
#22 = # right eyebrow inner-corner
|
||||
#23 = # left eyebrow inner-corner
|
||||
#25 = # left eyebrow middle
|
||||
#27 = # left eyebrow outer-corner
|
||||
31 = 8156 # nose-tip
|
||||
#34 = # nose-lip junction
|
||||
37 = 2602 # right eye outer-corner
|
||||
40 = 5830 # right eye inner-corner
|
||||
43 = 10390 # left eye inner-corner
|
||||
46 = 13481 # left eye outer-corner
|
||||
49 = 5522 # right mouth corner
|
||||
50 = 6026 # upper lip right-right top
|
||||
51 = 7355 # upper lip middle-right top
|
||||
52 = 8181 # upper lip middle top
|
||||
53 = 9007 # upper lip middle-left top
|
||||
54 = 10329 # upper lip left-left top
|
||||
55 = 10857 # left mouth corner
|
||||
56 = 9730 #
|
||||
57 = 8670 #
|
||||
58 = 8199 # lower lip middle bottom
|
||||
59 = 7726 #
|
||||
60 = 6898 #
|
||||
61 = 6291 # right inner corner of the mouth
|
||||
62 = 7364 # upper lip right bottom outer
|
||||
63 = 8190 # upper lip middle bottom
|
||||
64 = 9016 # upper lip left bottom outer
|
||||
65 = 10088 # left inner corner of the mouth
|
||||
66 = 8663 # lower lip left top outer
|
||||
67 = 8191 # lower lip middle top
|
||||
68 = 7719 # lower lip right top outer
|
||||
|
||||
|
||||
# Definitions of which 2D landmarks make up the right and left face contours:
|
||||
[contour_landmarks]
|
||||
right = [ 1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8
|
||||
]
|
||||
left = [ 10,
|
||||
11,
|
||||
12,
|
||||
13,
|
||||
14,
|
||||
15,
|
||||
16,
|
||||
17
|
||||
]
|
82   facelib/eos/ibug_to_sfm.txt   Executable file
|
@ -0,0 +1,82 @@
|
|||
# Mapping from the 68-point ibug annotations to the Surrey Face Model (SFM) mesh vertex indices.
|
||||
# Note: Points above vertex id 845 are not defined on the reference and thus not available in all model resolutions.
|
||||
# This file uses TOML syntax (https://github.com/toml-lang/toml).
|
||||
|
||||
# Mappings from input landmarks (ibug, lhs) to output landmarks (SFM, rhs):
|
||||
[landmark_mappings]
|
||||
# 1 to 8 are the right contour landmarks
|
||||
9 = 33 # chin bottom
|
||||
# 10 to 17 are the left contour landmarks
|
||||
18 = 225 # right eyebrow outer-corner (18)
|
||||
19 = 229 # right eyebrow between middle and outer corner
|
||||
20 = 233 # right eyebrow middle, vertical middle (20)
|
||||
21 = 2086 # right eyebrow between middle and inner corner
|
||||
22 = 157 # right eyebrow inner-corner (19)
|
||||
23 = 590 # left eyebrow inner-corner (23)
|
||||
24 = 2091 # left eyebrow between inner corner and middle
|
||||
25 = 666 # left eyebrow middle (24)
|
||||
26 = 662 # left eyebrow between middle and outer corner
|
||||
27 = 658 # left eyebrow outer-corner (22)
|
||||
28 = 2842 # bridge of the nose (parallel to upper eye lids)
|
||||
29 = 379 # middle of the nose, a bit below the lower eye lids
|
||||
30 = 272 # above nose-tip (1cm or so)
|
||||
31 = 114 # nose-tip (3)
|
||||
32 = 100 # right nostril, below nose, nose-lip junction
|
||||
33 = 2794 # nose-lip junction
|
||||
34 = 270 # nose-lip junction (28)
|
||||
35 = 2797 # nose-lip junction
|
||||
36 = 537 # left nostril, below nose, nose-lip junction
|
||||
37 = 177 # right eye outer-corner (1)
|
||||
38 = 172 # right eye pupil top right (from subject's perspective)
|
||||
39 = 191 # right eye pupil top left
|
||||
40 = 181 # right eye inner-corner (5)
|
||||
41 = 173 # right eye pupil bottom left
|
||||
42 = 174 # right eye pupil bottom right
|
||||
43 = 614 # left eye inner-corner (8)
|
||||
44 = 624 # left eye pupil top right
|
||||
45 = 605 # left eye pupil top left
|
||||
46 = 610 # left eye outer-corner (2)
|
||||
47 = 607 # left eye pupil bottom left
|
||||
48 = 606 # left eye pupil bottom right
|
||||
49 = 398 # right mouth corner (12)
|
||||
50 = 315 # upper lip right top outer
|
||||
51 = 413 # upper lip middle top right
|
||||
52 = 329 # upper lip middle top (14)
|
||||
53 = 825 # upper lip middle top left
|
||||
54 = 736 # upper lip left top outer
|
||||
55 = 812 # left mouth corner (13)
|
||||
56 = 841 # lower lip left bottom outer
|
||||
57 = 693 # lower lip middle bottom left
|
||||
58 = 411 # lower lip middle bottom (17)
|
||||
59 = 264 # lower lip middle bottom right
|
||||
60 = 431 # lower lip right bottom outer
|
||||
# 61 not defined - would be right inner corner of the mouth
|
||||
62 = 416 # upper lip right bottom outer
|
||||
63 = 423 # upper lip middle bottom
|
||||
64 = 828 # upper lip left bottom outer
|
||||
# 65 not defined - would be left inner corner of the mouth
|
||||
66 = 817 # lower lip left top outer
|
||||
67 = 442 # lower lip middle top
|
||||
68 = 404 # lower lip right top outer
|
||||
|
||||
|
||||
# Definitions of which 2D landmarks make up the right and left face contours:
|
||||
[contour_landmarks]
|
||||
right = [ 1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8
|
||||
]
|
||||
left = [ 10,
|
||||
11,
|
||||
12,
|
||||
13,
|
||||
14,
|
||||
15,
|
||||
16,
|
||||
17
|
||||
]
|
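As a sketch of how this mapping file and the model-side contours are consumed at run time: these are the same calls the new facelib/FacialMesh.py makes, and the paths assume the facelib/eos/ layout added in this commit.

    import eos

    # Translate ibug landmark ids ("1".."68") to Surrey Face Model vertex indices:
    landmark_mapper = eos.core.LandmarkMapper('facelib/eos/ibug_to_sfm.txt')

    # The [contour_landmarks] section of the same file, plus the model-side contour vertices:
    contour_landmarks = eos.fitting.ContourLandmarks.load('facelib/eos/ibug_to_sfm.txt')
    model_contour = eos.fitting.ModelContour.load('facelib/eos/sfm_model_contours.json')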
46   facelib/eos/readme.txt   Executable file
@@ -0,0 +1,46 @@
eos: A lightweight header-only 3D Morphable Model fitting library in modern C++11/14
=========

Files in this directory:

- ibug_to_sfm.txt:
  Mappings from the popular ibug 68-point 2D facial landmarks markup to
  Surrey Face Model indices.

- sfm_shape_3448.bin:
  The public shape-only Surrey 3D Morphable Face Model.
  To obtain a full 3DMM and higher resolution levels, follow the instructions
  at cvssp.org/facemodel.
  Details about the different models can be found in:
  "A Multiresolution 3D Morphable Face Model and Fitting Framework",
  P. Huber, G. Hu, R. Tena, P. Mortazavian, W. Koppen, W. Christmas, M. Rätsch, J. Kittler,
  VISAPP 2016, Rome, Italy.

- expression_blendshapes_3448.bin:
  6 expression blendshapes for the sfm_shape_3448 model. Contains the expressions anger,
  disgust, fear, happiness, sadness and surprise.

- sfm_3448_edge_topology.json:
  Contains a precomputed list of the model's edges, and the two faces and vertices that are
  adjacent to each edge. Uses 1-based indexing ("0" has a special meaning of "no adjacent
  vertex/edge") - this may change to 0-based in the future to be consistent with the rest of
  the library. The file is used in the edge-fitting.

- sfm_model_contours.json:
  Definition of the SFM's contour vertices of the right and left side of the face.

- sfm_reference.obj:
  The reference 3D shape used to build the Surrey Face Model. We make it available so
  that new user-defined landmark points can be marked in this lowest-resolution
  model, if the points exist here.

- sfm_reference_annotated.obj:
  Visualisation of the landmark points defined in the ibug_to_sfm.txt mapping file.
  * Red: Annotated ibug points that are defined on the reference shape.
  * Green: Contour vertices from the file model_contours.json.
  The file ibug_to_sfm.txt contains a few more mappings of landmarks that are not present
  in the reference, for example the middle-inner eyebrow points - they are not visualised.

- sfm_reference_symmetry.txt:
  Contains a list of vertex symmetries of the reference shape, i.e. each
  vertex's symmetric counterpart. See the top of the file for more information.
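A minimal sketch of loading the assets described above with eos-py, restricted to the calls that facelib/FacialMesh.py itself uses; paths assume the facelib/eos/ layout from this commit.

    import eos

    model = eos.morphablemodel.load_model('facelib/eos/sfm_shape_3448.bin')
    blendshapes = eos.morphablemodel.load_blendshapes('facelib/eos/expression_blendshapes_3448.bin')
    edge_topology = eos.morphablemodel.load_edge_topology('facelib/eos/sfm_3448_edge_topology.json')

    # Neutral shape model combined with the six expression blendshapes, as used for fitting:
    model_with_expressions = eos.morphablemodel.MorphableModel(
        model.get_shape_model(), blendshapes,
        color_model=eos.morphablemodel.PcaModel(),
        vertex_definitions=None,
        texture_coordinates=model.get_texture_coordinates())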
49   facelib/eos/scripts/compute_edgestruct.m   Executable file
|
@ -0,0 +1,49 @@
|
|||
%% The code in this file is largely copied and modified from
|
||||
% https://github.com/waps101/3DMM_edges:
|
||||
% A. Bas, W.A.P. Smith, T. Bolkart and S. Wuhrer, "Fitting a 3D Morphable
|
||||
% Model to Edges: A Comparison Between Hard and Soft Correspondences",
|
||||
% ACCV Workshop 2016.
|
||||
% The code is licensed under the Apache-2.0 license.
|
||||
|
||||
%% Read the instructions in share/generate-edgestruct.py for how to use this script.
|
||||
|
||||
function [] = compute_edgestruct(trianglelist_file)
|
||||
load(trianglelist_file); % loads 'triangle_list' from the file
|
||||
num_vertices = max(max(triangle_list)); % we assume that the largest triangle
|
||||
% index that we're going to find is the
|
||||
% number of vertices of the model.
|
||||
% Get the edge list:
|
||||
TR = triangulation(double(triangle_list), ones(num_vertices, 1), ones(num_vertices, 1), ones(num_vertices, 1));
|
||||
Ev = TR.edges; % This should be a list of all the edges.
|
||||
clear TR;
|
||||
Ef = meshFaceEdges(triangle_list, Ev);
|
||||
save('edgestruct.mat', 'Ef', 'Ev'); % Load this file back into generate-edgestruct.py.
|
||||
end
|
||||
|
||||
% This function is copied from:
|
||||
% https://github.com/waps101/3DMM_edges/blob/master/utils/meshFaceEdges.m,
|
||||
% on 3 Oct 2016.
|
||||
function Ef = meshFaceEdges(faces, edges)
|
||||
%MESHFACEEDGES Compute faces adjacent to each edge in the mesh
|
||||
% faces - nverts by 3 matrix of mesh faces
|
||||
% edges - nedges by 2 matrix containing vertices adjacent to each edge
|
||||
%
|
||||
% This function is slow! But it only needs to be run once for a morphable
|
||||
% model and the edge-face list can then be saved
|
||||
|
||||
nedges = size(edges, 1);
|
||||
|
||||
faces = sort(faces, 2);
|
||||
edges = sort(edges, 2);
|
||||
|
||||
disp(' ');
|
||||
for i=1:nedges
|
||||
idx = find(((faces(:,1)==edges(i,1)) & ( (faces(:,2)==edges(i,2)) | (faces(:,3)==edges(i,2)) )) | ((faces(:,2)==edges(i,1)) & (faces(:,3)==edges(i,2))));
|
||||
if length(idx)==1
|
||||
idx = [0 idx];
|
||||
end
|
||||
Ef(i,:)=[idx(1) idx(2)];
|
||||
fprintf('\b\b\b\b\b\b%05.2f%%', i/nedges*100);
|
||||
end
|
||||
|
||||
end
|
55   facelib/eos/scripts/convert-bfm2009-to-eos.py   Executable file
|
@ -0,0 +1,55 @@
|
|||
import numpy as np
|
||||
import eos
|
||||
import scipy.io
|
||||
|
||||
# This script converts the Basel Face Model 2009 (BFM2009, [1]) to the eos model format,
|
||||
# specifically the file PublicMM1/01_MorphableModel.mat from the BFM2009 distribution.
|
||||
#
|
||||
# The script does not use or convert the segments of the BFM2009, just the global PCA.
|
||||
# The BFM2009 also does not come with texture (uv-) coordinates. If you have texture coordinates for the BFM, they can be
|
||||
# added to the eos.morphablemodel.MorphableModel(...) constructor in the third argument. Note that eos only supports one
|
||||
# uv-coordinate per vertex.
|
||||
#
|
||||
# [1]: A 3D Face Model for Pose and Illumination Invariant Face Recognition,
|
||||
# P. Paysan, R. Knothe, B. Amberg, S. Romdhani, and T. Vetter,
|
||||
# AVSS 2009.
|
||||
# http://faces.cs.unibas.ch/bfm/main.php?nav=1-0&id=basel_face_model
|
||||
|
||||
# Set this to the path of the PublicMM1/01_MorphableModel.mat file from the BFM2009 distribution:
|
||||
bfm2009_path = r"./PublicMM1/01_MorphableModel.mat"
|
||||
bfm2009 = scipy.io.loadmat(bfm2009_path)
|
||||
|
||||
# The PCA shape model:
|
||||
# Note: All the matrices are of type float32, so we're good and don't need to convert anything.
|
||||
shape_mean = bfm2009['shapeMU']
|
||||
shape_orthogonal_pca_basis = bfm2009['shapePC']
|
||||
# Their basis is unit norm: np.linalg.norm(shape_pca_basis[:,0]) == 1.0
|
||||
# And the basis vectors are orthogonal: np.dot(shape_pca_basis[:,0], shape_pca_basis[:,0]) == 1.0
|
||||
# np.dot(shape_pca_basis[:,0], shape_pca_basis[:,1]) == 1e-08
|
||||
shape_pca_standard_deviations = bfm2009['shapeEV'] # These are standard deviations, not eigenvalues!
|
||||
shape_pca_eigenvalues = np.square(shape_pca_standard_deviations)
|
||||
triangle_list = bfm2009['tl'] - 1 # Convert from 1-based Matlab indexing to 0-based C++ indexing
|
||||
# The BFM has front-facing triangles defined the wrong way round (not in accordance with OpenGL) - we swap the indices:
|
||||
for t in triangle_list:
|
||||
t[1], t[2] = t[2], t[1]
|
||||
shape_model = eos.morphablemodel.PcaModel(shape_mean, shape_orthogonal_pca_basis, shape_pca_eigenvalues,
|
||||
triangle_list.tolist())
|
||||
|
||||
# PCA colour model:
|
||||
color_mean = bfm2009['texMU']
|
||||
# The BFM2009's colour data is in the range [0, 255], while the SFM is in [0, 1], so we divide by 255:
|
||||
color_mean /= 255
|
||||
color_orthogonal_pca_basis = bfm2009['texPC']
|
||||
color_pca_standard_deviations = bfm2009['texEV'] # Again, these are standard deviations, not eigenvalues
|
||||
color_pca_standard_deviations /= 255 # Divide the standard deviations by the same amount as the mean
|
||||
color_pca_eigenvalues = np.square(color_pca_standard_deviations)
|
||||
|
||||
color_model = eos.morphablemodel.PcaModel(color_mean, color_orthogonal_pca_basis, color_pca_eigenvalues,
|
||||
triangle_list.tolist())
|
||||
|
||||
# Construct and save the BFM2009 model in the eos format:
|
||||
model = eos.morphablemodel.MorphableModel(shape_model, color_model, vertex_definitions=None,
|
||||
texture_coordinates=[],
|
||||
texture_triangle_indices=[]) # uv-coordinates can be added here
|
||||
eos.morphablemodel.save_model(model, "bfm2009.bin")
|
||||
print("Converted and saved model as bfm2009.bin.")
|
55   facelib/eos/scripts/convert-bfm2017-to-eos.py   Executable file
|
@ -0,0 +1,55 @@
|
|||
import numpy as np
|
||||
import eos
|
||||
import h5py
|
||||
|
||||
# This script converts the Basel Face Model 2017 (BFM2017, [1]) to the eos model format,
|
||||
# specifically the files model2017-1_face12_nomouth.h5 and model2017-1_bfm_nomouth.h5 from the BFM2017 download.
|
||||
#
|
||||
# The BFM2017 does not come with texture (uv-) coordinates. If you have texture coordinates for the BFM, they can be
|
||||
# added to the eos.morphablemodel.MorphableModel(...) constructor in the third argument. Note that eos only supports one
|
||||
# uv-coordinate per vertex.
|
||||
#
|
||||
# [1]: Morphable Face Models - An Open Framework,
|
||||
# T. Gerig, A. Morel-Forster, C. Blumer, B. Egger, M. Lüthi, S. Schönborn and T. Vetter,
|
||||
# arXiv preprint, 2017.
|
||||
# http://faces.cs.unibas.ch/bfm/bfm2017.html
|
||||
|
||||
# Set this to the path of the model2017-1_bfm_nomouth.h5 or model2017-1_face12_nomouth.h5 file from the BFM2017 download:
|
||||
bfm2017_file = r"./model2017-1_bfm_nomouth.h5"
|
||||
|
||||
with h5py.File(bfm2017_file, 'r') as hf:
|
||||
# The PCA shape model:
|
||||
shape_mean = np.array(hf['shape/model/mean'])
|
||||
shape_orthogonal_pca_basis = np.array(hf['shape/model/pcaBasis'])
|
||||
# Their basis is unit norm: np.linalg.norm(shape_pca_basis[:,0]) == ~1.0
|
||||
# And the basis vectors are orthogonal: np.dot(shape_pca_basis[:,0], shape_pca_basis[:,0]) == 1.0
|
||||
# np.dot(shape_pca_basis[:,0], shape_pca_basis[:,1]) == 1e-10
|
||||
shape_pca_variance = np.array(hf['shape/model/pcaVariance']) # the PCA variances are the eigenvectors
|
||||
|
||||
triangle_list = np.array(hf['shape/representer/cells'])
|
||||
|
||||
shape_model = eos.morphablemodel.PcaModel(shape_mean, shape_orthogonal_pca_basis, shape_pca_variance,
|
||||
triangle_list.transpose().tolist())
|
||||
|
||||
# PCA colour model:
|
||||
color_mean = np.array(hf['color/model/mean'])
|
||||
color_orthogonal_pca_basis = np.array(hf['color/model/pcaBasis'])
|
||||
color_pca_variance = np.array(hf['color/model/pcaVariance'])
|
||||
|
||||
color_model = eos.morphablemodel.PcaModel(color_mean, color_orthogonal_pca_basis, color_pca_variance,
|
||||
triangle_list.transpose().tolist())
|
||||
|
||||
# PCA expression model:
|
||||
expression_mean = np.array(hf['expression/model/mean'])
|
||||
expression_pca_basis = np.array(hf['expression/model/pcaBasis'])
|
||||
expression_pca_variance = np.array(hf['expression/model/pcaVariance'])
|
||||
|
||||
expression_model = eos.morphablemodel.PcaModel(expression_mean, expression_pca_basis, expression_pca_variance,
|
||||
triangle_list.transpose().tolist())
|
||||
|
||||
# Construct and save an eos model from the BFM data:
|
||||
model = eos.morphablemodel.MorphableModel(shape_model, expression_model, color_model, vertex_definitions=None,
|
||||
texture_coordinates=[],
|
||||
texture_triangle_indices=[]) # uv-coordinates can be added here
|
||||
eos.morphablemodel.save_model(model, "bfm2017-1_bfm_nomouth.bin")
|
||||
print("Converted and saved model as bfm2017-1_bfm_nomouth.bin.")
|
30   facelib/eos/scripts/generate-edgestruct.py   Executable file
@@ -0,0 +1,30 @@
import numpy as np
import eos
import scipy.io

# This script computes an edge_topology.json file for a given model, which is used in eos's contour fitting.
# The script can be used for any Morphable Model, for example the SFM, BFM2009, BFM2017, and others.

# Set this to the path of the model that you want to generate an edgestruct from:
model_path = "bfm2017-1_bfm_nomouth.bin"

# Step 1:
# Save the triangle list of the model to Matlab (to be read by Matlab in Step 2):
model = eos.morphablemodel.load_model(model_path)
triangle_list = np.array(model.get_shape_model().get_triangle_list()) + 1  # add 1 to make 1-based indices for Matlab
scipy.io.savemat("bfm2017-1_bfm_nomouth_trianglelist.mat", {'triangle_list': triangle_list})

# Step 2:
# Open Matlab and run compute_edgestruct.m on the generated triangle-list .mat file.
# Matlab will save an edgestruct.mat file with face and vertex adjacency information.

# Step 3:
# Load the generated edgestruct.mat from Matlab and save it as an eos EdgeTopology in json format:
edgestruct_path = r"edgestruct.mat"
edge_info = scipy.io.loadmat(edgestruct_path)
Ef = edge_info['Ef']
Ev = edge_info['Ev']
edge_topology = eos.morphablemodel.EdgeTopology(Ef.tolist(), Ev.tolist())
eos.morphablemodel.save_edge_topology(edge_topology, "bfm2017-1_bfm_nomouth_edge_topology.json")

print("Finished generating edge-topology file and saved it as bfm2017-1_bfm_nomouth_edge_topology.json.")
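The JSON written by this three-step round trip is the same kind of edge-topology file that the fitting code loads at run time; a small sketch, with the filename taken from the script above:

    import eos

    edge_topology = eos.morphablemodel.load_edge_topology('bfm2017-1_bfm_nomouth_edge_topology.json')
    # ...then passed as the edge_topology argument of eos.fitting.fit_shape_and_pose(...)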
81480   facelib/eos/sfm_3448_edge_topology.json   Executable file
File diff suppressed because it is too large.
42   facelib/eos/sfm_model_contours.json   Executable file
|
@ -0,0 +1,42 @@
|
|||
{
|
||||
"model_contour": {
|
||||
"right_contour": [
|
||||
380,
|
||||
373,
|
||||
356,
|
||||
358,
|
||||
359,
|
||||
360,
|
||||
365,
|
||||
363,
|
||||
364,
|
||||
388,
|
||||
391,
|
||||
392,
|
||||
393,
|
||||
11,
|
||||
21,
|
||||
25,
|
||||
22
|
||||
],
|
||||
"left_contour": [
|
||||
795,
|
||||
790,
|
||||
773,
|
||||
775,
|
||||
776,
|
||||
777,
|
||||
782,
|
||||
780,
|
||||
781,
|
||||
802,
|
||||
805,
|
||||
806,
|
||||
807,
|
||||
454,
|
||||
464,
|
||||
466,
|
||||
465
|
||||
]
|
||||
}
|
||||
}
|
2455   facelib/eos/sfm_reference.obj   Executable file
File diff suppressed because it is too large.
2455   facelib/eos/sfm_reference_annotated.obj   Executable file
File diff suppressed because it is too large.
845   facelib/eos/sfm_reference_symmetry.txt   Executable file
File diff suppressed because it is too large. (One vertex index per line, 1-based, generated from Matlab: line i gives the symmetric counterpart of vertex i; for example, the first line maps vertex 1 to vertex 445.)
BIN   facelib/eos/sfm_shape_3448.bin   Executable file
Binary file not shown.
117   facelib/test/FacialMesh.py   Normal file
|
@ -0,0 +1,117 @@
|
|||
import random
|
||||
import time
|
||||
import unittest
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
from facelib.FacialMesh import get_mesh_landmarks
|
||||
from nnlib import nnlib
|
||||
from facelib import LandmarksExtractor, S3FDExtractor
|
||||
from samplelib import SampleLoader, SampleType
|
||||
|
||||
|
||||
class MyTestCase(unittest.TestCase):
|
||||
def test_something(self):
|
||||
t0 = time.time()
|
||||
source_image = cv2.imread('../../imagelib/test/test_src/carrey/carrey.jpg')
|
||||
print(time.time() - t0, 'loaded image')
|
||||
print('source_image type:', source_image.dtype)
|
||||
print('source_image shape:', source_image.shape)
|
||||
im = np.copy(source_image)
|
||||
|
||||
device_config = nnlib.DeviceConfig(cpu_only=True)
|
||||
nnlib.import_all(device_config)
|
||||
landmark_extractor = LandmarksExtractor(nnlib.keras)
|
||||
s3fd_extractor = S3FDExtractor()
|
||||
|
||||
rects = s3fd_extractor.extract(input_image=im, is_bgr=True)
|
||||
print('rects:', rects)
|
||||
bbox = rects[0] # bounding box
|
||||
l, t, r, b = bbox
|
||||
|
||||
print(time.time() - t0, 'got bbox')
|
||||
landmark_extractor.__enter__()
|
||||
s3fd_extractor.__enter__()
|
||||
|
||||
landmarks = landmark_extractor.extract(input_image=im, rects=rects, second_pass_extractor=s3fd_extractor,
|
||||
is_bgr=True)[-1]
|
||||
s3fd_extractor.__exit__()
|
||||
landmark_extractor.__exit__()
|
||||
print(time.time() - t0, 'got landmarks')
|
||||
print('landmarks shape:', np.shape(landmarks))
|
||||
|
||||
mesh_points, isomap, mask = get_mesh_landmarks(landmarks, im)
|
||||
print(time.time() - t0, 'got mesh')
|
||||
print('mesh_points:', np.shape(mesh_points))
|
||||
|
||||
cv2.namedWindow('test output', cv2.WINDOW_NORMAL)
|
||||
|
||||
# Draw the bounding box
|
||||
cv2.rectangle(im, (l, t), (r, b), (0, 0, 255), thickness=2)
|
||||
|
||||
for i, pt in enumerate(mesh_points):
|
||||
cv2.circle(im, (int(pt[0]), int(pt[1])), 1, (255, 255, 255), thickness=-1)
|
||||
|
||||
# Draw the landmarks
|
||||
for i, pt in enumerate(landmarks):
|
||||
cv2.circle(im, (int(pt[0]), int(pt[1])), 3, (0, 255, 0), thickness=-1)
|
||||
|
||||
cv2.imshow('test output', im)
|
||||
cv2.waitKey(0)
|
||||
|
||||
cv2.imshow('test output', isomap.transpose([1, 0, 2]))
|
||||
cv2.waitKey(0)
|
||||
|
||||
im = np.copy(source_image).astype(np.float32) / 255.0
|
||||
|
||||
cv2.imshow('test output', mask)
|
||||
cv2.waitKey(0)
|
||||
|
||||
cv2.imshow('test output', mask * im)
|
||||
cv2.waitKey(0)
|
||||
|
||||
cv2.destroyAllWindows()
|
||||
|
||||
def test_compare_hull_mask_with_mesh_mask(self):
|
||||
src_samples = SampleLoader.load(SampleType.FACE, '../../imagelib/test/test_dst', None)
|
||||
|
||||
sample_grid = self.get_sample_grid(src_samples)
|
||||
display_grid = []
|
||||
for sample_row in sample_grid:
|
||||
display_row = []
|
||||
for sample in sample_row:
|
||||
src_img = sample.load_bgr()
|
||||
src_hull_mask = sample.load_image_hull_mask()
|
||||
src_mesh_mask = sample.load_image_mesh_mask()
|
||||
|
||||
results = np.concatenate((src_img, src_hull_mask * src_img, src_mesh_mask * src_img), axis=1)
|
||||
display_row.append(results)
|
||||
display_grid.append(np.concatenate(display_row, axis=1))
|
||||
output_grid = np.concatenate(display_grid, axis=0)
|
||||
|
||||
cv2.namedWindow('test output', cv2.WINDOW_NORMAL)
|
||||
cv2.imshow('test output', output_grid)
|
||||
cv2.waitKey(0)
|
||||
cv2.destroyAllWindows()
|
||||
|
||||
@staticmethod
|
||||
def get_sample_grid(src_samples):
|
||||
pitch_yaw = np.array([[sample.pitch_yaw_roll[0], sample.pitch_yaw_roll[1]] for sample in src_samples])
|
||||
pitch_yaw = (pitch_yaw - np.mean(pitch_yaw, axis=0)) / np.std(pitch_yaw, axis=0)
|
||||
|
||||
grid = [[[1, 1], [1, 0], [1, -1]],
|
||||
[[0, 1], [0, 0], [0, -1]],
|
||||
[[-1, 1], [-1, 0], [-1, -1]]]
|
||||
|
||||
grid_samples = []
|
||||
for row in grid:
|
||||
row_samples = []
|
||||
for item in row:
|
||||
row_samples.append(src_samples[np.sum(np.square(np.abs(pitch_yaw - item)), 1).argmin()])
|
||||
grid_samples.append(row_samples)
|
||||
return grid_samples
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
102   facelib/test/LandmarksExtractor.py   Normal file
|
@ -0,0 +1,102 @@
|
|||
import unittest
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
from mainscripts.Extractor import ExtractSubprocessor
|
||||
from nnlib import nnlib
|
||||
from facelib import LandmarksExtractor, S3FDExtractor
|
||||
|
||||
|
||||
class LandmarkExtractorTest(unittest.TestCase):
|
||||
def test_extract(self):
|
||||
im = cv2.imread('../../imagelib/test/test_src/carrey/carrey.jpg')
|
||||
h, w, _ = im.shape
|
||||
|
||||
device_config = nnlib.DeviceConfig(cpu_only=True)
|
||||
nnlib.import_all(device_config)
|
||||
landmark_extractor = LandmarksExtractor(nnlib.keras)
|
||||
s3fd_extractor = S3FDExtractor()
|
||||
|
||||
rects = s3fd_extractor.extract(input_image=im, is_bgr=True)
|
||||
print('rects:', rects)
|
||||
l, t, r, b = rects[0]
|
||||
|
||||
landmark_extractor.__enter__()
|
||||
# landmarks = landmark_extractor.extract(input_image=im, rects=rects, second_pass_extractor=None,
|
||||
# is_bgr=True)
|
||||
s3fd_extractor.__enter__()
|
||||
landmarks = landmark_extractor.extract(input_image=im, rects=rects, second_pass_extractor=s3fd_extractor,
|
||||
is_bgr=True)[-1]
|
||||
s3fd_extractor.__exit__()
|
||||
landmark_extractor.__exit__()
|
||||
|
||||
# print('landmarks', list(landmarks))
|
||||
|
||||
cv2.namedWindow('test output', cv2.WINDOW_NORMAL)
|
||||
cv2.imshow('test output', im)
|
||||
cv2.waitKey(0)
|
||||
|
||||
cv2.rectangle(im, (l, t), (r, b), (255, 255, 0))
|
||||
cv2.imshow('test output', im)
|
||||
cv2.waitKey(0)
|
||||
|
||||
font_face = cv2.FONT_HERSHEY_SIMPLEX
|
||||
font_scale = 0.25
|
||||
|
||||
def pt(arr=None, x=None, y=None):
|
||||
if x and y:
|
||||
return int(x), int(y)
|
||||
else:
|
||||
return int(arr[0]), int(arr[1])
|
||||
|
||||
for i, m in enumerate(landmarks):
|
||||
print(i, m)
|
||||
cv2.circle(im, pt(m), 3, (0, 255, 0), thickness=-1)
|
||||
cv2.putText(im, str(i), pt(m), font_face, font_scale, (0, 255, 0), thickness=1)
|
||||
cv2.imshow('test output', im)
|
||||
cv2.waitKey(0)
|
||||
|
||||
l_eyebrow = np.mean(landmarks[17:22, :], axis=0)
|
||||
r_eyebrow = np.mean(landmarks[22:27, :], axis=0)
|
||||
print(l_eyebrow, r_eyebrow)
|
||||
cv2.circle(im, pt(l_eyebrow), 5, (0, 0, 255))
|
||||
cv2.circle(im, pt(r_eyebrow), 5, (0, 0, 255))
|
||||
|
||||
c_brow = np.mean([l_eyebrow, r_eyebrow], axis=0)
|
||||
brow_slope = (r_eyebrow[1] - l_eyebrow[1]) / (r_eyebrow[0] - l_eyebrow[0])
|
||||
l_brow_line = c_brow - np.array([1000, 1000 * brow_slope])
|
||||
r_brow_line = c_brow + np.array([1000, 1000 * brow_slope])
|
||||
cv2.line(im, pt(l_brow_line), pt(r_brow_line), (0, 0, 255), thickness=4)
|
||||
|
||||
cv2.circle(im, pt(c_brow), 5, (0, 0, 255))
|
||||
nose = np.mean([landmarks[31], landmarks[35]], axis=0)
|
||||
cv2.circle(im, pt(nose), 5, (0, 0, 255))
|
||||
|
||||
nose_brow_slope = (c_brow[1] - nose[1]) / (c_brow[0] - nose[0])
|
||||
t_nose_brow_line = c_brow - np.array([100, 100 * nose_brow_slope])
|
||||
b_nose_brow_line = c_brow + np.array([100, 100 * nose_brow_slope])
|
||||
cv2.line(im, pt(b_nose_brow_line), pt(t_nose_brow_line), (0, 0, 255), thickness=4)
|
||||
|
||||
l_nose_line = nose - np.array([100, 100 * brow_slope])
|
||||
r_nose_line = nose + np.array([100, 100 * brow_slope])
|
||||
print(l_nose_line, r_nose_line)
|
||||
cv2.line(im, pt(l_nose_line), pt(r_nose_line), (0, 0, 255), thickness=1)
|
||||
|
||||
c_forehead = c_brow - (nose - c_brow)
|
||||
cv2.circle(im, pt(c_forehead), 5, (0, 0, 255))
|
||||
l_forehead_line = c_forehead - np.array([100, 100 * brow_slope])
|
||||
r_forehead_line = c_forehead + np.array([100, 100 * brow_slope])
|
||||
cv2.line(im, pt(l_forehead_line), pt(r_forehead_line), (0, 0, 255), thickness=1)
|
||||
|
||||
def mirrorUsingLine(pts, line_pt1, line_pt2):
|
||||
pass
|
||||
|
||||
cv2.imshow('test output', im)
|
||||
cv2.waitKey(0)
|
||||
|
||||
cv2.destroyAllWindows()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
55   facelib/test/LandmarksProcessor.py   Normal file
|
@ -0,0 +1,55 @@
|
|||
import unittest
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
from facelib.LandmarksProcessor import draw_landmarks
|
||||
from samplelib import SampleLoader, SampleType
|
||||
|
||||
|
||||
class LandmarksProcessorTests(unittest.TestCase):
|
||||
def test_algorithms(self):
|
||||
src_samples = SampleLoader.load(SampleType.FACE, '../../imagelib/test/test_dst', None)
|
||||
|
||||
grid = []
|
||||
for src_sample in src_samples:
|
||||
src_img = src_sample.load_bgr()
|
||||
src_mask = src_sample.load_image_hull_mask()
|
||||
src_landmarks = src_sample.landmarks
|
||||
draw_landmarks(src_img, src_landmarks)
|
||||
results = np.concatenate((src_img, src_mask*src_img), axis=1)
|
||||
grid.append(results)
|
||||
|
||||
cv2.namedWindow('test output', cv2.WINDOW_NORMAL)
|
||||
for g in grid:
|
||||
print(np.shape(g))
|
||||
cv2.imshow('test output', np.concatenate(grid, axis=0))
|
||||
cv2.waitKey(0)
|
||||
cv2.destroyAllWindows()
|
||||
|
||||
def test_plot_landmarks_algorithms(self):
|
||||
src_samples = SampleLoader.load(SampleType.FACE, '../../imagelib/test/test_src', None)
|
||||
|
||||
grid = []
|
||||
for src_sample in src_samples:
|
||||
src_img = src_sample.load_bgr()
|
||||
src_mask = src_sample.load_image_hull_mask()
|
||||
src_landmarks = src_sample.landmarks
|
||||
print('landmarks:', src_landmarks)
|
||||
for landmark in src_landmarks:
|
||||
landmark = np.array(landmark, dtype=np.int)
|
||||
cv2.circle(src_img, tuple(landmark), 3, (0,0,255))
|
||||
results = np.concatenate((src_img, src_mask*src_img), axis=1)
|
||||
grid.append(results)
|
||||
|
||||
cv2.namedWindow('test output', cv2.WINDOW_NORMAL)
|
||||
for g in grid:
|
||||
print(np.shape(g))
|
||||
cv2.imshow('test output', np.concatenate(grid, axis=0))
|
||||
cv2.waitKey(0)
|
||||
cv2.destroyAllWindows()
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
@@ -3,5 +3,6 @@ call ..\setenv.bat

python -m pip install Flask==1.1.1
python -m pip install flask-socketio==4.2.1
python -m pip install bin/eos_py-1.1.2-cp36-cp36m-win_amd64.whl

pause