Mirror of https://github.com/iperov/DeepFaceLab.git, synced 2025-07-08 05:51:40 -07:00
removing trailing spaces
This commit is contained in: parent fa4e579b95, commit a3df04999c
61 changed files with 2110 additions and 2103 deletions
@@ -10,38 +10,38 @@ class LandmarksExtractor(object):
    def __init__ (self, keras):
        self.keras = keras
        K = self.keras.backend

    def __enter__(self):
        keras_model_path = Path(__file__).parent / "2DFAN-4.h5"
        if not keras_model_path.exists():
            return None

        self.keras_model = self.keras.models.load_model (str(keras_model_path))

        return self

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        del self.keras_model
        return False #pass exception between __enter__ and __exit__ to outter level

    def extract_from_bgr (self, input_image, rects, second_pass_extractor=None):
        input_image = input_image[:,:,::-1].copy()
        (h, w, ch) = input_image.shape

        landmarks = []
        for (left, top, right, bottom) in rects:
            try:
                center = np.array( [ (left + right) / 2.0, (top + bottom) / 2.0] )
                #center[1] -= (bottom - top) * 0.12
                scale = (right - left + bottom - top) / 195.0

                image = self.crop(input_image, center, scale).astype(np.float32)
                image = np.expand_dims(image, 0)

                predicted = self.keras_model.predict (image).transpose (0,3,1,2)

                pts_img = self.get_pts_from_predict ( predicted[-1], center, scale)
                pts_img = [ ( int(pt[0]), int(pt[1]) ) for pt in pts_img ]
                landmarks.append ( ( (left, top, right, bottom),pts_img ) )
            except Exception as e:
                landmarks.append ( ( (left, top, right, bottom), None ) )
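For context, this hunk shows the LandmarksExtractor workflow: the class wraps a 2DFAN-4 Keras model as a context manager and, per face rectangle, crops the input, runs the model, and converts heatmap peaks to image-space points. The sketch below is not part of the commit; the frame, rectangle values, and import path are assumptions made only to illustrate how the class might be driven.

import keras
import cv2

from facelib import LandmarksExtractor   # import path is an assumption; adjust to the repo layout

frame_bgr = cv2.imread("frame.jpg")       # any BGR image
face_rects = [(64, 48, 192, 176)]         # (left, top, right, bottom) boxes from some face detector

with LandmarksExtractor(keras) as extractor:   # __enter__ loads 2DFAN-4.h5 next to the module
    results = extractor.extract_from_bgr(frame_bgr, face_rects)

for rect, pts in results:
    if pts is None:                        # extraction failed for this rectangle
        continue
    print(rect, len(pts), "landmarks")     # 2DFAN-4 predicts 68 points per face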
@@ -52,26 +52,26 @@ class LandmarksExtractor(object):
                    rect, lmrks = landmarks[i]
                    if lmrks is None:
                        continue

                    image_to_face_mat = LandmarksProcessor.get_transform_mat (lmrks, 256, FaceType.FULL)
                    face_image = cv2.warpAffine(input_image, image_to_face_mat, (256, 256), cv2.INTER_CUBIC)

                    rects2 = second_pass_extractor.extract_from_bgr(face_image)
                    if len(rects2) != 1: #dont do second pass if more than 1 or zero faces detected in cropped image
                        continue

                    rect2 = rects2[0]

                    lmrks2 = self.extract_from_bgr (face_image, [rect2] )[0][1]
                    source_lmrks2 = LandmarksProcessor.transform_points (lmrks2, image_to_face_mat, True)
                    landmarks[i] = (rect, source_lmrks2)
                except:
                    continue

        return landmarks

    def transform(self, point, center, scale, resolution):
        pt = np.array ( [point[0], point[1], 1.0] )
        h = 200.0 * scale
        m = np.eye(3)
        m[0,0] = resolution / h
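This hunk is the second-pass refinement: warp the source image to an aligned 256x256 face crop, re-run detection and landmark extraction on that crop, then map the refined points back through the inverse of the affine warp. The function below is a generic illustration of that map-back step using standard OpenCV calls; it is not DeepFaceLab's LandmarksProcessor.transform_points, whose internals the diff does not show.

import numpy as np
import cv2

def landmarks_back_to_source(lmrks_in_crop, image_to_face_mat):
    # Invert the 2x3 affine that produced the face crop, then push the
    # crop-space landmarks through it to get source-image coordinates.
    inv_mat = cv2.invertAffineTransform(image_to_face_mat)       # 2x3 inverse affine
    pts = np.asarray(lmrks_in_crop, dtype=np.float32).reshape(-1, 1, 2)
    return cv2.transform(pts, inv_mat).reshape(-1, 2)            # (N, 2) points in source pixels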
@@ -80,11 +80,11 @@ class LandmarksExtractor(object):
        m[1,2] = resolution * ( -center[1] / h + 0.5 )
        m = np.linalg.inv(m)
        return np.matmul (m, pt)[0:2]

    def crop(self, image, center, scale, resolution=256.0):
        ul = self.transform([1, 1], center, scale, resolution).astype( np.int )
        br = self.transform([resolution, resolution], center, scale, resolution).astype( np.int )

        if image.ndim > 2:
            newDim = np.array([br[1] - ul[1], br[0] - ul[0], image.shape[2]], dtype=np.int32)
            newImg = np.zeros(newDim, dtype=np.uint8)
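The transform method above builds a 3x3 scale-plus-translation matrix that maps image coordinates into a resolution-sized crop covering 200*scale source pixels around center, then inverts it, so what it actually returns is the image-space location of a crop-space point (e.g. a heatmap peak). A standalone restatement of the same arithmetic, with my own function and variable names, is sketched below.

import numpy as np

def crop_to_image(point_xy, center, scale, resolution):
    # Forward matrix: image coords -> crop coords; its inverse maps back.
    h = 200.0 * scale                                 # crop side length in source pixels
    forward = np.eye(3)
    forward[0, 0] = forward[1, 1] = resolution / h    # uniform scaling
    forward[0, 2] = resolution * (-center[0] / h + 0.5)
    forward[1, 2] = resolution * (-center[1] / h + 0.5)
    pt = np.array([point_xy[0], point_xy[1], 1.0])    # homogeneous crop-space point
    return (np.linalg.inv(forward) @ pt)[:2]          # image-space x, y

For example, crop_to_image([1, 1], center, scale, 256) gives the source-image position of the crop's upper-left corner, which is exactly how crop() computes ul in the hunk above.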
@@ -98,14 +98,14 @@ class LandmarksExtractor(object):
        oldX = np.array([max(1, ul[0] + 1), min(br[0], wd)], dtype=np.int32)
        oldY = np.array([max(1, ul[1] + 1), min(br[1], ht)], dtype=np.int32)
        newImg[newY[0] - 1:newY[1], newX[0] - 1:newX[1] ] = image[oldY[0] - 1:oldY[1], oldX[0] - 1:oldX[1], :]

        newImg = cv2.resize(newImg, dsize=(int(resolution), int(resolution)), interpolation=cv2.INTER_LINEAR)
        return newImg

    def get_pts_from_predict(self, a, center, scale):
        b = a.reshape ( (a.shape[0], a.shape[1]*a.shape[2]) )
        c = b.argmax(1).reshape ( (a.shape[0], 1) ).repeat(2, axis=1).astype(np.float)
        c[:,0] %= a.shape[2]
        c[:,1] = np.apply_along_axis ( lambda x: np.floor(x / a.shape[2]), 0, c[:,1] )

        for i in range(a.shape[0]):
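The crop code above maps the crop corners back into the image, allocates a zero canvas, and copies only the overlapping region, so boxes that extend past the frame come out zero-padded before being resized. The sketch below shows the same clamp-and-copy idea with simpler 0-based indexing instead of the 1-based bookkeeping in the diff; it assumes a 3-channel image and a box that overlaps the frame.

import numpy as np
import cv2

def padded_crop(image, ul, br, resolution=256):
    # Copy image[ul:br] into a zero canvas, clamping to the frame, then resize.
    h, w = image.shape[:2]
    out_h, out_w = br[1] - ul[1], br[0] - ul[0]
    canvas = np.zeros((out_h, out_w, image.shape[2]), dtype=image.dtype)

    src_x0, src_x1 = max(0, ul[0]), min(w, br[0])    # overlap of the box with the frame
    src_y0, src_y1 = max(0, ul[1]), min(h, br[1])
    dst_x0, dst_y0 = src_x0 - ul[0], src_y0 - ul[1]  # where that overlap lands on the canvas

    canvas[dst_y0:dst_y0 + (src_y1 - src_y0), dst_x0:dst_x0 + (src_x1 - src_x0)] = \
        image[src_y0:src_y1, src_x0:src_x1]
    return cv2.resize(canvas, (resolution, resolution), interpolation=cv2.INTER_LINEAR)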
@@ -113,6 +113,6 @@ class LandmarksExtractor(object):
            if pX > 0 and pX < 63 and pY > 0 and pY < 63:
                diff = np.array ( [a[i,pY,pX+1]-a[i,pY,pX-1], a[i,pY+1,pX]-a[i,pY-1,pX]] )
                c[i] += np.sign(diff)*0.25

        c += 0.5
        return [ self.transform (c[i], center, scale, a.shape[2]) for i in range(a.shape[0]) ]
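Taken together, get_pts_from_predict turns each 64x64 heatmap into a coordinate: flatten, argmax, split the flat index into (x, y), nudge by 0.25 toward the stronger neighbouring activation, add 0.5, and finally map through transform back into image space. Below is a self-contained sketch of the heatmap-to-point step under those assumptions (the np.float alias used in the diff is written as plain float here, since NumPy removed that alias in 1.24).

import numpy as np

def heatmaps_to_grid_points(heatmaps):
    # heatmaps: (n_landmarks, H, W) array of per-landmark score maps.
    # Returns (n_landmarks, 2) peak coordinates in heatmap space, with the
    # quarter-pixel nudge used by the code above.
    n, hgt, wid = heatmaps.shape
    flat_idx = heatmaps.reshape(n, -1).argmax(1)
    pts = np.stack([flat_idx % wid, flat_idx // wid], axis=1).astype(float)

    for i in range(n):
        x, y = int(pts[i, 0]), int(pts[i, 1])
        if 0 < x < wid - 1 and 0 < y < hgt - 1:
            dx = heatmaps[i, y, x + 1] - heatmaps[i, y, x - 1]
            dy = heatmaps[i, y + 1, x] - heatmaps[i, y - 1, x]
            pts[i] += np.sign([dx, dy]) * 0.25    # shift toward the stronger neighbour
    return pts + 0.5                              # centre of the peak cell

In the class itself, each of these grid points is then passed through transform(c[i], center, scale, a.shape[2]) to land in source-image coordinates.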