Mirror of https://github.com/iperov/DeepFaceLab.git, synced 2025-07-07 13:32:09 -07:00
S3FD : filtering faces < 40pix.
parent 8da47fec13, commit 62af1b6f16
3 changed files with 27 additions and 12 deletions
@@ -90,7 +90,7 @@ class MTCExtractor(object):
         input_image = input_image[:,:,::-1].copy()
         (h, w, ch) = input_image.shape

-        input_scale = self.scale_to / (w if w > h else h)
+        input_scale = self.scale_to / max(w,h)
         input_image = cv2.resize (input_image, ( int(w*input_scale), int(h*input_scale) ), interpolation=cv2.INTER_LINEAR)

         detected_faces, pnts = detect_face ( input_image, self.min_face_size, self.pnet_fun, self.rnet_fun, self.onet_fun, [ self.thresh1, self.thresh2, self.thresh3 ], self.scale_factor )
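The MTCExtractor change is cosmetic: max(w,h) picks the same longest side as the old conditional expression. For reference, a minimal sketch of the pre-scaling it feeds, with an illustrative helper name and a caller-supplied scale_to (both assumptions, not the repo's API):

import cv2

def prescale(bgr_image, scale_to):
    # Shrink the image so its longest side equals scale_to, matching the
    # resize call in the hunk above. Returns the resized image and the scale.
    h, w = bgr_image.shape[:2]
    input_scale = scale_to / max(w, h)   # same value as scale_to / (w if w > h else h)
    resized = cv2.resize(bgr_image,
                         (int(w * input_scale), int(h * input_scale)),
                         interpolation=cv2.INTER_LINEAR)
    return resized, input_scale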
@@ -5,8 +5,6 @@ from nnlib import nnlib

 class S3FDExtractor(object):
     def __init__(self):
-        self.scale_factor = 3
-
         exec( nnlib.import_all(), locals(), globals() )

         model_path = Path(__file__).parent / "S3FD.h5"
@@ -25,24 +23,30 @@ class S3FDExtractor(object):
         input_image = input_image[:,:,::-1].copy()
         (h, w, ch) = input_image.shape

-        d = w if w > h else h
-        input_scale = d / ( (d // self.scale_factor) - (d % self.scale_factor) )
+        d = max(w, h)
+        scale_to = 640 if d >= 1280 else d / 2
+        scale_to = max(64, scale_to)
+
+        input_scale = d / scale_to
         input_image = cv2.resize (input_image, ( int(w/input_scale), int(h/input_scale) ), interpolation=cv2.INTER_LINEAR)

         olist = self.model.predict( np.expand_dims(input_image,0) )

         detected_faces = self.refine (olist)

+        #filtering faces < 40pix by any side
         #enlarging bottom line a bit for 2DFAN-4, because default is not enough covering a chin
-        for face in detected_faces:
-            l,t,r,b = face
-            tb = (b-t) * 0.1
-            face[3] += tb
+        new_detected_faces = []
+        for l,t,r,b in detected_faces:
+            bt = b-t
+            if min(r-l,bt) < 40:
+                continue
+            new_detected_faces.append ((l,t,r,b+bt*0.1))

         return [ (int(face[0]*input_scale),
                   int(face[1]*input_scale),
                   int(face[2]*input_scale),
-                  int(face[3]*input_scale)) for face in detected_faces ]
+                  int(face[3]*input_scale)) for face in new_detected_faces ]

     def refine(self, olist):
         bboxlist = []
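The new S3FD extract path can be read end to end as: pick a working resolution (640 px on the long side for frames of 1280 px or more, otherwise half the long side, never below 64 px), detect on the downscaled image, drop boxes smaller than 40 px on either side, extend each surviving box 10% downward so 2DFAN-4 covers the chin, and scale coordinates back up. A minimal standalone sketch of that flow, where detect() is an illustrative stand-in for self.model.predict plus self.refine (the helper name and signature are assumptions, not the repo's API):

import cv2

def extract_faces(bgr_image, detect):
    input_image = bgr_image[:, :, ::-1].copy()       # BGR -> RGB, as in the diff
    h, w = input_image.shape[:2]

    d = max(w, h)
    scale_to = 640 if d >= 1280 else d / 2           # cap work on large frames
    scale_to = max(64, scale_to)                     # never target less than 64 px
    input_scale = d / scale_to

    small = cv2.resize(input_image,
                       (int(w / input_scale), int(h / input_scale)),
                       interpolation=cv2.INTER_LINEAR)

    faces = []
    for l, t, r, b in detect(small):                 # boxes in downscaled coordinates
        bt = b - t
        if min(r - l, bt) < 40:                      # filter faces < 40 px by any side
            continue
        b = b + bt * 0.1                             # extend bottom line for 2DFAN-4
        faces.append((int(l * input_scale), int(t * input_scale),
                      int(r * input_scale), int(b * input_scale)))
    return faces

Note that, as written in the diff, the 40 px threshold is applied in the downscaled coordinate space, before the boxes are multiplied by input_scale back to original-image coordinates.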
@@ -79,13 +79,24 @@ class ExtractSubprocessor(Subprocessor):
                 image = self.cached_image[1]
             else:
                 image = cv2_imread( filename_path_str )
+                h, w, ch = image.shape
+                wm = w % 2
+                hm = h % 2
+                if wm + hm != 0: #fix odd image
+                    image = image[0:h-hm,0:w-wm,:]
                 self.cached_image = ( filename_path_str, image )

             if image is None:
                 self.log_err ( 'Failed to extract %s, reason: cv2_imread() fail.' % ( str(filename_path) ) )
             else:
                 if self.type == 'rects':
-                    rects = self.e.extract_from_bgr (image)
+                    h, w, ch = image.shape
+                    if min(w,h) < 128:
+                        self.log_err ( 'Image is too small %s : [%d, %d]' % ( str(filename_path), w, h ) )
+                        rects = []
+                    else:
+                        rects = self.e.extract_from_bgr (image)
+
                     return [str(filename_path), rects]

                 elif self.type == 'landmarks':
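The extractor hunk adds two guards before detection: crop images with an odd width or height down to even dimensions ("fix odd image"), and report images whose shorter side is under 128 px instead of running detection on them. A standalone sketch of those guards with a hypothetical prepare_image() helper; unlike the diff, which logs a distinct error for each case and still returns an empty rects list for small images, this sketch simply returns None for both failures:

import cv2

def prepare_image(path, min_side=128):
    # Illustrative helper (name and return convention are assumptions).
    image = cv2.imread(path)
    if image is None:
        return None                               # mirrors the 'cv2_imread() fail' branch

    h, w = image.shape[:2]
    wm, hm = w % 2, h % 2
    if wm + hm != 0:                              # crop to even width/height
        image = image[0:h - hm, 0:w - wm, :]      # ("fix odd image" in the diff)

    if min(image.shape[1], image.shape[0]) < min_side:
        return None                               # mirrors the 'Image is too small' branch
    return image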