small fixes and refactorings

iperov 2019-03-16 20:55:51 +04:00
parent d6a45763a2
commit f3b343c0e5
9 changed files with 479 additions and 170 deletions

Binary file not shown.

File diff suppressed because one or more lines are too long

View file

@@ -282,7 +282,6 @@ def mirror_landmarks (landmarks, val):
     return result

 def draw_landmarks (image, image_landmarks, color=(0,255,0), transparent_mask=False):
-    image = image.copy()
     if len(image_landmarks) != 68:
         raise Exception('get_image_eye_mask works only with 68 landmarks')

@@ -309,17 +308,15 @@ def draw_landmarks (image, image_landmarks, color=(0,255,0), transparent_mask=Fa
     if transparent_mask:
         mask = get_image_hull_mask (image.shape, image_landmarks)
-        image = image * (1-mask) + image * mask / 2
-    return image
+        image[...] = ( image * (1-mask) + image * mask / 2 )[...]

 def draw_rect_landmarks (image, rect, image_landmarks, face_size, face_type, transparent_mask=False):
-    image = draw_landmarks(image, image_landmarks, transparent_mask=transparent_mask)
+    draw_landmarks(image, image_landmarks, transparent_mask=transparent_mask)
     image_utils.draw_rect (image, rect, (255,0,0), 2 )

     image_to_face_mat = get_transform_mat (image_landmarks, face_size, face_type)
     points = transform_points ( [ (0,0), (0,face_size-1), (face_size-1, face_size-1), (face_size-1,0) ], image_to_face_mat, True)
     image_utils.draw_polygon (image, points, (0,0,255), 2)
-    return image

 def calc_face_pitch(landmarks):
     if not isinstance(landmarks, np.ndarray):
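With this change draw_landmarks and draw_rect_landmarks draw into the passed image in place and no longer return it. A minimal caller sketch under that assumption (the file names and the landmark source are hypothetical; cv2_imread/cv2_imwrite are assumed to come from utils.cv2_utils as elsewhere in this commit):

    import numpy as np
    from facelib import LandmarksProcessor
    from utils.cv2_utils import cv2_imread, cv2_imwrite

    img = cv2_imread('frame.jpg')                # hypothetical input frame
    landmarks = np.load('frame_landmarks.npy')   # hypothetical 68-point landmark array
    # before: img = LandmarksProcessor.draw_landmarks(img, landmarks, transparent_mask=True)
    # after:  the helper mutates img directly and returns None
    LandmarksProcessor.draw_landmarks(img, landmarks, transparent_mask=True)
    cv2_imwrite('frame_debug.jpg', img)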

View file

@@ -79,93 +79,106 @@ class ExtractSubprocessor(Subprocessor):
                 image = self.cached_image[1]
             else:
                 image = cv2_imread( filename_path_str )
-                h, w, ch = image.shape
+
+                if image is None:
+                    self.log_err ( 'Failed to extract %s, reason: cv2_imread() fail.' % ( str(filename_path) ) )
+                    return None
+
+                image_shape = image.shape
+                if len(image_shape) == 2:
+                    h, w = image.shape
+                    ch = 1
+                else:
+                    h, w, ch = image.shape
+
+                if ch == 1:
+                    image = np.repeat ( image [:,:,np.newaxis], 3, -1 )
+                elif ch == 4:
+                    image = image[:,:,0:3]
+
                 wm = w % 2
                 hm = h % 2
                 if wm + hm != 0: #fix odd image
                     image = image[0:h-hm,0:w-wm,:]
                 self.cached_image = ( filename_path_str, image )

-            if image is None:
-                self.log_err ( 'Failed to extract %s, reason: cv2_imread() fail.' % ( str(filename_path) ) )
-            else:
-                if self.type == 'rects':
-                    h, w, ch = image.shape
-                    if min(w,h) < 128:
-                        self.log_err ( 'Image is too small %s : [%d, %d]' % ( str(filename_path), w, h ) )
-                        rects = []
-                    else:
-                        rects = self.e.extract_from_bgr (image)
-
-                    return [str(filename_path), rects]
-
-                elif self.type == 'landmarks':
-                    rects = data[1]
-                    landmarks = self.e.extract_from_bgr (image, rects)
-                    return [str(filename_path), landmarks]
-
-                elif self.type == 'final':
-                    src_dflimg = None
-                    (h,w,c) = image.shape
-                    if h == w:
-                        #extracting from already extracted jpg image?
-                        if filename_path.suffix == '.jpg':
-                            src_dflimg = DFLJPG.load ( str(filename_path) )
-
-                    result = []
-                    faces = data[1]
-
-                    if self.debug:
-                        debug_output_file = '{}{}'.format( str(Path(str(self.output_path) + '_debug') / filename_path.stem), '.jpg')
-                        debug_image = image.copy()
-
-                    face_idx = 0
-                    for face in faces:
-                        rect = np.array(face[0])
-                        image_landmarks = np.array(face[1])
-
-                        if self.face_type == FaceType.MARK_ONLY:
-                            face_image = image
-                            face_image_landmarks = image_landmarks
-                        else:
-                            image_to_face_mat = LandmarksProcessor.get_transform_mat (image_landmarks, self.image_size, self.face_type)
-                            face_image = cv2.warpAffine(image, image_to_face_mat, (self.image_size, self.image_size), cv2.INTER_LANCZOS4)
-                            face_image_landmarks = LandmarksProcessor.transform_points (image_landmarks, image_to_face_mat)
-
-                            landmarks_bbox = LandmarksProcessor.transform_points ( [ (0,0), (0,self.image_size-1), (self.image_size-1, self.image_size-1), (self.image_size-1,0) ], image_to_face_mat, True)
-
-                            rect_area = mathlib.polygon_area(np.array(rect[[0,2,2,0]]), np.array(rect[[1,1,3,3]]))
-                            landmarks_area = mathlib.polygon_area(landmarks_bbox[:,0], landmarks_bbox[:,1] )
-
-                            if landmarks_area > 4*rect_area: #get rid of faces which umeyama-landmark-area > 4*detector-rect-area
-                                continue
-
-                        if self.debug:
-                            debug_image = LandmarksProcessor.draw_rect_landmarks (debug_image, rect, image_landmarks, self.image_size, self.face_type, transparent_mask=True)
-
-                        output_file = '{}_{}{}'.format(str(self.output_path / filename_path.stem), str(face_idx), '.jpg')
-                        face_idx += 1
-
-                        if src_dflimg is not None:
-                            #if extracting from dflimg just copy it in order not to lose quality
-                            shutil.copy ( str(filename_path), str(output_file) )
-                        else:
-                            cv2_imwrite(output_file, face_image, [int(cv2.IMWRITE_JPEG_QUALITY), 85] )
-
-                        DFLJPG.embed_data(output_file, face_type = FaceType.toString(self.face_type),
-                                                       landmarks = face_image_landmarks.tolist(),
-                                                       source_filename = filename_path.name,
-                                                       source_rect= rect,
-                                                       source_landmarks = image_landmarks.tolist()
-                                            )
-
-                        result.append (output_file)
-
-                    if self.debug:
-                        cv2_imwrite(debug_output_file, debug_image, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )
-
-                    return result
-            return None
+            if self.type == 'rects':
+                h, w, ch = image.shape
+                if min(w,h) < 128:
+                    self.log_err ( 'Image is too small %s : [%d, %d]' % ( str(filename_path), w, h ) )
+                    rects = []
+                else:
+                    rects = self.e.extract_from_bgr (image)
+
+                return [str(filename_path), rects]
+
+            elif self.type == 'landmarks':
+                rects = data[1]
+                landmarks = self.e.extract_from_bgr (image, rects)
+                return [str(filename_path), landmarks]
+
+            elif self.type == 'final':
+                src_dflimg = None
+                (h,w,c) = image.shape
+                if h == w:
+                    #extracting from already extracted jpg image?
+                    if filename_path.suffix == '.jpg':
+                        src_dflimg = DFLJPG.load ( str(filename_path) )
+
+                result = []
+                faces = data[1]
+
+                if self.debug:
+                    debug_output_file = '{}{}'.format( str(Path(str(self.output_path) + '_debug') / filename_path.stem), '.jpg')
+                    debug_image = image.copy()
+
+                face_idx = 0
+                for face in faces:
+                    rect = np.array(face[0])
+                    image_landmarks = np.array(face[1])
+
+                    if self.face_type == FaceType.MARK_ONLY:
+                        face_image = image
+                        face_image_landmarks = image_landmarks
+                    else:
+                        image_to_face_mat = LandmarksProcessor.get_transform_mat (image_landmarks, self.image_size, self.face_type)
+                        face_image = cv2.warpAffine(image, image_to_face_mat, (self.image_size, self.image_size), cv2.INTER_LANCZOS4)
+                        face_image_landmarks = LandmarksProcessor.transform_points (image_landmarks, image_to_face_mat)
+
+                        landmarks_bbox = LandmarksProcessor.transform_points ( [ (0,0), (0,self.image_size-1), (self.image_size-1, self.image_size-1), (self.image_size-1,0) ], image_to_face_mat, True)
+
+                        rect_area = mathlib.polygon_area(np.array(rect[[0,2,2,0]]), np.array(rect[[1,1,3,3]]))
+                        landmarks_area = mathlib.polygon_area(landmarks_bbox[:,0], landmarks_bbox[:,1] )
+
+                        if landmarks_area > 4*rect_area: #get rid of faces which umeyama-landmark-area > 4*detector-rect-area
+                            continue
+
+                    if self.debug:
+                        LandmarksProcessor.draw_rect_landmarks (debug_image, rect, image_landmarks, self.image_size, self.face_type, transparent_mask=True)
+
+                    output_file = '{}_{}{}'.format(str(self.output_path / filename_path.stem), str(face_idx), '.jpg')
+                    face_idx += 1
+
+                    if src_dflimg is not None:
+                        #if extracting from dflimg just copy it in order not to lose quality
+                        shutil.copy ( str(filename_path), str(output_file) )
+                    else:
+                        cv2_imwrite(output_file, face_image, [int(cv2.IMWRITE_JPEG_QUALITY), 85] )

+                    DFLJPG.embed_data(output_file, face_type = FaceType.toString(self.face_type),
+                                                   landmarks = face_image_landmarks.tolist(),
+                                                   source_filename = filename_path.name,
+                                                   source_rect= rect,
+                                                   source_landmarks = image_landmarks.tolist()
+                                        )
+
+                    result.append (output_file)
+
+                if self.debug:
+                    cv2_imwrite(debug_output_file, debug_image, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )
+
+                return result

         #overridable
         def get_data_name (self, data):
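The landmarks-area filter above rejects detections whose umeyama-aligned landmark quad covers more than four times the detector rectangle. mathlib.polygon_area is assumed here to be a shoelace-style polygon area; a standalone sketch of the same check with hypothetical numbers:

    import numpy as np

    def polygon_area(x, y):
        # shoelace formula (assumed equivalent of mathlib.polygon_area)
        return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

    rect = np.array([10, 20, 110, 140])  # hypothetical detector rect: left, top, right, bottom
    landmarks_bbox = np.array([[0, 0], [0, 255], [255, 255], [255, 0]])  # hypothetical warped crop corners

    rect_area      = polygon_area(rect[[0, 2, 2, 0]], rect[[1, 1, 3, 3]])      # 12000
    landmarks_area = polygon_area(landmarks_bbox[:, 0], landmarks_bbox[:, 1])  # 65025

    if landmarks_area > 4 * rect_area:
        print('rejected: landmark area is more than 4x the detector rect area')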
@@ -208,7 +221,7 @@ class ExtractSubprocessor(Subprocessor):
         self.y = 0
         self.rect_size = 100
         self.rect_locked = False
-        self.redraw_needed = True
+        self.extract_needed = True

         io.progress_bar (None, len (self.input_data))
@@ -277,13 +290,12 @@ class ExtractSubprocessor(Subprocessor):
             if len (self.input_data) > 0:
                 return self.input_data.pop(0)
             else:
-                skip_remaining = False
                 allow_remark_faces = False
+
                 while len (self.input_data) > 0:
                     data = self.input_data[0]
                     filename, faces = data
                     is_frame_done = False
-                    go_to_prev_frame = False

                     # Can we mark an image that already has a marked face?
                     if allow_remark_faces:
@@ -291,10 +303,9 @@ class ExtractSubprocessor(Subprocessor):
                         # If there was already a face then lock the rectangle to it until the mouse is clicked
                         if len(faces) > 0:
                             self.rect, self.landmarks = faces.pop()
-                            self.rect_locked = True
-                            self.redraw_needed = True
                             faces.clear()
+                            self.extract_needed = True
+                            self.rect_locked = True

                             self.rect_size = ( self.rect[2] - self.rect[0] ) / 2
                             self.x = ( self.rect[0] + self.rect[2] ) / 2
                             self.y = ( self.rect[1] + self.rect[3] ) / 2
@@ -333,6 +344,8 @@ class ExtractSubprocessor(Subprocessor):
                         self.cache_text_lines_img = (sh, self.text_lines_img)

                     while True:
+                        io.process_messages(0.0001)
+
                         new_x = self.x
                         new_y = self.y
                         new_rect_size = self.rect_size
@@ -346,7 +359,7 @@ class ExtractSubprocessor(Subprocessor):
                                 new_rect_size = max (5, new_rect_size + diff*mod)
                             elif ev == io.EVENT_LBUTTONDOWN:
                                 self.rect_locked = not self.rect_locked
-                                self.redraw_needed = True
+                                self.extract_needed = True
                             elif not self.rect_locked:
                                 new_x = np.clip (x, 0, w-1) / self.view_scale
                                 new_y = np.clip (y, 0, h-1) / self.view_scale
@@ -355,28 +368,47 @@ class ExtractSubprocessor(Subprocessor):
                         key, = key_events[-1] if len(key_events) > 0 else (0,)

                         if key == ord('\r') or key == ord('\n'):
-                            faces.append ( [(self.rect), self.landmarks] )
+                            #confirm frame
                             is_frame_done = True
+                            faces.append ( [(self.rect), self.landmarks] )
                             break
                         elif key == ord(' '):
+                            #confirm skip frame
                             is_frame_done = True
                             break
-                        elif key == ord('.'):
-                            allow_remark_faces = True
-                            # Only save the face if the rect is still locked
-                            if self.rect_locked:
-                                faces.append ( [(self.rect), self.landmarks] )
-                            is_frame_done = True
-                            break
                         elif key == ord(',') and len(self.result) > 0:
-                            # Only save the face if the rect is still locked
+                            #go prev frame
                             if self.rect_locked:
+                                # Only save the face if the rect is still locked
                                 faces.append ( [(self.rect), self.landmarks] )
-                            go_to_prev_frame = True
+
+                            self.input_data.insert(0, self.result.pop() )
+                            io.progress_bar_inc(-1)
+                            allow_remark_faces = True
+                            self.extract_needed = True
+                            self.rect_locked = False
+
+                            break
+                        elif key == ord('.'):
+                            #go next frame
+                            is_frame_done = True
+                            allow_remark_faces = True
+                            if self.rect_locked:
+                                # Only save the face if the rect is still locked
+                                faces.append ( [(self.rect), self.landmarks] )
                             break
                         elif key == ord('q'):
-                            skip_remaining = True
+                            #skip remaining
+
+                            if self.rect_locked:
+                                faces.append ( [(self.rect), self.landmarks] )
+                            while len(self.input_data) > 0:
+                                self.result.append( self.input_data.pop(0) )
+                                io.progress_bar_inc(1)
+
                             break
                         elif key == ord('h'):
                             self.hide_help = not self.hide_help
                             break
@@ -384,7 +416,7 @@ class ExtractSubprocessor(Subprocessor):
                         if self.x != new_x or \
                            self.y != new_y or \
                            self.rect_size != new_rect_size or \
-                           self.redraw_needed:
+                           self.extract_needed:
                             self.x = new_x
                             self.y = new_y
                             self.rect_size = new_rect_size
@@ -396,7 +428,6 @@ class ExtractSubprocessor(Subprocessor):
                                 return [filename, [self.rect]]

-                        io.process_messages(0.0001)

                 else:
                     is_frame_done = True
@@ -404,20 +435,8 @@ class ExtractSubprocessor(Subprocessor):
                     self.result.append ( data )
                     self.input_data.pop(0)
                     io.progress_bar_inc(1)
-                    self.redraw_needed = True
+                    self.extract_needed = True
                     self.rect_locked = False
-                elif go_to_prev_frame:
-                    self.input_data.insert(0, self.result.pop() )
-                    io.progress_bar_inc(-1)
-                    allow_remark_faces = True
-                    self.redraw_needed = True
-                    self.rect_locked = False
-                elif skip_remaining:
-                    if self.rect_locked:
-                        faces.append ( [(self.rect), self.landmarks] )
-                    while len(self.input_data) > 0:
-                        self.result.append( self.input_data.pop(0) )
-                        io.progress_bar_inc(1)

             return None
@@ -457,11 +476,11 @@ class ExtractSubprocessor(Subprocessor):
                 image = cv2.warpAffine(image, mat,(w,h) )
                 view_landmarks = LandmarksProcessor.transform_points (view_landmarks, mat)
-                image = LandmarksProcessor.draw_rect_landmarks (image, view_rect, view_landmarks, self.image_size, self.face_type)
+                LandmarksProcessor.draw_rect_landmarks (image, view_rect, view_landmarks, self.image_size, self.face_type)

                 if self.rect_locked:
-                    image = LandmarksProcessor.draw_landmarks(image, view_landmarks, (255,255,0) )
-                self.redraw_needed = False
+                    LandmarksProcessor.draw_landmarks(image, view_landmarks, (255,255,0) )
+                self.extract_needed = False

                 io.show_image (self.wnd_name, image)
             else:

View file

@@ -0,0 +1,287 @@
import traceback
import os
import sys
import time
import numpy as np
import numpy.linalg as npl
import cv2
from pathlib import Path
from interact import interact as io
from utils.cv2_utils import *
from utils import Path_utils
from utils.DFLPNG import DFLPNG
from utils.DFLJPG import DFLJPG
from facelib import LandmarksProcessor

def main(input_dir, output_dir):
    input_path = Path(input_dir)
    output_path = Path(output_dir)

    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    if not output_path.exists():
        output_path.mkdir(parents=True)

    wnd_name = "Labeling tool"
    io.named_window (wnd_name)
    io.capture_mouse(wnd_name)
    io.capture_keys(wnd_name)

    #for filename in io.progress_bar_generator (Path_utils.get_image_paths(input_path), desc="Labeling"):
    for filename in Path_utils.get_image_paths(input_path):
        filepath = Path(filename)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load( str(filepath) )
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load ( str(filepath) )
        else:
            dflimg = None

        if dflimg is None:
            io.log_err ("%s is not a dfl image file" % (filepath.name) )
            continue

        lmrks = dflimg.get_landmarks()
        lmrks_list = lmrks.tolist()
        orig_img = cv2_imread(str(filepath))
        h,w,c = orig_img.shape

        mask_orig = LandmarksProcessor.get_image_hull_mask( orig_img.shape, lmrks).astype(np.uint8)[:,:,0]
        ero_dil_rate = w // 8
        mask_ero = cv2.erode (mask_orig, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ero_dil_rate,ero_dil_rate)), iterations = 1 )
        mask_dil = cv2.dilate(mask_orig, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ero_dil_rate,ero_dil_rate)), iterations = 1 )

        #mask_bg = np.zeros(orig_img.shape[:2],np.uint8)
        mask_bg = 1-mask_dil
        mask_bgp = np.ones(orig_img.shape[:2],np.uint8) #default - all background possible
        mask_fg = np.zeros(orig_img.shape[:2],np.uint8)
        mask_fgp = np.zeros(orig_img.shape[:2],np.uint8)

        img = orig_img.copy()

        l_thick=2

        def draw_4_lines (masks_out, pts, thickness=1):
            fgp,fg,bg,bgp = masks_out
            h,w = fg.shape

            fgp_pts = []
            fg_pts = np.array([ pts[i:i+2] for i in range(len(pts)-1)])
            bg_pts = []
            bgp_pts = []

            for i in range(len(fg_pts)):
                a, b = line = fg_pts[i]

                ba = b-a
                v = ba / npl.norm(ba)

                ccpv = np.array([v[1],-v[0]])
                cpv = np.array([-v[1],v[0]])
                step = 1 / max(np.abs(cpv))

                fgp_pts.append ( np.clip (line + ccpv * step * thickness, 0, w-1 ).astype(np.int) )
                bg_pts.append ( np.clip (line + cpv * step * thickness, 0, w-1 ).astype(np.int) )
                bgp_pts.append ( np.clip (line + cpv * step * thickness * 2, 0, w-1 ).astype(np.int) )

            fgp_pts = np.array(fgp_pts)
            bg_pts = np.array(bg_pts)
            bgp_pts = np.array(bgp_pts)

            cv2.polylines(fgp, fgp_pts, False, (1,), thickness=thickness)
            cv2.polylines(fg, fg_pts, False, (1,), thickness=thickness)
            cv2.polylines(bg, bg_pts, False, (1,), thickness=thickness)
            cv2.polylines(bgp, bgp_pts, False, (1,), thickness=thickness)

        def draw_lines ( masks_steps, pts, thickness=1):
            lines = np.array([ pts[i:i+2] for i in range(len(pts)-1)])

            for mask, step in masks_steps:
                h,w = mask.shape
                mask_lines = []
                for i in range(len(lines)):
                    a, b = line = lines[i]
                    ba = b-a
                    ba_len = npl.norm(ba)
                    if ba_len != 0:
                        v = ba / ba_len
                        pv = np.array([-v[1],v[0]])
                        pv_inv_max = 1 / max(np.abs(pv))
                        mask_lines.append ( np.clip (line + pv * pv_inv_max * thickness * step, 0, w-1 ).astype(np.int) )
                    else:
                        mask_lines.append ( np.array(line, dtype=np.int) )
                cv2.polylines(mask, mask_lines, False, (1,), thickness=thickness)

        def draw_fill_convex( mask_out, pts, scale=1.0 ):
            hull = cv2.convexHull(np.array(pts))

            if scale !=1.0:
                pts_count = hull.shape[0]

                sum_x = np.sum(hull[:, 0, 0])
                sum_y = np.sum(hull[:, 0, 1])

                hull_center = np.array([sum_x/pts_count, sum_y/pts_count])
                hull = hull_center+(hull-hull_center)*scale
                hull = hull.astype(pts.dtype)
            cv2.fillConvexPoly( mask_out, hull, (1,) )

        def get_gc_mask_bgr(gc_mask):
            h, w = gc_mask.shape
            bgr = np.zeros( (h,w,3), dtype=np.uint8 )

            bgr [ gc_mask == 0 ] = (0,0,0)
            bgr [ gc_mask == 1 ] = (255,255,255)
            bgr [ gc_mask == 2 ] = (0,0,255) #RED
            bgr [ gc_mask == 3 ] = (0,255,0) #GREEN
            return bgr

        def get_gc_mask_result(gc_mask):
            return np.where((gc_mask==1) + (gc_mask==3),1,0).astype(np.int)

        #convex inner of right chin to end of right eyebrow
        #draw_fill_convex ( mask_fgp, lmrks_list[8:17]+lmrks_list[26:27] )

        #convex inner of start right chin to right eyebrow
        #draw_fill_convex ( mask_fgp, lmrks_list[8:9]+lmrks_list[22:27] )

        #convex inner of nose
        draw_fill_convex ( mask_fgp, lmrks[27:36] )

        #convex inner of nose half
        draw_fill_convex ( mask_fg, lmrks[27:36], scale=0.5 )

        #left corner of mouth to left corner of nose
        #draw_lines ( [ (mask_fg,0), ], lmrks_list[49:50]+lmrks_list[32:33], l_thick)

        #convex inner: right corner of nose to centers of eyebrows
        #draw_fill_convex ( mask_fgp, lmrks_list[35:36]+lmrks_list[19:20]+lmrks_list[24:25])

        #right corner of mouth to right corner of nose
        #draw_lines ( [ (mask_fg,0), ], lmrks_list[54:55]+lmrks_list[35:36], l_thick)

        #left eye
        #draw_fill_convex ( mask_fg, lmrks_list[36:40] )
        #right eye
        #draw_fill_convex ( mask_fg, lmrks_list[42:48] )

        #right chin
        draw_lines ( [ (mask_bg,0), (mask_fg,-1), ], lmrks[8:17], l_thick)

        #left eyebrow center to right eyeprow center
        draw_lines ( [ (mask_bg,-1), (mask_fg,0), ], lmrks_list[19:20] + lmrks_list[24:25], l_thick)
        # #draw_lines ( [ (mask_bg,-1), (mask_fg,0), ], lmrks_list[24:25] + lmrks_list[19:17:-1], l_thick)

        #half right eyebrow to end of right chin
        draw_lines ( [ (mask_bg,-1), (mask_fg,0), ], lmrks_list[24:27] + lmrks_list[16:17], l_thick)

        #import code
        #code.interact(local=dict(globals(), **locals()))

        #compose mask layers
        gc_mask = np.zeros(orig_img.shape[:2],np.uint8)
        gc_mask [ mask_bgp==1 ] = 2
        gc_mask [ mask_fgp==1 ] = 3
        gc_mask [ mask_bg==1 ] = 0
        gc_mask [ mask_fg==1 ] = 1

        gc_bgr_before = get_gc_mask_bgr (gc_mask)

        #io.show_image (wnd_name, gc_mask )

        ##points, hierarcy = cv2.findContours(original_mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        ##gc_mask = ( (1-erode_mask)*2 + erode_mask )# * dilate_mask
        #gc_mask = (1-erode_mask)*2 + erode_mask
        #cv2.addWeighted(
        #gc_mask = mask_0_27 + (1-mask_0_27)*2
        #
        ##import code
        ##code.interact(local=dict(globals(), **locals()))
        #
        #rect = (1,1,img.shape[1]-2,img.shape[0]-2)
        #
        #

        cv2.grabCut(img,gc_mask,None,np.zeros((1,65),np.float64),np.zeros((1,65),np.float64),5, cv2.GC_INIT_WITH_MASK)

        gc_bgr = get_gc_mask_bgr (gc_mask)
        gc_mask_result = get_gc_mask_result(gc_mask)
        gc_mask_result_1 = gc_mask_result[:,:,np.newaxis]

        #import code
        #code.interact(local=dict(globals(), **locals()))

        orig_img_gc_layers_masked = (0.5*orig_img + 0.5*gc_bgr).astype(np.uint8)
        orig_img_gc_before_layers_masked = (0.5*orig_img + 0.5*gc_bgr_before).astype(np.uint8)

        pink_bg = np.full ( orig_img.shape, (255,0,255), dtype=np.uint8 )

        orig_img_result = orig_img * gc_mask_result_1
        orig_img_result_pinked = orig_img_result + pink_bg * (1-gc_mask_result_1)

        #io.show_image (wnd_name, blended_img)

        ##gc_mask, bgdModel, fgdModel =
        #
        #mask2 = np.where((gc_mask==1) + (gc_mask==3),255,0).astype('uint8')[:,:,np.newaxis]
        #mask2 = np.repeat(mask2, (3,), -1)
        #
        ##mask2 = np.where(gc_mask!=0,255,0).astype('uint8')
        #blended_img = orig_img #-\
        #              #0.3 * np.full(original_img.shape, (50,50,50)) * (1-mask_0_27)[:,:,np.newaxis]
        #              #0.3 * np.full(original_img.shape, (50,50,50)) * (1-dilate_mask)[:,:,np.newaxis] +\
        #              #0.3 * np.full(original_img.shape, (50,50,50)) * (erode_mask)[:,:,np.newaxis]
        #blended_img = np.clip(blended_img, 0, 255).astype(np.uint8)
        ##import code
        ##code.interact(local=dict(globals(), **locals()))

        orig_img_lmrked = orig_img.copy()
        LandmarksProcessor.draw_landmarks(orig_img_lmrked, lmrks, transparent_mask=True)

        screen = np.concatenate ([orig_img_gc_before_layers_masked,
                                  orig_img_gc_layers_masked,
                                  orig_img,
                                  orig_img_lmrked,
                                  orig_img_result_pinked,
                                  orig_img_result,
                                  ], axis=1)

        io.show_image (wnd_name, screen.astype(np.uint8) )

        while True:
            io.process_messages()

            for (x,y,ev,flags) in io.get_mouse_events(wnd_name):
                pass
                #print (x,y,ev,flags)

            key_events = [ ev for ev, in io.get_key_events(wnd_name) ]
            for key in key_events:
                if key == ord('1'):
                    pass
                if key == ord('2'):
                    pass
                if key == ord('3'):
                    pass

            if ord(' ') in key_events:
                break

    import code
    code.interact(local=dict(globals(), **locals()))

    #original_mask = np.ones(original_img.shape[:2],np.uint8)*2
    #cv2.drawContours(original_mask, points, -1, (1,), 1)
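The mask values composed above map onto OpenCV's grabCut classes: 0 = definite background, 1 = definite foreground, 2 = probable background, 3 = probable foreground (cv2.GC_BGD, GC_FGD, GC_PR_BGD, GC_PR_FGD). A minimal standalone sketch of the same call, with a hypothetical image and seed region:

    import cv2
    import numpy as np

    img = cv2.imread('face.jpg')                                # hypothetical input
    gc_mask = np.full(img.shape[:2], cv2.GC_PR_BGD, np.uint8)   # everything starts as probable background
    gc_mask[100:200, 100:200] = cv2.GC_PR_FGD                   # hypothetical probable-foreground seed

    bgd_model = np.zeros((1, 65), np.float64)
    fgd_model = np.zeros((1, 65), np.float64)
    cv2.grabCut(img, gc_mask, None, bgd_model, fgd_model, 5, cv2.GC_INIT_WITH_MASK)

    # keep pixels labeled definite or probable foreground, as get_gc_mask_result does above
    result = np.where((gc_mask == cv2.GC_FGD) | (gc_mask == cv2.GC_PR_FGD), 1, 0).astype(np.uint8)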

View file

@@ -63,7 +63,7 @@ def add_landmarks_debug_images(input_path):
         if img is not None:
             face_landmarks = dflimg.get_landmarks()
-            img = LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True)
+            LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True)

             output_file = '{}{}'.format( str(Path(str(input_path)) / filepath.stem), '_debug.jpg')
             cv2_imwrite(output_file, img, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )

View file

@@ -48,13 +48,13 @@ class SampleProcessor(object):
         is_face_sample = sample.landmarks is not None

         if debug and is_face_sample:
-            sample_bgr = LandmarksProcessor.draw_landmarks (sample_bgr, sample.landmarks, (0, 1, 0))
+            LandmarksProcessor.draw_landmarks (sample_bgr, sample.landmarks, (0, 1, 0))

         close_sample = sample.close_target_list[ np.random.randint(0, len(sample.close_target_list)) ] if sample.close_target_list is not None else None
         close_sample_bgr = close_sample.load_bgr() if close_sample is not None else None

         if debug and close_sample_bgr is not None:
-            close_sample_bgr = LandmarksProcessor.draw_landmarks (close_sample_bgr, close_sample.landmarks, (0, 1, 0))
+            LandmarksProcessor.draw_landmarks (close_sample_bgr, close_sample.landmarks, (0, 1, 0))

         params = image_utils.gen_warp_params(sample_bgr, sample_process_options.random_flip, rotation_range=sample_process_options.rotation_range, scale_range=sample_process_options.scale_range, tx_range=sample_process_options.tx_range, ty_range=sample_process_options.ty_range )

View file

@@ -108,43 +108,45 @@ class DFLJPG(object):
             return inst
         except Exception as e:
-            raise Exception("Corrupted JPG file: %s" % (str(e)))
-        return None
+            raise Exception ("Corrupted JPG file: %s" % (str(e)))

     @staticmethod
     def load(filename):
-        inst = DFLJPG.load_raw (filename)
-        inst.dfl_dict = None
+        try:
+            inst = DFLJPG.load_raw (filename)
+            inst.dfl_dict = None

-        for chunk in inst.chunks:
-            if chunk['name'] == 'APP0':
-                d, c = chunk['data'], 0
-                c, id, _ = struct_unpack (d, c, "=4sB")
-                if id == b"JFIF":
-                    c, ver_major, ver_minor, units, Xdensity, Ydensity, Xthumbnail, Ythumbnail = struct_unpack (d, c, "=BBBHHBB")
-                    #if units == 0:
-                    #    inst.shape = (Ydensity, Xdensity, 3)
-                else:
-                    raise Exception("Unknown jpeg ID: %s" % (id) )
-            elif chunk['name'] == 'SOF0' or chunk['name'] == 'SOF2':
-                d, c = chunk['data'], 0
-                c, precision, height, width = struct_unpack (d, c, ">BHH")
-                inst.shape = (height, width, 3)
-            elif chunk['name'] == 'APP15':
-                if type(chunk['data']) == bytes:
-                    inst.dfl_dict = pickle.loads(chunk['data'])
+            for chunk in inst.chunks:
+                if chunk['name'] == 'APP0':
+                    d, c = chunk['data'], 0
+                    c, id, _ = struct_unpack (d, c, "=4sB")
+                    if id == b"JFIF":
+                        c, ver_major, ver_minor, units, Xdensity, Ydensity, Xthumbnail, Ythumbnail = struct_unpack (d, c, "=BBBHHBB")
+                        #if units == 0:
+                        #    inst.shape = (Ydensity, Xdensity, 3)
+                    else:
+                        raise Exception("Unknown jpeg ID: %s" % (id) )
+                elif chunk['name'] == 'SOF0' or chunk['name'] == 'SOF2':
+                    d, c = chunk['data'], 0
+                    c, precision, height, width = struct_unpack (d, c, ">BHH")
+                    inst.shape = (height, width, 3)
+                elif chunk['name'] == 'APP15':
+                    if type(chunk['data']) == bytes:
+                        inst.dfl_dict = pickle.loads(chunk['data'])

-        if (inst.dfl_dict is not None) and ('face_type' not in inst.dfl_dict.keys()):
-            inst.dfl_dict['face_type'] = FaceType.toString (FaceType.FULL)
+            if (inst.dfl_dict is not None) and ('face_type' not in inst.dfl_dict.keys()):
+                inst.dfl_dict['face_type'] = FaceType.toString (FaceType.FULL)

-        if inst.dfl_dict == None:
-            return None
+            if inst.dfl_dict == None:
+                return None

-        return inst
+            return inst
+        except Exception as e:
+            print (e)
+            return None

     @staticmethod
     def embed_data(filename, face_type=None,
                              landmarks=None,
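DFLJPG.load (and DFLPNG.load below) now catches exceptions itself and returns None for unreadable or non-DFL files, so callers only need a None check. A small caller sketch (the path is hypothetical):

    from utils.DFLJPG import DFLJPG

    dflimg = DFLJPG.load('aligned/00001_0.jpg')   # hypothetical aligned-face path
    if dflimg is None:
        print('not a readable DFL jpg, skipping')
    else:
        landmarks = dflimg.get_landmarks()        # landmarks embedded via embed_data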

View file

@@ -249,17 +249,21 @@ class DFLPNG(object):

     @staticmethod
     def load(filename):
-        inst = DFLPNG.load_raw (filename)
-        inst.fcwp_dict = inst.getDFLDictData()
+        try:
+            inst = DFLPNG.load_raw (filename)
+            inst.fcwp_dict = inst.getDFLDictData()

-        if (inst.fcwp_dict is not None) and ('face_type' not in inst.fcwp_dict.keys()):
-            inst.fcwp_dict['face_type'] = FaceType.toString (FaceType.FULL)
+            if (inst.fcwp_dict is not None) and ('face_type' not in inst.fcwp_dict.keys()):
+                inst.fcwp_dict['face_type'] = FaceType.toString (FaceType.FULL)

-        if inst.fcwp_dict == None:
-            return None
+            if inst.fcwp_dict == None:
+                return None

-        return inst
+            return inst
+        except Exception as e:
+            print(e)
+            return None

     @staticmethod
     def embed_data(filename, face_type=None,
                              landmarks=None,