optimizations of nnlib and SampleGeneratorFace,

refactorings
iperov 2019-01-22 11:52:04 +04:00
commit b6c4171ea1
9 changed files with 175 additions and 79 deletions

samples/Sample.py (View file)

@@ -9,12 +9,13 @@ class SampleType(IntEnum):
FACE = 1 #aligned face unsorted
FACE_YAW_SORTED = 2 #sorted by yaw
FACE_YAW_SORTED_AS_TARGET = 3 #sorted by yaw and included only yaws which exist in TARGET also automatic mirrored
-FACE_END = 3
+FACE_WITH_CLOSE_TO_SELF = 4
+FACE_END = 4
-QTY = 4
+QTY = 5
class Sample(object):
-def __init__(self, sample_type=None, filename=None, face_type=None, shape=None, landmarks=None, yaw=None, mirror=None, nearest_target_list=None):
+def __init__(self, sample_type=None, filename=None, face_type=None, shape=None, landmarks=None, yaw=None, mirror=None, close_target_list=None):
self.sample_type = sample_type if sample_type is not None else SampleType.IMAGE
self.filename = filename
self.face_type = face_type
@@ -22,9 +23,9 @@ class Sample(object):
self.landmarks = np.array(landmarks) if landmarks is not None else None
self.yaw = yaw
self.mirror = mirror
-self.nearest_target_list = nearest_target_list
+self.close_target_list = close_target_list
-def copy_and_set(self, sample_type=None, filename=None, face_type=None, shape=None, landmarks=None, yaw=None, mirror=None, nearest_target_list=None):
+def copy_and_set(self, sample_type=None, filename=None, face_type=None, shape=None, landmarks=None, yaw=None, mirror=None, close_target_list=None):
return Sample(
sample_type=sample_type if sample_type is not None else self.sample_type,
filename=filename if filename is not None else self.filename,
@@ -33,7 +34,7 @@ class Sample(object):
landmarks=landmarks if landmarks is not None else self.landmarks.copy(),
yaw=yaw if yaw is not None else self.yaw,
mirror=mirror if mirror is not None else self.mirror,
-nearest_target_list=nearest_target_list if nearest_target_list is not None else self.nearest_target_list)
+close_target_list=close_target_list if close_target_list is not None else self.close_target_list)
def load_bgr(self):
img = cv2.imread (self.filename).astype(np.float32) / 255.0
@@ -41,7 +42,7 @@ class Sample(object):
img = img[:,::-1].copy()
return img
-def get_random_nearest_target_sample(self):
-if self.nearest_target_list is None:
+def get_random_close_target_sample(self):
+if self.close_target_list is None:
return None
-return self.nearest_target_list[randint (0, len(self.nearest_target_list)-1)]
+return self.close_target_list[randint (0, len(self.close_target_list)-1)]
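The nearest_target_list field and its accessor are renamed to close_target_list / get_random_close_target_sample; the behaviour itself is unchanged. A minimal standalone sketch of what the accessor does (free-function name is illustrative; randint is assumed to be random.randint with an inclusive upper bound, which the len(...)-1 argument implies):

    from random import randint

    def random_close_target(close_target_list):
        # Mirror of Sample.get_random_close_target_sample: draw one close target
        # uniformly at random, or return None when no list was attached.
        if close_target_list is None:
            return None
        return close_target_list[randint(0, len(close_target_list) - 1)]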

samples/SampleGeneratorFace.py (View file)

@@ -11,13 +11,14 @@ from samples import SampleLoader
from samples import SampleGeneratorBase
'''
arg
output_sample_types = [
[SampleProcessor.TypeFlags, size, (optional)random_sub_size] ,
...
]
'''
class SampleGeneratorFace(SampleGeneratorBase):
-def __init__ (self, samples_path, debug, batch_size, sort_by_yaw=False, sort_by_yaw_target_samples_path=None, sample_process_options=SampleProcessor.Options(), output_sample_types=[], **kwargs):
+def __init__ (self, samples_path, debug, batch_size, sort_by_yaw=False, sort_by_yaw_target_samples_path=None, with_close_to_self=False, sample_process_options=SampleProcessor.Options(), output_sample_types=[], generators_count=2, **kwargs):
super().__init__(samples_path, debug, batch_size)
self.sample_process_options = sample_process_options
self.output_sample_types = output_sample_types
@@ -26,24 +27,20 @@ class SampleGeneratorFace(SampleGeneratorBase):
self.sample_type = SampleType.FACE_YAW_SORTED_AS_TARGET
elif sort_by_yaw:
self.sample_type = SampleType.FACE_YAW_SORTED
+elif with_close_to_self:
+self.sample_type = SampleType.FACE_WITH_CLOSE_TO_SELF
else:
-self.sample_type = SampleType.FACE
+self.sample_type = SampleType.FACE
self.samples = SampleLoader.load (self.sample_type, self.samples_path, sort_by_yaw_target_samples_path)
+self.generators_count = min ( generators_count, len(self.samples) )
if self.debug:
-self.generator_samples = [ self.samples ]
self.generators = [iter_utils.ThisThreadGenerator ( self.batch_func, 0 )]
else:
-if len(self.samples) > 1:
-self.generator_samples = [ self.samples[0::2],
-self.samples[1::2] ]
-self.generators = [iter_utils.SubprocessGenerator ( self.batch_func, 0 ),
-iter_utils.SubprocessGenerator ( self.batch_func, 1 )]
-else:
-self.generator_samples = [ self.samples ]
-self.generators = [iter_utils.SubprocessGenerator ( self.batch_func, 0 )]
+self.generators = [iter_utils.SubprocessGenerator ( self.batch_func, i ) for i in range(self.generators_count) ]
self.generator_counter = -1
def __iter__(self):
@@ -55,7 +52,8 @@ class SampleGeneratorFace(SampleGeneratorBase):
return next(generator)
def batch_func(self, generator_id):
-samples = self.generator_samples[generator_id]
+samples = self.samples[generator_id::self.generators_count]
data_len = len(samples)
if data_len == 0:
raise ValueError('No training data provided.')
@@ -64,7 +62,7 @@ class SampleGeneratorFace(SampleGeneratorBase):
if all ( [ x == None for x in samples] ):
raise ValueError('Not enough training data. Gather more faces!')
-if self.sample_type == SampleType.FACE:
+if self.sample_type == SampleType.FACE or self.sample_type == SampleType.FACE_WITH_CLOSE_TO_SELF:
shuffle_idxs = []
elif self.sample_type == SampleType.FACE_YAW_SORTED or self.sample_type == SampleType.FACE_YAW_SORTED_AS_TARGET:
shuffle_idxs = []
@@ -77,7 +75,7 @@ class SampleGeneratorFace(SampleGeneratorBase):
while True:
sample = None
-if self.sample_type == SampleType.FACE:
+if self.sample_type == SampleType.FACE or self.sample_type == SampleType.FACE_WITH_CLOSE_TO_SELF:
if len(shuffle_idxs) == 0:
shuffle_idxs = random.sample( range(data_len), data_len )
idx = shuffle_idxs.pop()
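The hard-coded even/odd split into at most two subprocess generators is replaced by a generic stripe: generator i now reads self.samples[i::self.generators_count]. A small sketch of that partitioning in plain Python (helper name is illustrative, not tied to iter_utils):

    def split_for_generators(samples, generators_count):
        # Generator i gets every generators_count-th sample starting at offset i,
        # so the slices are disjoint and together cover the whole list.
        return [samples[i::generators_count] for i in range(generators_count)]

    # e.g. 7 samples striped over 3 generators -> [[0, 3, 6], [1, 4], [2, 5]]
    print(split_for_generators(list(range(7)), 3))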

samples/SampleLoader.py (View file)

@@ -23,9 +23,6 @@ class SampleLoader:
if str(samples_path) not in cache.keys():
cache[str(samples_path)] = [None]*SampleType.QTY
-if target_samples_path is not None and str(target_samples_path) not in cache.keys():
-cache[str(target_samples_path)] = [None]*SampleType.QTY
datas = cache[str(samples_path)]
if sample_type == SampleType.IMAGE:
@@ -45,7 +42,11 @@ class SampleLoader:
if target_samples_path is None:
raise Exception('target_samples_path is None for FACE_YAW_SORTED_AS_TARGET')
datas[sample_type] = SampleLoader.upgradeToFaceYawSortedAsTargetSamples( SampleLoader.load(SampleType.FACE_YAW_SORTED, samples_path), SampleLoader.load(SampleType.FACE_YAW_SORTED, target_samples_path) )
+elif sample_type == SampleType.FACE_WITH_CLOSE_TO_SELF:
+if datas[sample_type] is None:
+datas[sample_type] = SampleLoader.upgradeToFaceCloseToSelfSamples( SampleLoader.load(SampleType.FACE, samples_path) )
return datas[sample_type]
@staticmethod
@@ -69,6 +70,39 @@ class SampleLoader:
landmarks=dflpng.get_landmarks(),
yaw=dflpng.get_yaw_value()) )
return sample_list
+@staticmethod
+def upgradeToFaceCloseToSelfSamples (samples):
+yaw_samples = SampleLoader.upgradeToFaceYawSortedSamples(samples)
+yaw_samples_len = len(yaw_samples)
+sample_list = []
+for i in tqdm( range(yaw_samples_len), desc="Sorting" ):
+if yaw_samples[i] is not None:
+for s in yaw_samples[i]:
+s_t = []
+for n in range(2000):
+yaw_idx = np.clip ( i-10 +np.random.randint(20), 0, yaw_samples_len-1 )
+if yaw_samples[yaw_idx] is None:
+continue
+yaw_idx_samples_len = len(yaw_samples[yaw_idx])
+yaw_idx_sample = yaw_samples[yaw_idx][ np.random.randint(yaw_idx_samples_len) ]
+if s.filename == yaw_idx_sample.filename:
+continue
+s_t.append ( yaw_idx_sample )
+if len(s_t) >= 50:
+break
+if len(s_t) == 0:
+s_t = [s]
+sample_list.append( s.copy_and_set(close_target_list = s_t) )
+return sample_list
@staticmethod
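upgradeToFaceCloseToSelfSamples first buckets the faces by yaw via upgradeToFaceYawSortedSamples, then for every face draws candidates from yaw buckets at most 10 bins away, keeping up to 50 close targets out of at most 2000 draws and falling back to the sample itself when nothing qualifies. A condensed sketch of that selection loop (helper name and defaults are illustrative; yaw_samples is assumed to be a list of buckets where empty bins are None):

    import numpy as np

    def pick_close_targets(yaw_samples, i, sample, max_targets=50, max_tries=2000, yaw_radius=10):
        # Collect up to max_targets samples whose yaw bucket lies within yaw_radius
        # bins of bucket i, skipping empty buckets and the sample itself.
        close = []
        n = len(yaw_samples)
        for _ in range(max_tries):
            yaw_idx = np.clip(i - yaw_radius + np.random.randint(yaw_radius * 2), 0, n - 1)
            bucket = yaw_samples[yaw_idx]
            if bucket is None:
                continue
            candidate = bucket[np.random.randint(len(bucket))]
            if candidate.filename == sample.filename:
                continue
            close.append(candidate)
            if len(close) >= max_targets:
                break
        return close if close else [sample]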

samples/SampleProcessor.py (View file)

@@ -14,10 +14,15 @@ class SampleProcessor(object):
TRANSFORMED = 0x00000008,
LANDMARKS_ARRAY = 0x00000010, #currently unused
+RANDOM_CLOSE = 0x00000020,
+MORPH_TO_RANDOM_CLOSE = 0x00000040,
FACE_ALIGN_HALF = 0x00000100,
FACE_ALIGN_FULL = 0x00000200,
FACE_ALIGN_HEAD = 0x00000400,
-FACE_ALIGN_AVATAR = 0x00000800,
+FACE_ALIGN_AVATAR = 0x00000800,
FACE_MASK_FULL = 0x00001000,
FACE_MASK_EYES = 0x00002000,
@@ -38,18 +43,24 @@ class SampleProcessor(object):
@staticmethod
def process (sample, sample_process_options, output_sample_types, debug):
-source = sample.load_bgr()
-h,w,c = source.shape
+sample_bgr = sample.load_bgr()
+h,w,c = sample_bgr.shape
is_face_sample = sample.landmarks is not None
if debug and is_face_sample:
-LandmarksProcessor.draw_landmarks (source, sample.landmarks, (0, 1, 0))
+LandmarksProcessor.draw_landmarks (sample_bgr, sample.landmarks, (0, 1, 0))
+close_sample = sample.close_target_list[ np.random.randint(0, len(sample.close_target_list)) ] if sample.close_target_list is not None else None
+close_sample_bgr = close_sample.load_bgr() if close_sample is not None else None
+if debug and close_sample_bgr is not None:
+LandmarksProcessor.draw_landmarks (close_sample_bgr, close_sample.landmarks, (0, 1, 0))
-params = image_utils.gen_warp_params(source, sample_process_options.random_flip, rotation_range=sample_process_options.rotation_range, scale_range=sample_process_options.scale_range, tx_range=sample_process_options.tx_range, ty_range=sample_process_options.ty_range )
+params = image_utils.gen_warp_params(sample_bgr, sample_process_options.random_flip, rotation_range=sample_process_options.rotation_range, scale_range=sample_process_options.scale_range, tx_range=sample_process_options.tx_range, ty_range=sample_process_options.ty_range )
-images = [[None]*3 for _ in range(5)]
+images = [[None]*3 for _ in range(30)]
sample_rnd_seed = np.random.randint(0x80000000)
outputs = []
@@ -71,6 +82,11 @@ class SampleProcessor(object):
else:
raise ValueError ('expected SampleTypeFlags type')
+if f & SampleProcessor.TypeFlags.RANDOM_CLOSE != 0:
+img_type += 10
+elif f & SampleProcessor.TypeFlags.MORPH_TO_RANDOM_CLOSE != 0:
+img_type += 20
face_mask_type = 0
if f & SampleProcessor.TypeFlags.FACE_MASK_FULL != 0:
face_mask_type = 1
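RANDOM_CLOSE and MORPH_TO_RANDOM_CLOSE do not get image slots of their own; they are folded into the cache index by offsetting img_type by +10 or +20 (which is why the images cache grows from 5 to 30 entries), and the offset is subtracted back out further down when the image is actually built. A rough sketch of that encode/decode convention (constant and function names are hypothetical):

    RANDOM_CLOSE_OFFSET = 10
    MORPH_CLOSE_OFFSET = 20

    def encode_img_type(base_type, random_close=False, morph_close=False):
        # Pack the "close target" variant into the image cache index.
        if random_close:
            return base_type + RANDOM_CLOSE_OFFSET
        if morph_close:
            return base_type + MORPH_CLOSE_OFFSET
        return base_type

    def decode_img_type(img_type):
        # Recover (base warp type, variant) from the packed index.
        if 10 <= img_type <= 19:
            return img_type - RANDOM_CLOSE_OFFSET, 'random_close'
        if 20 <= img_type <= 29:
            return img_type - MORPH_CLOSE_OFFSET, 'morph_to_random_close'
        return img_type, 'source'

    assert decode_img_type(encode_img_type(3, random_close=True)) == (3, 'random_close')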
@@ -92,14 +108,54 @@ class SampleProcessor(object):
l = np.concatenate ( [ np.expand_dims(l[:,0] / w,-1), np.expand_dims(l[:,1] / h,-1) ], -1 )
l = np.clip(l, 0.0, 1.0)
img = l
-else:
+else:
if images[img_type][face_mask_type] is None:
-img = source
+if img_type >= 10 and img_type <= 19: #RANDOM_CLOSE
+img_type -= 10
+img = close_sample_bgr
+cur_sample = close_sample
+elif img_type >= 20 and img_type <= 29: #MORPH_TO_RANDOM_CLOSE
+img_type -= 20
+res = sample.shape[0]
+s_landmarks = sample.landmarks.copy()
+d_landmarks = close_sample.landmarks.copy()
+idxs = list(range(len(s_landmarks)))
+#remove landmarks near boundaries
+for i in idxs[:]:
+s_l = s_landmarks[i]
+d_l = d_landmarks[i]
+if s_l[0] < 5 or s_l[1] < 5 or s_l[0] >= res-5 or s_l[1] >= res-5 or \
+d_l[0] < 5 or d_l[1] < 5 or d_l[0] >= res-5 or d_l[1] >= res-5:
+idxs.remove(i)
+#remove landmarks that are closer than 5 px to another landmark
+for landmarks in [s_landmarks, d_landmarks]:
+for i in idxs[:]:
+s_l = landmarks[i]
+for j in idxs[:]:
+if i == j:
+continue
+s_l_2 = landmarks[j]
+diff_l = np.abs(s_l - s_l_2)
+if np.sqrt(diff_l.dot(diff_l)) < 5:
+idxs.remove(i)
+break
+s_landmarks = s_landmarks[idxs]
+d_landmarks = d_landmarks[idxs]
+s_landmarks = np.concatenate ( [s_landmarks, [ [0,0], [ res // 2, 0], [ res-1, 0], [0, res//2], [res-1, res//2] ,[0,res-1] ,[res//2, res-1] ,[res-1,res-1] ] ] )
+d_landmarks = np.concatenate ( [d_landmarks, [ [0,0], [ res // 2, 0], [ res-1, 0], [0, res//2], [res-1, res//2] ,[0,res-1] ,[res//2, res-1] ,[res-1,res-1] ] ] )
+img = image_utils.morph_by_points (sample_bgr, s_landmarks, d_landmarks)
+cur_sample = close_sample
+else:
+img = sample_bgr
+cur_sample = sample
if is_face_sample:
if face_mask_type == 1:
-img = np.concatenate( (img, LandmarksProcessor.get_image_hull_mask (source, sample.landmarks) ), -1 )
+img = np.concatenate( (img, LandmarksProcessor.get_image_hull_mask (img.shape, cur_sample.landmarks) ), -1 )
elif face_mask_type == 2:
-mask = LandmarksProcessor.get_image_eye_mask (source, sample.landmarks)
+mask = LandmarksProcessor.get_image_eye_mask (img.shape, cur_sample.landmarks)
mask = np.expand_dims (cv2.blur (mask, ( w // 32, w // 32 ) ), -1)
mask[mask > 0.0] = 1.0
img = np.concatenate( (img, mask ), -1 )
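For MORPH_TO_RANDOM_CLOSE the landmark pairs are pruned before morphing: points within 5 px of the image border in either image are dropped, points closer than 5 px to another landmark are dropped, and eight fixed border points (corners plus edge midpoints) are appended so image_utils.morph_by_points keeps the frame edges pinned. A simplified approximation of that pruning (the diff mutates the index list in place instead; the helper name is hypothetical and the landmark arrays are assumed to be N x 2 numpy arrays):

    import numpy as np

    def filter_morph_landmarks(s_landmarks, d_landmarks, res, margin=5):
        # Keep only landmark pairs that lie at least `margin` px away from the
        # image border in both the source and the destination image.
        idxs = [i for i, (s, d) in enumerate(zip(s_landmarks, d_landmarks))
                if margin <= s[0] < res - margin and margin <= s[1] < res - margin
                and margin <= d[0] < res - margin and margin <= d[1] < res - margin]

        # Drop landmarks that sit closer than `margin` px to another kept landmark
        # in either set (simplified version of the in-place removal in the diff).
        for pts in (s_landmarks, d_landmarks):
            idxs = [i for i in idxs
                    if all(np.linalg.norm(pts[i] - pts[j]) >= margin
                           for j in idxs if j != i)]

        # Pin the corners and edge midpoints so the morph leaves the borders fixed.
        anchors = np.array([[0, 0], [res // 2, 0], [res - 1, 0], [0, res // 2],
                            [res - 1, res // 2], [0, res - 1], [res // 2, res - 1],
                            [res - 1, res - 1]])
        return (np.concatenate([s_landmarks[idxs], anchors]),
                np.concatenate([d_landmarks[idxs], anchors]))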
@@ -107,11 +163,10 @@ class SampleProcessor(object):
images[img_type][face_mask_type] = image_utils.warp_by_params (params, img, (img_type==1 or img_type==2), (img_type==2 or img_type==3), img_type != 0, face_mask_type == 0)
img = images[img_type][face_mask_type]
if is_face_sample and target_face_type != -1:
if target_face_type > sample.face_type:
raise Exception ('sample %s type %s does not match model requirement %s. Consider extract necessary type of faces.' % (sample.filename, sample.face_type, target_face_type) )
img = cv2.warpAffine( img, LandmarksProcessor.get_transform_mat (sample.landmarks, size, target_face_type), (size,size), flags=cv2.INTER_LANCZOS4 )
else:
img = cv2.resize( img, (size,size), cv2.INTER_LANCZOS4 )
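Taken together, a model opts into the new behaviour through the two added constructor arguments and the new TypeFlags. A hedged wiring sketch; paths, sizes and the exact flag combination are illustrative, only identifiers that appear in this diff are used, and the import path is assumed from the existing from samples import ... style:

    from samples import SampleGeneratorFace, SampleProcessor

    f = SampleProcessor.TypeFlags
    generator = SampleGeneratorFace(
        training_data_src_path,      # assumed: path to the aligned face set
        debug=False,
        batch_size=16,
        with_close_to_self=True,     # new: attach a close_target_list to every sample
        generators_count=4,          # new: number of subprocess generators to spawn
        sample_process_options=SampleProcessor.Options(),
        output_sample_types=[
            # transformed source face plus its hull mask
            [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.FACE_MASK_FULL, 128],
            # the same face morphed towards a randomly chosen close target
            [f.MORPH_TO_RANDOM_CLOSE | f.FACE_ALIGN_FULL, 128],
        ])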