refactorings, added motion blur to SampleProcessor for FANSegmentator trainer

Author: iperov
Date:   2019-04-07 23:08:00 +04:00
Parent: a88ee7d093
Commit: 58d7e990f4
9 changed files with 238 additions and 72 deletions

@@ -23,3 +23,5 @@ from .DCSCN import DCSCN
 from .common import normalize_channels
 from .IEPolys import IEPolys
+from .blur import LinearMotionBlur

imagelib/blur.py (new file, +143 lines)

import math

import numpy as np
from scipy.signal import convolve2d
from skimage.draw import line
class LineDictionary:
    """Maps kernel size -> { angle : [r0, c0, r1, c1] } endpoints of the blur line."""
    def __init__(self):
        self.lines = {}
        self.Create3x3Lines()
        self.Create5x5Lines()
        self.Create7x7Lines()
        self.Create9x9Lines()

    def Create3x3Lines(self):
        lines = {}
        lines[0]   = [1,0,1,2]
        lines[45]  = [2,0,0,2]
        lines[90]  = [0,1,2,1]
        lines[135] = [0,0,2,2]
        self.lines[3] = lines

    def Create5x5Lines(self):
        lines = {}
        lines[0]     = [2,0,2,4]
        lines[22.5]  = [3,0,1,4]
        lines[45]    = [0,4,4,0]
        lines[67.5]  = [0,3,4,1]
        lines[90]    = [0,2,4,2]
        lines[112.5] = [0,1,4,3]
        lines[135]   = [0,0,4,4]
        lines[157.5] = [1,0,3,4]
        self.lines[5] = lines

    def Create7x7Lines(self):
        lines = {}
        lines[0]   = [3,0,3,6]
        lines[15]  = [4,0,2,6]
        lines[30]  = [5,0,1,6]
        lines[45]  = [6,0,0,6]
        lines[60]  = [6,1,0,5]
        lines[75]  = [6,2,0,4]
        lines[90]  = [0,3,6,3]
        lines[105] = [0,2,6,4]
        lines[120] = [0,1,6,5]
        lines[135] = [0,0,6,6]
        lines[150] = [1,0,5,6]
        lines[165] = [2,0,4,6]
        self.lines[7] = lines

    def Create9x9Lines(self):
        lines = {}
        lines[0]      = [4,0,4,8]
        lines[11.25]  = [5,0,3,8]
        lines[22.5]   = [6,0,2,8]
        lines[33.75]  = [7,0,1,8]
        lines[45]     = [8,0,0,8]
        lines[56.25]  = [8,1,0,7]
        lines[67.5]   = [8,2,0,6]
        lines[78.75]  = [8,3,0,5]
        lines[90]     = [8,4,0,4]
        lines[101.25] = [0,3,8,5]
        lines[112.5]  = [0,2,8,6]
        lines[123.75] = [0,1,8,7]
        lines[135]    = [0,0,8,8]
        lines[146.25] = [1,0,7,8]
        lines[157.5]  = [2,0,6,8]
        lines[168.75] = [3,0,5,8]
        self.lines[9] = lines

lineLengths = [3,5,7,9]
lineTypes = ["full", "right", "left"]

lineDict = LineDictionary()

def LinearMotionBlur_random(img):
    # Picks a random kernel size, line type and valid angle, then blurs img.
    lineLengthIdx = np.random.randint(0, len(lineLengths))
    lineTypeIdx = np.random.randint(0, len(lineTypes))
    lineLength = lineLengths[lineLengthIdx]
    lineType = lineTypes[lineTypeIdx]
    lineAngle = randomAngle(lineLength)
    return LinearMotionBlur(img, lineLength, lineAngle, lineType)

def LinearMotionBlur(img, dim, angle, linetype='full'):
    if len(img.shape) == 2:
        h, w = img.shape
        c = 1
        img = img[...,np.newaxis]
    elif len(img.shape) == 3:
        h, w, c = img.shape
    else:
        raise ValueError('unsupported img.shape')
    kernel = LineKernel(dim, angle, linetype)
    # Convolve each channel separately with the 2D line kernel.
    imgs = []
    for i in range(c):
        imgs.append ( convolve2d(img[...,i], kernel, mode='same') )
    img = np.stack(imgs, axis=-1)
    img = np.squeeze(img)
    return img

def LineKernel(dim, angle, linetype):
    kernelwidth = dim
    kernelCenter = int(math.floor(dim/2))
    angle = SanitizeAngleValue(kernelCenter, angle)
    kernel = np.zeros((kernelwidth, kernelwidth), dtype=np.float32)
    lineAnchors = list(lineDict.lines[dim][angle]) # copy, so the 'right'/'left' edits below don't mutate the shared dictionary entry
    if(linetype == 'right'):
        lineAnchors[0] = kernelCenter
        lineAnchors[1] = kernelCenter
    if(linetype == 'left'):
        lineAnchors[2] = kernelCenter
        lineAnchors[3] = kernelCenter
    rr,cc = line(lineAnchors[0], lineAnchors[1], lineAnchors[2], lineAnchors[3])
    kernel[rr,cc] = 1
    # Normalize so the kernel sums to 1 and overall brightness is preserved.
    normalizationFactor = np.count_nonzero(kernel)
    kernel = kernel / normalizationFactor
    return kernel

def SanitizeAngleValue(kernelCenter, angle):
    # Snaps an arbitrary angle to the nearest angle that has a line defined for this kernel size.
    numDistinctLines = kernelCenter * 4
    angle = math.fmod(angle, 180.0)
    validLineAngles = np.linspace(0, 180, numDistinctLines, endpoint=False)
    angle = nearestValue(angle, validLineAngles)
    return angle

def nearestValue(theta, validAngles):
    idx = (np.abs(validAngles - theta)).argmin()
    return validAngles[idx]

def randomAngle(kerneldim):
    kernelCenter = int(math.floor(kerneldim/2))
    numDistinctLines = kernelCenter * 4
    validLineAngles = np.linspace(0, 180, numDistinctLines, endpoint=False)
    angleIdx = np.random.randint(0, len(validLineAngles))
    return int(validLineAngles[angleIdx])
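
A minimal usage sketch of the new module (the file name and values are illustrative, not part of the commit):

    import cv2
    import numpy as np
    from imagelib import LinearMotionBlur

    img = cv2.imread('face.jpg').astype(np.float32) / 255.0  # hypothetical input image
    blurred = LinearMotionBlur(img, dim=5, angle=45)          # 5x5 kernel, 45-degree streak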

@@ -49,15 +49,15 @@ class Model(ModelBase):
 SampleGeneratorFace(self.training_data_src_path, sort_by_yaw_target_samples_path=self.training_data_dst_path if self.sort_by_yaw else None,
 debug=self.is_debug(), batch_size=self.batch_size,
 sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, scale_range=np.array([-0.05, 0.05])+self.src_scale_mod / 100.0 ),
-output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                      [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                      [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
+output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                      [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                      [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
 SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
 sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
-output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                      [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                      [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
+output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                      [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                      [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
 ])
 #override
 def onSave(self):

@@ -33,12 +33,12 @@ class Model(ModelBase):
 if self.is_training_mode:
 f = SampleProcessor.TypeFlags
-f_type = f.FACE_ALIGN_FULL
+f_type = f.FACE_TYPE_FULL
 self.set_training_data_generators ([
 SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
-sample_process_options=SampleProcessor.Options(random_flip=True, normalize_tanh = True ),
-output_sample_types=[ [f.TRANSFORMED | f_type | f.MODE_BGR_SHUFFLE, self.resolution],
+sample_process_options=SampleProcessor.Options(random_flip=True, motion_blur = [25, 1], normalize_tanh = True ),
+output_sample_types=[ [f.TRANSFORMED | f_type | f.MODE_BGR_SHUFFLE | f.OPT_APPLY_MOTION_BLUR, self.resolution],
 [f.TRANSFORMED | f_type | f.MODE_M | f.FACE_MASK_FULL, self.resolution]
 ]),
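
Here motion_blur = [25, 1] gives each src face a 25% chance of being blurred, and the range value 1 selects kernel sizes [3, 5] from the [3,5,7,9] table (see the Options parsing in SampleProcessor below). Note that OPT_APPLY_MOTION_BLUR is set only on the BGR sample type, so the paired mask stays sharp.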

@@ -59,15 +59,15 @@ class Model(ModelBase):
 SampleGeneratorFace(self.training_data_src_path, sort_by_yaw_target_samples_path=self.training_data_dst_path if self.sort_by_yaw else None,
 debug=self.is_debug(), batch_size=self.batch_size,
 sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, scale_range=np.array([-0.05, 0.05])+self.src_scale_mod / 100.0 ),
-output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 128],
-                      [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 128],
-                      [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
+output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 128],
+                      [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 128],
+                      [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
 SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
 sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
-output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 128],
-                      [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 128],
-                      [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
+output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 128],
+                      [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 128],
+                      [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
 ])
 #override

@@ -60,15 +60,15 @@ class Model(ModelBase):
 SampleGeneratorFace(self.training_data_src_path, sort_by_yaw_target_samples_path=self.training_data_dst_path if self.sort_by_yaw else None,
 debug=self.is_debug(), batch_size=self.batch_size,
 sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, scale_range=np.array([-0.05, 0.05])+self.src_scale_mod / 100.0 ),
-output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 64],
-                      [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 64],
-                      [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_M | f.FACE_MASK_FULL, 64] ] ),
+output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 64],
+                      [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 64],
+                      [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_M | f.FACE_MASK_FULL, 64] ] ),
 SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
 sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
-output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 64],
-                      [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_BGR, 64],
-                      [f.TRANSFORMED | f.FACE_ALIGN_HALF | f.MODE_M | f.FACE_MASK_FULL, 64] ] )
+output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 64],
+                      [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 64],
+                      [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_M | f.FACE_MASK_FULL, 64] ] )
 ])
 #override

@@ -56,15 +56,15 @@ class Model(ModelBase):
 SampleGeneratorFace(self.training_data_src_path, sort_by_yaw_target_samples_path=self.training_data_dst_path if self.sort_by_yaw else None,
 debug=self.is_debug(), batch_size=self.batch_size,
 sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, scale_range=np.array([-0.05, 0.05])+self.src_scale_mod / 100.0 ),
-output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                      [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                      [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
+output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                      [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                      [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
 SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
 sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
-output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                      [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_BGR, 128],
-                      [f.TRANSFORMED | f.FACE_ALIGN_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
+output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                      [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
+                      [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
 ])
 #override

@@ -320,7 +320,7 @@ class SAEModel(ModelBase):
 self.dst_sample_losses = []
 f = SampleProcessor.TypeFlags
-face_type = f.FACE_ALIGN_FULL if self.options['face_type'] == 'f' else f.FACE_ALIGN_HALF
+face_type = f.FACE_TYPE_FULL if self.options['face_type'] == 'f' else f.FACE_TYPE_HALF
 output_sample_types=[ [f.WARPED_TRANSFORMED | face_type | f.MODE_BGR, resolution] ]
 output_sample_types += [ [f.TRANSFORMED | face_type | f.MODE_BGR, resolution // (2**i) ] for i in range(ms_count)]

@@ -2,46 +2,58 @@ from enum import IntEnum
 import numpy as np
 import cv2
 import imagelib
 from facelib import LandmarksProcessor
 from facelib import FaceType
 class SampleProcessor(object):
 class TypeFlags(IntEnum):
 SOURCE = 0x00000001,
 WARPED = 0x00000002,
 WARPED_TRANSFORMED = 0x00000004,
 TRANSFORMED = 0x00000008,
 LANDMARKS_ARRAY = 0x00000010, #currently unused
-RANDOM_CLOSE = 0x00000020,
-MORPH_TO_RANDOM_CLOSE = 0x00000040,
-FACE_ALIGN_HALF = 0x00000100,
-FACE_ALIGN_FULL = 0x00000200,
-FACE_ALIGN_HEAD = 0x00000400,
-FACE_ALIGN_AVATAR = 0x00000800,
+RANDOM_CLOSE = 0x00000020, #currently unused
+MORPH_TO_RANDOM_CLOSE = 0x00000040, #currently unused
+FACE_TYPE_HALF = 0x00000100,
+FACE_TYPE_FULL = 0x00000200,
+FACE_TYPE_HEAD = 0x00000400, #currently unused
+FACE_TYPE_AVATAR = 0x00000800, #currently unused
 FACE_MASK_FULL = 0x00001000,
-FACE_MASK_EYES = 0x00002000,
-MODE_BGR = 0x01000000, #BGR
-MODE_G = 0x02000000, #Grayscale
-MODE_GGG = 0x04000000, #3xGrayscale
-MODE_M = 0x08000000, #mask only
-MODE_BGR_SHUFFLE = 0x10000000, #BGR shuffle
+FACE_MASK_EYES = 0x00002000, #currently unused
+MODE_BGR = 0x00010000, #BGR
+MODE_G = 0x00020000, #Grayscale
+MODE_GGG = 0x00040000, #3xGrayscale
+MODE_M = 0x00080000, #mask only
+MODE_BGR_SHUFFLE = 0x00100000, #BGR shuffle
+OPT_APPLY_MOTION_BLUR = 0x10000000,
 class Options(object):
-def __init__(self, random_flip = True, normalize_tanh = False, rotation_range=[-10,10], scale_range=[-0.05, 0.05], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05]):
+#motion_blur = [chance_int, range] - chance 0..100 to apply to face (not mask), and range [1..3] where 3 is highest power of motion blur
+def __init__(self, random_flip = True, normalize_tanh = False, rotation_range=[-10,10], scale_range=[-0.05, 0.05], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05], motion_blur=None ):
 self.random_flip = random_flip
 self.normalize_tanh = normalize_tanh
 self.rotation_range = rotation_range
 self.scale_range = scale_range
 self.tx_range = tx_range
 self.ty_range = ty_range
+self.motion_blur = motion_blur
+if self.motion_blur is not None:
+chance, range = self.motion_blur
+chance = np.clip(chance, 0, 100)
+range = [3,5,7,9][ : np.clip(range, 0, 3)+1 ]
+self.motion_blur = (chance, range)
 @staticmethod
 def process (sample, sample_process_options, output_sample_types, debug):
+SPTF = SampleProcessor.TypeFlags
 sample_bgr = sample.load_bgr()
 h,w,c = sample_bgr.shape
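
Two details worth noting in this hunk: the MODE_* bits move from 0x01000000..0x10000000 down to 0x00010000..0x00100000, freeing the 0x10000000 bit (previously MODE_BGR_SHUFFLE) for the new OPT_APPLY_MOTION_BLUR option flag, and the Options constructor normalizes the motion_blur argument. A quick sketch of the result:

    opts = SampleProcessor.Options(motion_blur=[25, 1])
    # chance is clipped to 0..100; the range value indexes the kernel-size table [3,5,7,9][:range+1]
    # opts.motion_blur == (25, [3, 5])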
@@ -68,40 +80,42 @@ class SampleProcessor(object):
 size = sample_type[1]
 random_sub_size = 0 if len (sample_type) < 3 else min( sample_type[2] , size)
-if f & SampleProcessor.TypeFlags.SOURCE != 0:
+if f & SPTF.SOURCE != 0:
 img_type = 0
-elif f & SampleProcessor.TypeFlags.WARPED != 0:
+elif f & SPTF.WARPED != 0:
 img_type = 1
-elif f & SampleProcessor.TypeFlags.WARPED_TRANSFORMED != 0:
+elif f & SPTF.WARPED_TRANSFORMED != 0:
 img_type = 2
-elif f & SampleProcessor.TypeFlags.TRANSFORMED != 0:
+elif f & SPTF.TRANSFORMED != 0:
 img_type = 3
-elif f & SampleProcessor.TypeFlags.LANDMARKS_ARRAY != 0:
+elif f & SPTF.LANDMARKS_ARRAY != 0:
 img_type = 4
 else:
 raise ValueError ('expected SampleTypeFlags type')
-if f & SampleProcessor.TypeFlags.RANDOM_CLOSE != 0:
+if f & SPTF.RANDOM_CLOSE != 0:
 img_type += 10
-elif f & SampleProcessor.TypeFlags.MORPH_TO_RANDOM_CLOSE != 0:
+elif f & SPTF.MORPH_TO_RANDOM_CLOSE != 0:
 img_type += 20
 face_mask_type = 0
-if f & SampleProcessor.TypeFlags.FACE_MASK_FULL != 0:
+if f & SPTF.FACE_MASK_FULL != 0:
 face_mask_type = 1
-elif f & SampleProcessor.TypeFlags.FACE_MASK_EYES != 0:
+elif f & SPTF.FACE_MASK_EYES != 0:
 face_mask_type = 2
 target_face_type = -1
-if f & SampleProcessor.TypeFlags.FACE_ALIGN_HALF != 0:
+if f & SPTF.FACE_TYPE_HALF != 0:
 target_face_type = FaceType.HALF
-elif f & SampleProcessor.TypeFlags.FACE_ALIGN_FULL != 0:
+elif f & SPTF.FACE_TYPE_FULL != 0:
 target_face_type = FaceType.FULL
-elif f & SampleProcessor.TypeFlags.FACE_ALIGN_HEAD != 0:
+elif f & SPTF.FACE_TYPE_HEAD != 0:
 target_face_type = FaceType.HEAD
-elif f & SampleProcessor.TypeFlags.FACE_ALIGN_AVATAR != 0:
+elif f & SPTF.FACE_TYPE_AVATAR != 0:
 target_face_type = FaceType.AVATAR
+apply_motion_blur = f & SPTF.OPT_APPLY_MOTION_BLUR != 0
 if img_type == 4:
 l = sample.landmarks
 l = np.concatenate ( [ np.expand_dims(l[:,0] / w,-1), np.expand_dims(l[:,1] / h,-1) ], -1 )
@@ -151,8 +165,15 @@ class SampleProcessor(object):
 cur_sample = sample
 if is_face_sample:
+if apply_motion_blur and sample_process_options.motion_blur is not None:
+chance, mb_range = sample_process_options.motion_blur
+if np.random.randint(100) < chance :
+dim = mb_range[ np.random.randint(len(mb_range) ) ]
+img = imagelib.LinearMotionBlur (img, dim, np.random.randint(180) )
 if face_mask_type == 1:
-img = np.concatenate( (img, LandmarksProcessor.get_image_hull_mask (img.shape, cur_sample.landmarks, cur_sample.ie_polys) ), -1 )
+mask = LandmarksProcessor.get_image_hull_mask (img.shape, cur_sample.landmarks, cur_sample.ie_polys)
+img = np.concatenate( (img, mask ), -1 )
 elif face_mask_type == 2:
 mask = LandmarksProcessor.get_image_eye_mask (img.shape, cur_sample.landmarks)
 mask = np.expand_dims (cv2.blur (mask, ( w // 32, w // 32 ) ), -1)
@@ -180,16 +201,16 @@ class SampleProcessor(object):
 img_bgr = img[...,0:3]
 img_mask = img[...,3:4]
-if f & SampleProcessor.TypeFlags.MODE_BGR != 0:
+if f & SPTF.MODE_BGR != 0:
 img = img
-elif f & SampleProcessor.TypeFlags.MODE_BGR_SHUFFLE != 0:
+elif f & SPTF.MODE_BGR_SHUFFLE != 0:
 img_bgr = np.take (img_bgr, np.random.permutation(img_bgr.shape[-1]), axis=-1)
 img = np.concatenate ( (img_bgr,img_mask) , -1 )
-elif f & SampleProcessor.TypeFlags.MODE_G != 0:
+elif f & SPTF.MODE_G != 0:
 img = np.concatenate ( (np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),-1),img_mask) , -1 )
-elif f & SampleProcessor.TypeFlags.MODE_GGG != 0:
+elif f & SPTF.MODE_GGG != 0:
 img = np.concatenate ( ( np.repeat ( np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),-1), (3,), -1), img_mask), -1)
-elif is_face_sample and f & SampleProcessor.TypeFlags.MODE_M != 0:
+elif is_face_sample and f & SPTF.MODE_M != 0:
 if face_mask_type== 0:
 raise ValueError ('no face_mask_type defined')
 img = img_mask
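
For a trainer that wants the new augmentation, the pattern mirrors the FANSegmentator change above: pass motion_blur to Options and OR the flag into the sample types that should receive it (the resolution value here is a placeholder):

    f = SampleProcessor.TypeFlags
    options = SampleProcessor.Options(random_flip=True, motion_blur=[25, 1])
    output_sample_types = [ [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR_SHUFFLE | f.OPT_APPLY_MOTION_BLUR, 256] ]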