refactoring

Colombo 2020-03-01 19:09:50 +04:00
parent cbff72f597
commit 757ec77e44
4 changed files with 154 additions and 228 deletions

View file

@@ -394,7 +394,7 @@ def color_transfer_mix(img_src,img_trg):
def color_transfer(ct_mode, img_src, img_trg):
"""
color transfer for [0,1] float inputs
color transfer for [0,1] float32 inputs
"""
if ct_mode == 'lct':
out = linear_color_transfer (img_src, img_trg)
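Note: for reference, a minimal usage sketch of the clarified contract, assuming float32 BGR inputs in [0,1] (the random images and the call below are purely illustrative):

    import numpy as np
    from core import imagelib  # same import style as SampleProcessor uses below

    img_src = np.random.rand(96, 96, 3).astype(np.float32)  # illustrative [0,1] BGR
    img_trg = np.random.rand(96, 96, 3).astype(np.float32)
    out = imagelib.color_transfer('lct', img_src, img_trg)   # linear color transfer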

View file

@@ -155,6 +155,7 @@ class QModel(ModelBase):
devices = device_config.devices
resolution = self.resolution = 96
self.face_type = FaceType.FULL
ae_dims = 128
e_dims = 128
d_dims = 64
@@ -357,9 +358,6 @@ class QModel(ModelBase):
# initializing sample generators
if self.is_training:
t = SampleProcessor.Types
face_type = t.FACE_TYPE_FULL
training_data_src_path = self.training_data_src_path if not self.pretrain else self.get_pretraining_data_path()
training_data_dst_path = self.training_data_dst_path if not self.pretrain else self.get_pretraining_data_path()
@@ -370,16 +368,18 @@ class QModel(ModelBase):
self.set_training_data_generators ([
SampleGeneratorFace(training_data_src_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
sample_process_options=SampleProcessor.Options(random_flip=True if self.pretrain else False),
output_sample_types = [ {'types' : (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution':resolution, },
{'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution, },
{'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_FACE_MASK_ALL_HULL), 'data_format':nn.data_format, 'resolution': resolution } ],
output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':True, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.ALL_HULL, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}
],
generators_count=src_generators_count ),
SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
sample_process_options=SampleProcessor.Options(random_flip=True if self.pretrain else False),
output_sample_types = [ {'types' : (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution':resolution},
{'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution},
{'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_FACE_MASK_ALL_HULL), 'data_format':nn.data_format, 'resolution': resolution} ],
output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':True, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.ALL_HULL, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}
],
generators_count=dst_generators_count )
])
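For clarity, the shape of this change: each output sample is now described by a flat dict of named fields instead of a packed Types tuple. A side-by-side sketch with illustrative values:

    from facelib import FaceType
    from samplelib import SampleProcessor  # assumed import path

    # old: behaviour packed into a tuple of Types members
    #   {'types': (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR), 'resolution': 96}
    # new: one named field per axis of behaviour
    spec = {'sample_type':  SampleProcessor.SampleType.FACE_IMAGE,
            'warp':         True,           # was implied by IMG_WARPED_TRANSFORMED
            'transform':    True,
            'channel_type': SampleProcessor.ChannelType.BGR,
            'face_type':    FaceType.FULL,  # a real FaceType, no SPTF mapping needed
            'data_format':  nn.data_format if 'nn' in dir() else 'NHWC',  # illustrative
            'resolution':   96}

Each axis (warp, channels, face type, mask) now varies independently rather than being baked into enum combinations.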
@@ -449,10 +449,8 @@ class QModel(ModelBase):
#override
def get_MergerConfig(self):
face_type = FaceType.FULL
import merger
return self.predictor_func, (self.resolution, self.resolution, 3), merger.MergerConfigMasked(face_type=face_type,
return self.predictor_func, (self.resolution, self.resolution, 3), merger.MergerConfigMasked(face_type=self.face_type,
default_mode = 'overlay',
)

View file

@@ -344,6 +344,11 @@ class SAEHDModel(ModelBase):
devices = device_config.devices
self.resolution = resolution = self.options['resolution']
self.face_type = {'h' : FaceType.HALF,
'mf' : FaceType.MID_FULL,
'f' : FaceType.FULL,
'wf' : FaceType.WHOLE_FACE}[ self.options['face_type'] ]
learn_mask = self.options['learn_mask']
eyes_prio = self.options['eyes_prio']
archi = self.options['archi']
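This table-style mapping replaces the four-branch if/elif chains deleted further down in this diff. The pattern in isolation (FACE_TYPE_BY_OPTION is an illustrative name, not from the commit):

    from facelib import FaceType

    FACE_TYPE_BY_OPTION = {'h':  FaceType.HALF,
                           'mf': FaceType.MID_FULL,
                           'f':  FaceType.FULL,
                           'wf': FaceType.WHOLE_FACE}

    face_type = FACE_TYPE_BY_OPTION['f']  # unknown keys fail fast with KeyError

Resolving the option string once at init time also lets get_MergerConfig reuse self.face_type instead of re-deriving it.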
@@ -722,23 +727,11 @@ class SAEHDModel(ModelBase):
# initializing sample generators
if self.is_training:
t = SampleProcessor.Types
if self.options['face_type'] == 'h':
face_type = t.FACE_TYPE_HALF
elif self.options['face_type'] == 'mf':
face_type = t.FACE_TYPE_MID_FULL
elif self.options['face_type'] == 'f':
face_type = t.FACE_TYPE_FULL
elif self.options['face_type'] == 'wf':
face_type = t.FACE_TYPE_WHOLE_FACE
training_data_src_path = self.training_data_src_path if not self.pretrain else self.get_pretraining_data_path()
training_data_dst_path = self.training_data_dst_path if not self.pretrain else self.get_pretraining_data_path()
random_ct_samples_path=training_data_dst_path if self.options['ct_mode'] != 'none' and not self.pretrain else None
t_img_warped = t.IMG_WARPED_TRANSFORMED if self.options['random_warp'] else t.IMG_TRANSFORMED
cpu_count = min(multiprocessing.cpu_count(), 8)
src_generators_count = cpu_count // 2
dst_generators_count = cpu_count // 2
@@ -748,17 +741,17 @@ class SAEHDModel(ModelBase):
self.set_training_data_generators ([
SampleGeneratorFace(training_data_src_path, random_ct_samples_path=random_ct_samples_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
output_sample_types = [ {'types' : (t_img_warped, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution, 'ct_mode': self.options['ct_mode'] },
{'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution, 'ct_mode': self.options['ct_mode'] },
{'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_FACE_MASK_ALL_EYES_HULL), 'data_format':nn.data_format, 'resolution': resolution },
output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':self.options['random_warp'], 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'ct_mode': self.options['ct_mode'], 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'ct_mode': self.options['ct_mode'], 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.ALL_EYES_HULL, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
],
generators_count=src_generators_count ),
SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
output_sample_types = [ {'types' : (t_img_warped, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution},
{'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution},
{'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_FACE_MASK_ALL_EYES_HULL), 'data_format':nn.data_format, 'resolution': resolution},
output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':self.options['random_warp'], 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.ALL_EYES_HULL, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
],
generators_count=dst_generators_count )
])
@@ -904,17 +897,8 @@ class SAEHDModel(ModelBase):
#override
def get_MergerConfig(self):
if self.options['face_type'] == 'h':
face_type = FaceType.HALF
elif self.options['face_type'] == 'mf':
face_type = FaceType.MID_FULL
elif self.options['face_type'] == 'f':
face_type = FaceType.FULL
elif self.options['face_type'] == 'wf':
face_type = FaceType.WHOLE_FACE
import merger
return self.predictor_func, (self.options['resolution'], self.options['resolution'], 3), merger.MergerConfigMasked(face_type=face_type,
return self.predictor_func, (self.options['resolution'], self.options['resolution'], 3), merger.MergerConfigMasked(face_type=self.face_type,
default_mode = 'overlay' if self.options['ct_mode'] != 'none' or self.options['face_style_power'] or self.options['bg_style_power'] else 'seamless',
)

View file

@@ -8,42 +8,30 @@ from core import imagelib
from facelib import FaceType, LandmarksProcessor
class SampleProcessor(object):
class Types(IntEnum):
class SampleType(IntEnum):
NONE = 0
FACE_IMAGE = 1
FACE_MASK = 2
LANDMARKS_ARRAY = 3
PITCH_YAW_ROLL = 4
PITCH_YAW_ROLL_SIGMOID = 5
IMG_TYPE_BEGIN = 1
IMG_SOURCE = 1
IMG_WARPED = 2
IMG_WARPED_TRANSFORMED = 3
IMG_TRANSFORMED = 4
IMG_LANDMARKS_ARRAY = 5 #currently unused
IMG_PITCH_YAW_ROLL = 6
IMG_PITCH_YAW_ROLL_SIGMOID = 7
IMG_TYPE_END = 10
class ChannelType(IntEnum):
NONE = 0
BGR = 1 #BGR
G = 2 #Grayscale
GGG = 3 #3xGrayscale
BGR_SHUFFLE = 4 #BGR shuffle
BGR_RANDOM_HSV_SHIFT = 5
BGR_RANDOM_RGB_LEVELS = 6
G_MASK = 7
FACE_TYPE_BEGIN = 10
FACE_TYPE_HALF = 10
FACE_TYPE_MID_FULL = 11
FACE_TYPE_FULL = 12
FACE_TYPE_WHOLE_FACE = 13
FACE_TYPE_HEAD = 14 #currently unused
FACE_TYPE_AVATAR = 15 #currently unused
FACE_TYPE_FULL_NO_ALIGN = 16
FACE_TYPE_HEAD_NO_ALIGN = 17
FACE_TYPE_END = 20
MODE_BEGIN = 40
MODE_BGR = 40 #BGR
MODE_G = 41 #Grayscale
MODE_GGG = 42 #3xGrayscale
MODE_FACE_MASK_ALL_HULL = 43 #mask all hull as grayscale
MODE_FACE_MASK_EYES_HULL = 44 #mask eyes hull as grayscale
MODE_FACE_MASK_ALL_EYES_HULL = 45 #combo all + eyes as grayscale
MODE_FACE_MASK_STRUCT = 46 #mask structure as grayscale
MODE_BGR_SHUFFLE = 47 #BGR shuffle
MODE_BGR_RANDOM_HSV_SHIFT = 48
MODE_BGR_RANDOM_RGB_LEVELS = 49
MODE_END = 50
class FaceMaskType(IntEnum):
NONE = 0
ALL_HULL = 1 #mask all hull as grayscale
EYES_HULL = 2 #mask eyes hull as grayscale
ALL_EYES_HULL = 3 #combo all + eyes as grayscale
STRUCT = 4 #mask structure as grayscale
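Together the three enums cover orthogonal axes: what to extract (SampleType), how to encode channels (ChannelType), and which mask to build (FaceMaskType). A sketch of a mask request under the new scheme (import paths assumed):

    from facelib import FaceType
    from samplelib import SampleProcessor  # assumed import path

    mask_spec = {'sample_type':    SampleProcessor.SampleType.FACE_MASK,
                 'warp':           False,
                 'transform':      True,
                 'channel_type':   SampleProcessor.ChannelType.G,  # masks are grayscale
                 'face_mask_type': SampleProcessor.FaceMaskType.ALL_EYES_HULL,
                 'face_type':      FaceType.FULL,
                 'data_format':    'NHWC',
                 'resolution':     96}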
class Options(object):
def __init__(self, random_flip = True, rotation_range=[-10,10], scale_range=[-0.05, 0.05], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05] ):
@@ -53,18 +41,11 @@ class SampleProcessor(object):
self.tx_range = tx_range
self.ty_range = ty_range
SPTF_FACETYPE_TO_FACETYPE = { Types.FACE_TYPE_HALF : FaceType.HALF,
Types.FACE_TYPE_MID_FULL : FaceType.MID_FULL,
Types.FACE_TYPE_FULL : FaceType.FULL,
Types.FACE_TYPE_WHOLE_FACE : FaceType.WHOLE_FACE,
Types.FACE_TYPE_HEAD : FaceType.HEAD,
Types.FACE_TYPE_FULL_NO_ALIGN : FaceType.FULL_NO_ALIGN,
Types.FACE_TYPE_HEAD_NO_ALIGN : FaceType.HEAD_NO_ALIGN,
}
@staticmethod
def process (samples, sample_process_options, output_sample_types, debug, ct_sample=None):
SPTF = SampleProcessor.Types
SPST = SampleProcessor.SampleType
SPCT = SampleProcessor.ChannelType
SPFMT = SampleProcessor.FaceMaskType
sample_rnd_seed = np.random.randint(0x80000000)
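Context for the seeding: constructing fresh RandomState streams from this one per-sample seed is what keeps the colour augmentations below (BGR_SHUFFLE, HSV shift, RGB levels) identical across paired outputs of the same sample. A toy check:

    import numpy as np

    sample_rnd_seed = np.random.randint(0x80000000)
    a = np.random.RandomState(sample_rnd_seed).permutation(3)
    b = np.random.RandomState(sample_rnd_seed).permutation(3)
    assert (a == b).all()  # e.g. the same channel order for every paired output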
@@ -83,76 +64,39 @@ class SampleProcessor(object):
outputs_sample = []
for opts in output_sample_types:
sample_type = opts.get('sample_type', SPST.NONE)
channel_type = opts.get('channel_type', SPCT.NONE)
resolution = opts.get('resolution', 0)
types = opts.get('types', [] )
warp = opts.get('warp', False)
transform = opts.get('transform', False)
motion_blur = opts.get('motion_blur', None)
gaussian_blur = opts.get('gaussian_blur', None)
ct_mode = opts.get('ct_mode', 'None')
normalize_tanh = opts.get('normalize_tanh', False)
ct_mode = opts.get('ct_mode', 'None')
data_format = opts.get('data_format', 'NHWC')
img_type = SPTF.NONE
target_face_type = SPTF.NONE
mode_type = SPTF.NONE
for t in types:
if t >= SPTF.IMG_TYPE_BEGIN and t < SPTF.IMG_TYPE_END:
img_type = t
elif t >= SPTF.FACE_TYPE_BEGIN and t < SPTF.FACE_TYPE_END:
target_face_type = t
elif t >= SPTF.MODE_BEGIN and t < SPTF.MODE_END:
mode_type = t
if sample_type == SPST.FACE_IMAGE or sample_type == SPST.FACE_MASK:
if not is_face_sample:
raise ValueError("face_samples should be provided for sample_type FACE_*")
if is_face_sample:
if target_face_type == SPTF.NONE:
raise ValueError("target face type must be defined for face samples")
else:
if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL:
raise ValueError("MODE_FACE_MASK_ALL_HULL applicable only for face samples")
if mode_type == SPTF.MODE_FACE_MASK_EYES_HULL:
raise ValueError("MODE_FACE_MASK_EYES_HULL applicable only for face samples")
if mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:
raise ValueError("MODE_FACE_MASK_ALL_EYES_HULL applicable only for face samples")
if mode_type == SPTF.MODE_FACE_MASK_STRUCT:
raise ValueError("MODE_FACE_MASK_STRUCT applicable only for face samples")
face_type = opts.get('face_type', None)
face_mask_type = opts.get('face_mask_type', SPFMT.NONE)
can_warp = (img_type==SPTF.IMG_WARPED or img_type==SPTF.IMG_WARPED_TRANSFORMED)
can_transform = (img_type==SPTF.IMG_WARPED_TRANSFORMED or img_type==SPTF.IMG_TRANSFORMED)
if face_type is None:
raise ValueError("face_type must be defined for face samples")
if img_type == SPTF.NONE:
raise ValueError ('expected IMG_ type')
if face_type > sample.face_type:
raise Exception ('sample %s type %s does not match model requirement %s. Consider extracting the necessary face type.' % (sample.filename, sample.face_type, face_type) )
if img_type == SPTF.IMG_LANDMARKS_ARRAY:
l = sample.landmarks
l = np.concatenate ( [ np.expand_dims(l[:,0] / w,-1), np.expand_dims(l[:,1] / h,-1) ], -1 )
l = np.clip(l, 0.0, 1.0)
out_sample = l
elif img_type == SPTF.IMG_PITCH_YAW_ROLL or img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
pitch_yaw_roll = sample.get_pitch_yaw_roll()
if sample_type == SPST.FACE_IMAGE or sample_type == SPST.FACE_MASK:
if params['flip']:
yaw = -yaw
if img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
pitch = np.clip( (pitch / math.pi) / 2.0 + 0.5, 0, 1)
yaw = np.clip( (yaw / math.pi) / 2.0 + 0.5, 0, 1)
roll = np.clip( (roll / math.pi) / 2.0 + 0.5, 0, 1)
out_sample = (pitch, yaw, roll)
else:
if mode_type == SPTF.NONE:
raise ValueError ('expected MODE_ type')
if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:
if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:
if sample_type == SPST.FACE_MASK:
if face_mask_type == SPFMT.ALL_HULL or \
face_mask_type == SPFMT.EYES_HULL or \
face_mask_type == SPFMT.ALL_EYES_HULL:
if face_mask_type == SPFMT.ALL_HULL or \
face_mask_type == SPFMT.ALL_EYES_HULL:
if sample.eyebrows_expand_mod is not None:
all_mask = LandmarksProcessor.get_image_hull_mask (sample_bgr.shape, sample.landmarks, eyebrows_expand_mod=sample.eyebrows_expand_mod )
else:
@@ -160,27 +104,42 @@ class SampleProcessor(object):
all_mask = np.clip(all_mask, 0, 1)
if mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:
if face_mask_type == SPFMT.EYES_HULL or \
face_mask_type == SPFMT.ALL_EYES_HULL:
eyes_mask = LandmarksProcessor.get_image_eye_mask (sample_bgr.shape, sample.landmarks)
eyes_mask = np.clip(eyes_mask, 0, 1)
if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL:
if face_mask_type == SPFMT.ALL_HULL:
img = all_mask
elif mode_type == SPTF.MODE_FACE_MASK_EYES_HULL:
elif face_mask_type == SPFMT.EYES_HULL:
img = eyes_mask
elif mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:
elif face_mask_type == SPFMT.ALL_EYES_HULL:
img = all_mask + eyes_mask
if sample.ie_polys is not None:
sample.ie_polys.overlay_mask(img)
elif mode_type == SPTF.MODE_FACE_MASK_STRUCT:
elif face_mask_type == SPFMT.STRUCT:
if sample.eyebrows_expand_mod is not None:
img = LandmarksProcessor.get_face_struct_mask (sample_bgr.shape, sample.landmarks, eyebrows_expand_mod=sample.eyebrows_expand_mod )
else:
img = LandmarksProcessor.get_face_struct_mask (sample_bgr.shape, sample.landmarks)
if sample.ie_polys is not None:
sample.ie_polys.overlay_mask(img)
if sample.face_type == FaceType.MARK_ONLY:
mat = LandmarksProcessor.get_transform_mat (sample.landmarks, sample.shape[0], face_type)
img = cv2.warpAffine( img, mat, (sample.shape[0],sample.shape[0]), flags=cv2.INTER_LINEAR )
img = imagelib.warp_by_params (params, img, warp, transform, can_flip=True, border_replicate=False, cv2_inter=cv2.INTER_LINEAR)
img = cv2.resize( img, (resolution,resolution), cv2.INTER_LINEAR )[...,None]
else:
mat = LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, face_type)
img = imagelib.warp_by_params (params, img, warp, transform, can_flip=True, border_replicate=False, cv2_inter=cv2.INTER_LINEAR)
img = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=cv2.BORDER_CONSTANT, flags=cv2.INTER_LINEAR )[...,None]
if channel_type == SPCT.G:
out_sample = img.astype(np.float32)
else:
raise ValueError("only channel_type.G supported for the mask")
elif sample_type == SPST.FACE_IMAGE:
img = sample_bgr
if motion_blur is not None:
chance, mb_max_size = motion_blur
@@ -205,65 +164,31 @@ class SampleProcessor(object):
if gblur_rnd_chance < chance:
img = cv2.GaussianBlur(img, (gblur_rnd_kernel,) *2 , 0)
if is_face_sample:
target_ft = SampleProcessor.SPTF_FACETYPE_TO_FACETYPE[target_face_type]
if target_ft > sample.face_type:
raise Exception ('sample %s type %s does not match model requirement %s. Consider extract necessary type of faces.' % (sample.filename, sample.face_type, target_ft) )
if sample.face_type == FaceType.MARK_ONLY:
mat = LandmarksProcessor.get_transform_mat (sample.landmarks, sample.shape[0], target_ft)
if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL or \
mode_type == SPTF.MODE_FACE_MASK_STRUCT:
img = cv2.warpAffine( img, mat, (sample.shape[0],sample.shape[0]), flags=cv2.INTER_LINEAR )
img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=False, cv2_inter=cv2.INTER_LINEAR)
img = cv2.resize( img, (resolution,resolution), cv2.INTER_LINEAR )[...,None]
else:
mat = LandmarksProcessor.get_transform_mat (sample.landmarks, sample.shape[0], face_type)
img = cv2.warpAffine( img, mat, (sample.shape[0],sample.shape[0]), flags=cv2.INTER_CUBIC )
img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=True)
img = imagelib.warp_by_params (params, img, warp, transform, can_flip=True, border_replicate=True)
img = cv2.resize( img, (resolution,resolution), cv2.INTER_CUBIC )
else:
mat = LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, target_ft)
if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL or \
mode_type == SPTF.MODE_FACE_MASK_STRUCT:
img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=False, cv2_inter=cv2.INTER_LINEAR)
img = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=cv2.BORDER_CONSTANT, flags=cv2.INTER_LINEAR )[...,None]
else:
img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=True)
mat = LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, face_type)
img = imagelib.warp_by_params (params, img, warp, transform, can_flip=True, border_replicate=True)
img = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=cv2.BORDER_REPLICATE, flags=cv2.INTER_CUBIC )
else:
img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=True)
img = cv2.resize( img, (resolution,resolution), cv2.INTER_CUBIC )
if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL or \
mode_type == SPTF.MODE_FACE_MASK_STRUCT:
out_sample = img.astype(np.float32)
else:
img = np.clip(img.astype(np.float32), 0, 1)
# Apply random color transfer
if ct_mode is not None and ct_sample is not None:
if ct_sample_bgr is None:
ct_sample_bgr = ct_sample.load_bgr()
img = imagelib.color_transfer (ct_mode,
img,
cv2.resize( ct_sample_bgr, (resolution,resolution), cv2.INTER_LINEAR ) )
img = imagelib.color_transfer (ct_mode, img, cv2.resize( ct_sample_bgr, (resolution,resolution), cv2.INTER_LINEAR ) )
if mode_type == SPTF.MODE_BGR:
# Transform from BGR to desired channel_type
if channel_type == SPCT.BGR:
out_sample = img
elif mode_type == SPTF.MODE_BGR_SHUFFLE:
elif channel_type == SPCT.BGR_SHUFFLE:
l_rnd_state = np.random.RandomState (sample_rnd_seed)
out_sample = np.take (img, l_rnd_state.permutation(img.shape[-1]), axis=-1)
elif mode_type == SPTF.MODE_BGR_RANDOM_HSV_SHIFT:
elif channel_type == SPCT.BGR_RANDOM_HSV_SHIFT:
l_rnd_state = np.random.RandomState (sample_rnd_seed)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
@@ -272,31 +197,50 @@ class SampleProcessor(object):
v = np.clip ( v + l_rnd_state.random()-0.5, 0, 1 )
hsv = cv2.merge([h, s, v])
out_sample = np.clip( cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) , 0, 1 )
elif mode_type == SPTF.MODE_BGR_RANDOM_RGB_LEVELS:
elif channel_type == SPCT.BGR_RANDOM_RGB_LEVELS:
l_rnd_state = np.random.RandomState (sample_rnd_seed)
np_rnd = l_rnd_state.rand
inBlack = np.array([np_rnd()*0.25 , np_rnd()*0.25 , np_rnd()*0.25], dtype=np.float32)
inWhite = np.array([1.0-np_rnd()*0.25, 1.0-np_rnd()*0.25, 1.0-np_rnd()*0.25], dtype=np.float32)
inGamma = np.array([0.5+np_rnd(), 0.5+np_rnd(), 0.5+np_rnd()], dtype=np.float32)
outBlack = np.array([0.0, 0.0, 0.0], dtype=np.float32)
outWhite = np.array([1.0, 1.0, 1.0], dtype=np.float32)
out_sample = np.clip( (img - inBlack) / (inWhite - inBlack), 0, 1 )
out_sample = ( out_sample ** (1/inGamma) ) * (outWhite - outBlack) + outBlack
out_sample = np.clip(out_sample, 0, 1)
elif mode_type == SPTF.MODE_G:
elif channel_type == SPCT.G:
out_sample = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[...,None]
elif mode_type == SPTF.MODE_GGG:
elif channel_type == SPCT.GGG:
out_sample = np.repeat ( np.expand_dims(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),-1), (3,), -1)
# Final transformations
if not debug:
if normalize_tanh:
out_sample = np.clip (out_sample * 2.0 - 1.0, -1.0, 1.0)
if data_format == "NCHW":
out_sample = np.transpose(out_sample, (2,0,1) )
#else:
# img = imagelib.warp_by_params (params, img, warp, transform, can_flip=True, border_replicate=True)
# img = cv2.resize( img, (resolution,resolution), cv2.INTER_CUBIC )
elif sample_type == SPST.LANDMARKS_ARRAY:
l = sample.landmarks
l = np.concatenate ( [ np.expand_dims(l[:,0] / w,-1), np.expand_dims(l[:,1] / h,-1) ], -1 )
l = np.clip(l, 0.0, 1.0)
out_sample = l
elif sample_type == SPST.PITCH_YAW_ROLL or sample_type == SPST.PITCH_YAW_ROLL_SIGMOID:
pitch_yaw_roll = sample.get_pitch_yaw_roll()
if params['flip']:
yaw = -yaw
if sample_type == SPST.PITCH_YAW_ROLL_SIGMOID:
pitch = np.clip( (pitch / math.pi) / 2.0 + 0.5, 0, 1)
yaw = np.clip( (yaw / math.pi) / 2.0 + 0.5, 0, 1)
roll = np.clip( (roll / math.pi) / 2.0 + 0.5, 0, 1)
out_sample = (pitch, yaw, roll)
else:
raise ValueError ('expected sample_type')
outputs_sample.append ( out_sample )
outputs += [outputs_sample]
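
End to end, the model generators above and this processor meet in process(). A hedged sketch of a direct call under the new schema (import path assumed; `samples` would normally be the face Sample objects yielded by SampleGeneratorFace, so this is illustrative only):

    from facelib import FaceType
    from samplelib import SampleProcessor  # assumed import path

    opts  = SampleProcessor.Options(random_flip=False)
    specs = [{'sample_type': SampleProcessor.SampleType.FACE_IMAGE,
              'warp': True,  'transform': True,
              'channel_type': SampleProcessor.ChannelType.BGR,
              'face_type': FaceType.FULL, 'resolution': 96},
             {'sample_type': SampleProcessor.SampleType.FACE_MASK,
              'warp': False, 'transform': True,
              'channel_type': SampleProcessor.ChannelType.G,
              'face_mask_type': SampleProcessor.FaceMaskType.ALL_HULL,
              'face_type': FaceType.FULL, 'resolution': 96}]

    # `samples` must already be loaded face samples; ct_sample defaults to None
    batch = SampleProcessor.process(samples, opts, specs, debug=False)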