mirror of https://github.com/iperov/DeepFaceLab.git
synced 2025-07-06 13:02:15 -07:00

Commit 757ec77e44 (parent cbff72f597): refactoring

4 changed files with 154 additions and 228 deletions
@@ -394,7 +394,7 @@ def color_transfer_mix(img_src,img_trg):
 def color_transfer(ct_mode, img_src, img_trg):
     """
-    color transfer for [0,1] float inputs
+    color transfer for [0,1] float32 inputs
     """
     if ct_mode == 'lct':
         out = linear_color_transfer (img_src, img_trg)

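Note (not part of the commit): the docstring fix pins down the contract, color_transfer takes float32 BGR images scaled to [0,1]. A minimal usage sketch with hypothetical file names:

    import cv2
    import numpy as np
    from core import imagelib

    # Load as float32 in [0,1], the range the docstring now states explicitly.
    src = cv2.imread('src_face.jpg').astype(np.float32) / 255.0
    trg = cv2.imread('trg_face.jpg').astype(np.float32) / 255.0

    out = imagelib.color_transfer('lct', src, trg)   # the 'lct' branch shown above
    cv2.imwrite('out.jpg', (np.clip(out, 0, 1) * 255).astype(np.uint8))
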
@@ -155,6 +155,7 @@ class QModel(ModelBase):
         devices = device_config.devices

         resolution = self.resolution = 96
+        self.face_type = FaceType.FULL
         ae_dims = 128
         e_dims = 128
         d_dims = 64

@@ -357,9 +358,6 @@ class QModel(ModelBase):

         # initializing sample generators
         if self.is_training:
-            t = SampleProcessor.Types
-            face_type = t.FACE_TYPE_FULL
-
             training_data_src_path = self.training_data_src_path if not self.pretrain else self.get_pretraining_data_path()
             training_data_dst_path = self.training_data_dst_path if not self.pretrain else self.get_pretraining_data_path()

@@ -370,16 +368,18 @@ class QModel(ModelBase):
             self.set_training_data_generators ([
                 SampleGeneratorFace(training_data_src_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
                         sample_process_options=SampleProcessor.Options(random_flip=True if self.pretrain else False),
-                        output_sample_types = [ {'types' : (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution':resolution, },
-                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution, },
-                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_FACE_MASK_ALL_HULL), 'data_format':nn.data_format, 'resolution': resolution } ],
+                        output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp':True,  'transform':True, 'channel_type': SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
+                                                {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp':False, 'transform':True, 'channel_type': SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
+                                                {'sample_type': SampleProcessor.SampleType.FACE_MASK,  'warp':False, 'transform':True, 'channel_type': SampleProcessor.ChannelType.G, 'face_mask_type': SampleProcessor.FaceMaskType.ALL_HULL, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}
+                                              ],
                         generators_count=src_generators_count ),

                 SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
                         sample_process_options=SampleProcessor.Options(random_flip=True if self.pretrain else False),
-                        output_sample_types = [ {'types' : (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution':resolution},
-                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution},
-                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_FACE_MASK_ALL_HULL), 'data_format':nn.data_format, 'resolution': resolution} ],
+                        output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp':True,  'transform':True, 'channel_type': SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
+                                                {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp':False, 'transform':True, 'channel_type': SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
+                                                {'sample_type': SampleProcessor.SampleType.FACE_MASK,  'warp':False, 'transform':True, 'channel_type': SampleProcessor.ChannelType.G, 'face_mask_type': SampleProcessor.FaceMaskType.ALL_HULL, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}
+                                              ],
                         generators_count=dst_generators_count )
             ])

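For orientation, here is one entry of the new output_sample_types schema written out on its own: the packed (img_type, face_type, mode) tuple becomes explicit keys. The dict below is illustrative, not additional code from the commit; values mirror the warped src entry above, and the import paths are the ones the models use elsewhere in the repo:

    from samplelib import SampleProcessor
    from facelib import FaceType

    request = {'sample_type' : SampleProcessor.SampleType.FACE_IMAGE,
               'warp'        : True,    # was implied by t.IMG_WARPED_TRANSFORMED
               'transform'   : True,    # random rotate/scale/shift still applied
               'channel_type': SampleProcessor.ChannelType.BGR,
               'face_type'   : FaceType.FULL,
               'data_format' : 'NHWC',  # or 'NCHW'
               'resolution'  : 96}
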
@@ -449,10 +449,8 @@ class QModel(ModelBase):

     #override
     def get_MergerConfig(self):
-        face_type = FaceType.FULL
-
         import merger
-        return self.predictor_func, (self.resolution, self.resolution, 3), merger.MergerConfigMasked(face_type=face_type,
+        return self.predictor_func, (self.resolution, self.resolution, 3), merger.MergerConfigMasked(face_type=self.face_type,
                                      default_mode = 'overlay',
                                     )

@@ -344,6 +344,11 @@ class SAEHDModel(ModelBase):
         devices = device_config.devices

         self.resolution = resolution = self.options['resolution']
+        self.face_type = {'h'  : FaceType.HALF,
+                          'mf' : FaceType.MID_FULL,
+                          'f'  : FaceType.FULL,
+                          'wf' : FaceType.WHOLE_FACE}[ self.options['face_type'] ]
+
         learn_mask = self.options['learn_mask']
         eyes_prio = self.options['eyes_prio']
         archi = self.options['archi']

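The dict lookup added above replaces if/elif chains that previously appeared both in the training setup and in get_MergerConfig (both deleted below). A standalone sketch of the idiom, assuming only the repo's FaceType enum:

    from facelib import FaceType

    # One table instead of repeated if/elif chains; an unknown option now
    # fails loudly with KeyError instead of leaving face_type undefined.
    FACE_TYPES = {'h'  : FaceType.HALF,
                  'mf' : FaceType.MID_FULL,
                  'f'  : FaceType.FULL,
                  'wf' : FaceType.WHOLE_FACE}

    face_type = FACE_TYPES['wf']   # FaceType.WHOLE_FACE
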
@@ -722,23 +727,11 @@ class SAEHDModel(ModelBase):

         # initializing sample generators
         if self.is_training:
-            t = SampleProcessor.Types
-            if self.options['face_type'] == 'h':
-                face_type = t.FACE_TYPE_HALF
-            elif self.options['face_type'] == 'mf':
-                face_type = t.FACE_TYPE_MID_FULL
-            elif self.options['face_type'] == 'f':
-                face_type = t.FACE_TYPE_FULL
-            elif self.options['face_type'] == 'wf':
-                face_type = t.FACE_TYPE_WHOLE_FACE
-
             training_data_src_path = self.training_data_src_path if not self.pretrain else self.get_pretraining_data_path()
             training_data_dst_path = self.training_data_dst_path if not self.pretrain else self.get_pretraining_data_path()

             random_ct_samples_path=training_data_dst_path if self.options['ct_mode'] != 'none' and not self.pretrain else None

-            t_img_warped = t.IMG_WARPED_TRANSFORMED if self.options['random_warp'] else t.IMG_TRANSFORMED
-
             cpu_count = min(multiprocessing.cpu_count(), 8)
             src_generators_count = cpu_count // 2
             dst_generators_count = cpu_count // 2

@@ -748,17 +741,17 @@ class SAEHDModel(ModelBase):
             self.set_training_data_generators ([
                 SampleGeneratorFace(training_data_src_path, random_ct_samples_path=random_ct_samples_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
                         sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
-                        output_sample_types = [ {'types' : (t_img_warped, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution, 'ct_mode': self.options['ct_mode'] },
-                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution, 'ct_mode': self.options['ct_mode'] },
-                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_FACE_MASK_ALL_EYES_HULL), 'data_format':nn.data_format, 'resolution': resolution },
+                        output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp':self.options['random_warp'], 'transform':True, 'channel_type': SampleProcessor.ChannelType.BGR, 'ct_mode': self.options['ct_mode'], 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
+                                                {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp':False, 'transform':True, 'channel_type': SampleProcessor.ChannelType.BGR, 'ct_mode': self.options['ct_mode'], 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
+                                                {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False, 'transform':True, 'channel_type': SampleProcessor.ChannelType.G, 'face_mask_type': SampleProcessor.FaceMaskType.ALL_EYES_HULL, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                                               ],
                         generators_count=src_generators_count ),

                 SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
                         sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
-                        output_sample_types = [ {'types' : (t_img_warped, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution},
-                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'data_format':nn.data_format, 'resolution': resolution},
-                                                {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_FACE_MASK_ALL_EYES_HULL), 'data_format':nn.data_format, 'resolution': resolution},
+                        output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp':self.options['random_warp'], 'transform':True, 'channel_type': SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
+                                                {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp':False, 'transform':True, 'channel_type': SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
+                                                {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False, 'transform':True, 'channel_type': SampleProcessor.ChannelType.G, 'face_mask_type': SampleProcessor.FaceMaskType.ALL_EYES_HULL, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                                               ],
                         generators_count=dst_generators_count )
             ])

@@ -904,17 +897,8 @@ class SAEHDModel(ModelBase):

     #override
     def get_MergerConfig(self):
-        if self.options['face_type'] == 'h':
-            face_type = FaceType.HALF
-        elif self.options['face_type'] == 'mf':
-            face_type = FaceType.MID_FULL
-        elif self.options['face_type'] == 'f':
-            face_type = FaceType.FULL
-        elif self.options['face_type'] == 'wf':
-            face_type = FaceType.WHOLE_FACE
-
         import merger
-        return self.predictor_func, (self.options['resolution'], self.options['resolution'], 3), merger.MergerConfigMasked(face_type=face_type,
+        return self.predictor_func, (self.options['resolution'], self.options['resolution'], 3), merger.MergerConfigMasked(face_type=self.face_type,
                                      default_mode = 'overlay' if self.options['ct_mode'] != 'none' or self.options['face_style_power'] or self.options['bg_style_power'] else 'seamless',
                                     )

@@ -8,42 +8,30 @@ from core import imagelib
 from facelib import FaceType, LandmarksProcessor

 class SampleProcessor(object):
-    class Types(IntEnum):
+    class SampleType(IntEnum):
         NONE = 0
-
-        IMG_TYPE_BEGIN = 1
-        IMG_SOURCE = 1
-        IMG_WARPED = 2
-        IMG_WARPED_TRANSFORMED = 3
-        IMG_TRANSFORMED = 4
-        IMG_LANDMARKS_ARRAY = 5 #currently unused
-        IMG_PITCH_YAW_ROLL = 6
-        IMG_PITCH_YAW_ROLL_SIGMOID = 7
-        IMG_TYPE_END = 10
-
-        FACE_TYPE_BEGIN = 10
-        FACE_TYPE_HALF = 10
-        FACE_TYPE_MID_FULL = 11
-        FACE_TYPE_FULL = 12
-        FACE_TYPE_WHOLE_FACE = 13
-        FACE_TYPE_HEAD = 14 #currently unused
-        FACE_TYPE_AVATAR = 15 #currently unused
-        FACE_TYPE_FULL_NO_ALIGN = 16
-        FACE_TYPE_HEAD_NO_ALIGN = 17
-        FACE_TYPE_END = 20
-
-        MODE_BEGIN = 40
-        MODE_BGR = 40 #BGR
-        MODE_G = 41 #Grayscale
-        MODE_GGG = 42 #3xGrayscale
-        MODE_FACE_MASK_ALL_HULL = 43 #mask all hull as grayscale
-        MODE_FACE_MASK_EYES_HULL = 44 #mask eyes hull as grayscale
-        MODE_FACE_MASK_ALL_EYES_HULL = 45 #combo all + eyes as grayscale
-        MODE_FACE_MASK_STRUCT = 46 #mask structure as grayscale
-        MODE_BGR_SHUFFLE = 47 #BGR shuffle
-        MODE_BGR_RANDOM_HSV_SHIFT = 48
-        MODE_BGR_RANDOM_RGB_LEVELS = 49
-        MODE_END = 50
+        FACE_IMAGE = 1
+        FACE_MASK = 2
+        LANDMARKS_ARRAY = 3
+        PITCH_YAW_ROLL = 4
+        PITCH_YAW_ROLL_SIGMOID = 5
+
+    class ChannelType(IntEnum):
+        NONE = 0
+        BGR = 1                   #BGR
+        G = 2                     #Grayscale
+        GGG = 3                   #3xGrayscale
+        BGR_SHUFFLE = 4           #BGR shuffle
+        BGR_RANDOM_HSV_SHIFT = 5
+        BGR_RANDOM_RGB_LEVELS = 6
+        G_MASK = 7
+
+    class FaceMaskType(IntEnum):
+        NONE = 0
+        ALL_HULL = 1      #mask all hull as grayscale
+        EYES_HULL = 2     #mask eyes hull as grayscale
+        ALL_EYES_HULL = 3 #combo all + eyes as grayscale
+        STRUCT = 4        #mask structure as grayscale

     class Options(object):
         def __init__(self, random_flip = True, rotation_range=[-10,10], scale_range=[-0.05, 0.05], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05] ):

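The old flat Types enum multiplexed three concerns with numeric ranges and *_BEGIN/*_END sentinels; splitting it into three IntEnums removes the range arithmetic. A minimal standalone sketch of the difference (not from the commit):

    from enum import IntEnum

    class ChannelType(IntEnum):
        NONE = 0
        BGR = 1
        G = 2

    # Old style recovered the "mode" by range check, e.g.
    #     if t >= SPTF.MODE_BEGIN and t < SPTF.MODE_END: mode_type = t
    # New style: each axis is its own type, so plain equality suffices.
    channel = ChannelType.G
    assert channel == ChannelType.G
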
@@ -53,21 +41,14 @@ class SampleProcessor(object):
             self.tx_range = tx_range
             self.ty_range = ty_range

-    SPTF_FACETYPE_TO_FACETYPE = { Types.FACE_TYPE_HALF : FaceType.HALF,
-                                  Types.FACE_TYPE_MID_FULL : FaceType.MID_FULL,
-                                  Types.FACE_TYPE_FULL : FaceType.FULL,
-                                  Types.FACE_TYPE_WHOLE_FACE : FaceType.WHOLE_FACE,
-                                  Types.FACE_TYPE_HEAD : FaceType.HEAD,
-                                  Types.FACE_TYPE_FULL_NO_ALIGN : FaceType.FULL_NO_ALIGN,
-                                  Types.FACE_TYPE_HEAD_NO_ALIGN : FaceType.HEAD_NO_ALIGN,
-                                }
-
     @staticmethod
     def process (samples, sample_process_options, output_sample_types, debug, ct_sample=None):
-        SPTF = SampleProcessor.Types
+        SPST = SampleProcessor.SampleType
+        SPCT = SampleProcessor.ChannelType
+        SPFMT = SampleProcessor.FaceMaskType

         sample_rnd_seed = np.random.randint(0x80000000)

         outputs = []
         for sample in samples:
             sample_bgr = sample.load_bgr()

@@ -83,104 +64,82 @@ class SampleProcessor(object):

             outputs_sample = []
             for opts in output_sample_types:
-                resolution = opts.get('resolution', 0)
-                types = opts.get('types', [] )
-
-                motion_blur = opts.get('motion_blur', None)
-                gaussian_blur = opts.get('gaussian_blur', None)
-
-                ct_mode = opts.get('ct_mode', 'None')
-                normalize_tanh = opts.get('normalize_tanh', False)
-                data_format = opts.get('data_format', 'NHWC')
+                sample_type = opts.get('sample_type', SPST.NONE)
+                channel_type = opts.get('channel_type', SPCT.NONE)
+                resolution = opts.get('resolution', 0)
+                warp = opts.get('warp', False)
+                transform = opts.get('transform', False)
+                motion_blur = opts.get('motion_blur', None)
+                gaussian_blur = opts.get('gaussian_blur', None)
+                normalize_tanh = opts.get('normalize_tanh', False)
+                ct_mode = opts.get('ct_mode', 'None')
+                data_format = opts.get('data_format', 'NHWC')

-                img_type = SPTF.NONE
-                target_face_type = SPTF.NONE
-                mode_type = SPTF.NONE
-                for t in types:
-                    if t >= SPTF.IMG_TYPE_BEGIN and t < SPTF.IMG_TYPE_END:
-                        img_type = t
-                    elif t >= SPTF.FACE_TYPE_BEGIN and t < SPTF.FACE_TYPE_END:
-                        target_face_type = t
-                    elif t >= SPTF.MODE_BEGIN and t < SPTF.MODE_END:
-                        mode_type = t
+                if sample_type == SPST.FACE_IMAGE or sample_type == SPST.FACE_MASK:
+                    if not is_face_sample:
+                        raise ValueError("face_samples should be provided for sample_type FACE_*")

                 if is_face_sample:
-                    if target_face_type == SPTF.NONE:
-                        raise ValueError("target face type must be defined for face samples")
-                else:
-                    if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL:
-                        raise ValueError("MODE_FACE_MASK_ALL_HULL applicable only for face samples")
-                    if mode_type == SPTF.MODE_FACE_MASK_EYES_HULL:
-                        raise ValueError("MODE_FACE_MASK_EYES_HULL applicable only for face samples")
-                    if mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:
-                        raise ValueError("MODE_FACE_MASK_ALL_EYES_HULL applicable only for face samples")
-                    if mode_type == SPTF.MODE_FACE_MASK_STRUCT:
-                        raise ValueError("MODE_FACE_MASK_STRUCT applicable only for face samples")
-
-                can_warp = (img_type==SPTF.IMG_WARPED or img_type==SPTF.IMG_WARPED_TRANSFORMED)
-                can_transform = (img_type==SPTF.IMG_WARPED_TRANSFORMED or img_type==SPTF.IMG_TRANSFORMED)
-
-                if img_type == SPTF.NONE:
-                    raise ValueError ('expected IMG_ type')
-
-                if img_type == SPTF.IMG_LANDMARKS_ARRAY:
-                    l = sample.landmarks
-                    l = np.concatenate ( [ np.expand_dims(l[:,0] / w,-1), np.expand_dims(l[:,1] / h,-1) ], -1 )
-                    l = np.clip(l, 0.0, 1.0)
-                    out_sample = l
-                elif img_type == SPTF.IMG_PITCH_YAW_ROLL or img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
-                    pitch_yaw_roll = sample.get_pitch_yaw_roll()
-
-                    if params['flip']:
-                        yaw = -yaw
-
-                    if img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
-                        pitch = np.clip( (pitch / math.pi) / 2.0 + 0.5, 0, 1)
-                        yaw = np.clip( (yaw / math.pi) / 2.0 + 0.5, 0, 1)
-                        roll = np.clip( (roll / math.pi) / 2.0 + 0.5, 0, 1)
-
-                    out_sample = (pitch, yaw, roll)
-                else:
-                    if mode_type == SPTF.NONE:
-                        raise ValueError ('expected MODE_ type')
-
-                    if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
-                       mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
-                       mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:
-
-                        if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
-                           mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:
-                            if sample.eyebrows_expand_mod is not None:
-                                all_mask = LandmarksProcessor.get_image_hull_mask (sample_bgr.shape, sample.landmarks, eyebrows_expand_mod=sample.eyebrows_expand_mod )
-                            else:
-                                all_mask = LandmarksProcessor.get_image_hull_mask (sample_bgr.shape, sample.landmarks)
-                            all_mask = np.clip(all_mask, 0, 1)
-
-                        if mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
-                           mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:
-                            eyes_mask = LandmarksProcessor.get_image_eye_mask (sample_bgr.shape, sample.landmarks)
-                            eyes_mask = np.clip(eyes_mask, 0, 1)
-
-                        if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL:
-                            img = all_mask
-                        elif mode_type == SPTF.MODE_FACE_MASK_EYES_HULL:
-                            img = eyes_mask
-                        elif mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:
-                            img = all_mask + eyes_mask
-
-                        if sample.ie_polys is not None:
-                            sample.ie_polys.overlay_mask(img)
-
-                    elif mode_type == SPTF.MODE_FACE_MASK_STRUCT:
-                        if sample.eyebrows_expand_mod is not None:
-                            img = LandmarksProcessor.get_face_struct_mask (sample_bgr.shape, sample.landmarks, eyebrows_expand_mod=sample.eyebrows_expand_mod )
-                        else:
-                            img = LandmarksProcessor.get_face_struct_mask (sample_bgr.shape, sample.landmarks)
-                    else:
+                    face_type = opts.get('face_type', None)
+                    face_mask_type = opts.get('face_mask_type', SPFMT.NONE)
+
+                    if face_type is None:
+                        raise ValueError("face_type must be defined for face samples")
+
+                    if face_type > sample.face_type:
+                        raise Exception ('sample %s type %s does not match model requirement %s. Consider extract necessary type of faces.' % (sample.filename, sample.face_type, target_ft) )
+
+                if sample_type == SPST.FACE_IMAGE or sample_type == SPST.FACE_MASK:
+
+                    if sample_type == SPST.FACE_MASK:
+                        if face_mask_type == SPFMT.ALL_HULL or \
+                           face_mask_type == SPFMT.EYES_HULL or \
+                           face_mask_type == SPFMT.ALL_EYES_HULL:
+
+                            if face_mask_type == SPFMT.ALL_HULL or \
+                               face_mask_type == SPFMT.ALL_EYES_HULL:
+                                if sample.eyebrows_expand_mod is not None:
+                                    all_mask = LandmarksProcessor.get_image_hull_mask (sample_bgr.shape, sample.landmarks, eyebrows_expand_mod=sample.eyebrows_expand_mod )
+                                else:
+                                    all_mask = LandmarksProcessor.get_image_hull_mask (sample_bgr.shape, sample.landmarks)
+                                all_mask = np.clip(all_mask, 0, 1)
+
+                            if face_mask_type == SPFMT.EYES_HULL or \
+                               face_mask_type == SPFMT.ALL_EYES_HULL:
+                                eyes_mask = LandmarksProcessor.get_image_eye_mask (sample_bgr.shape, sample.landmarks)
+                                eyes_mask = np.clip(eyes_mask, 0, 1)
+
+                            if face_mask_type == SPFMT.ALL_HULL:
+                                img = all_mask
+                            elif face_mask_type == SPFMT.EYES_HULL:
+                                img = eyes_mask
+                            elif face_mask_type == SPFMT.ALL_EYES_HULL:
+                                img = all_mask + eyes_mask
+
+                            if sample.ie_polys is not None:
+                                sample.ie_polys.overlay_mask(img)
+
+                        elif face_mask_type == SPFMT.STRUCT:
+                            if sample.eyebrows_expand_mod is not None:
+                                img = LandmarksProcessor.get_face_struct_mask (sample_bgr.shape, sample.landmarks, eyebrows_expand_mod=sample.eyebrows_expand_mod )
+                            else:
+                                img = LandmarksProcessor.get_face_struct_mask (sample_bgr.shape, sample.landmarks)
+
+                        if sample.face_type == FaceType.MARK_ONLY:
+                            mat = LandmarksProcessor.get_transform_mat (sample.landmarks, sample.shape[0], face_type)
+                            img = cv2.warpAffine( img, mat, (sample.shape[0],sample.shape[0]), flags=cv2.INTER_LINEAR )
+                            img = imagelib.warp_by_params (params, img, warp, transform, can_flip=True, border_replicate=False, cv2_inter=cv2.INTER_LINEAR)
+                            img = cv2.resize( img, (resolution,resolution), cv2.INTER_LINEAR )[...,None]
+                        else:
+                            mat = LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, face_type)
+                            img = imagelib.warp_by_params (params, img, warp, transform, can_flip=True, border_replicate=False, cv2_inter=cv2.INTER_LINEAR)
+                            img = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=cv2.BORDER_CONSTANT, flags=cv2.INTER_LINEAR )[...,None]
+
+                        if channel_type == SPCT.G:
+                            out_sample = img.astype(np.float32)
+                        else:
+                            raise ValueError("only channel_type.G supported for the mask")
+
+                    elif sample_type == SPST.FACE_IMAGE:
                         img = sample_bgr
                         if motion_blur is not None:
                             chance, mb_max_size = motion_blur

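Two observations on the hunk above. First, the new raise still interpolates target_ft, a leftover name from the deleted type-mapping table; only face_type is in scope at that point. Second, the ALL_EYES_HULL branch sums two masks that are each clipped to [0,1], so overlapping pixels can exceed 1.0 unless clipped afterwards. A standalone sketch of the combination (the final clip is illustrative, not from the commit):

    import numpy as np

    all_mask  = np.zeros((256, 256, 1), np.float32)   # stand-in face hull
    eyes_mask = np.zeros((256, 256, 1), np.float32)   # stand-in eyes hull
    all_mask[64:192, 64:192]  = 1.0
    eyes_mask[96:112, 96:160] = 1.0

    combined = np.clip(all_mask + eyes_mask, 0, 1)    # soft union of the hulls
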
@@ -197,7 +156,7 @@ class SampleProcessor(object):
(content identical on both sides; apparently a whitespace-only re-indentation of the gaussian_blur block under the new FACE_IMAGE branch)
                         if gaussian_blur is not None:
                             chance, kernel_max_size = gaussian_blur
                             chance = np.clip(chance, 0, 100)

                             l_rnd_state = np.random.RandomState (sample_rnd_seed+1)
                             gblur_rnd_chance = l_rnd_state.randint(100)
                             gblur_rnd_kernel = l_rnd_state.randint(kernel_max_size)*2+1

@@ -205,65 +164,31 @@ class SampleProcessor(object):
                         if gblur_rnd_chance < chance:
                             img = cv2.GaussianBlur(img, (gblur_rnd_kernel,) *2 , 0)

-                if is_face_sample:
-                    target_ft = SampleProcessor.SPTF_FACETYPE_TO_FACETYPE[target_face_type]
-                    if target_ft > sample.face_type:
-                        raise Exception ('sample %s type %s does not match model requirement %s. Consider extract necessary type of faces.' % (sample.filename, sample.face_type, target_ft) )
-
-                    if sample.face_type == FaceType.MARK_ONLY:
-                        mat = LandmarksProcessor.get_transform_mat (sample.landmarks, sample.shape[0], target_ft)
-
-                        if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
-                           mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
-                           mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL or \
-                           mode_type == SPTF.MODE_FACE_MASK_STRUCT:
-                            img = cv2.warpAffine( img, mat, (sample.shape[0],sample.shape[0]), flags=cv2.INTER_LINEAR )
-                            img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=False, cv2_inter=cv2.INTER_LINEAR)
-                            img = cv2.resize( img, (resolution,resolution), cv2.INTER_LINEAR )[...,None]
-                        else:
-                            img = cv2.warpAffine( img, mat, (sample.shape[0],sample.shape[0]), flags=cv2.INTER_CUBIC )
-                            img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=True)
-                            img = cv2.resize( img, (resolution,resolution), cv2.INTER_CUBIC )
-                    else:
-                        mat = LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, target_ft)
-
-                        if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
-                           mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
-                           mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL or \
-                           mode_type == SPTF.MODE_FACE_MASK_STRUCT:
-                            img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=False, cv2_inter=cv2.INTER_LINEAR)
-                            img = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=cv2.BORDER_CONSTANT, flags=cv2.INTER_LINEAR )[...,None]
-                        else:
-                            img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=True)
-                            img = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=cv2.BORDER_REPLICATE, flags=cv2.INTER_CUBIC )
-                else:
-                    img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=True)
-                    img = cv2.resize( img, (resolution,resolution), cv2.INTER_CUBIC )
-
-                if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
-                   mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
-                   mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL or \
-                   mode_type == SPTF.MODE_FACE_MASK_STRUCT:
-                    out_sample = img.astype(np.float32)
-                else:
-                    img = np.clip(img.astype(np.float32), 0, 1)
-
-                    if ct_mode is not None and ct_sample is not None:
-                        if ct_sample_bgr is None:
-                            ct_sample_bgr = ct_sample.load_bgr()
-                        img = imagelib.color_transfer (ct_mode,
-                                                       img,
-                                                       cv2.resize( ct_sample_bgr, (resolution,resolution), cv2.INTER_LINEAR ) )
-
-                    if mode_type == SPTF.MODE_BGR:
-                        out_sample = img
-                    elif mode_type == SPTF.MODE_BGR_SHUFFLE:
-                        l_rnd_state = np.random.RandomState (sample_rnd_seed)
-                        out_sample = np.take (img, l_rnd_state.permutation(img.shape[-1]), axis=-1)
-                    elif mode_type == SPTF.MODE_BGR_RANDOM_HSV_SHIFT:
-                        l_rnd_state = np.random.RandomState (sample_rnd_seed)
-                        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
-                        h, s, v = cv2.split(hsv)
+                        if sample.face_type == FaceType.MARK_ONLY:
+                            mat = LandmarksProcessor.get_transform_mat (sample.landmarks, sample.shape[0], face_type)
+                            img = cv2.warpAffine( img, mat, (sample.shape[0],sample.shape[0]), flags=cv2.INTER_CUBIC )
+                            img = imagelib.warp_by_params (params, img, warp, transform, can_flip=True, border_replicate=True)
+                            img = cv2.resize( img, (resolution,resolution), cv2.INTER_CUBIC )
+                        else:
+                            mat = LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, face_type)
+                            img = imagelib.warp_by_params (params, img, warp, transform, can_flip=True, border_replicate=True)
+                            img = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=cv2.BORDER_REPLICATE, flags=cv2.INTER_CUBIC )
+
+                        img = np.clip(img.astype(np.float32), 0, 1)
+
+                        # Apply random color transfer
+                        if ct_mode is not None and ct_sample is not None:
+                            if ct_sample_bgr is None:
+                                ct_sample_bgr = ct_sample.load_bgr()
+                            img = imagelib.color_transfer (ct_mode, img, cv2.resize( ct_sample_bgr, (resolution,resolution), cv2.INTER_LINEAR ) )
+
+                        # Transform from BGR to desired channel_type
+                        if channel_type == SPCT.BGR:
+                            out_sample = img
+                        elif channel_type == SPCT.BGR_SHUFFLE:
+                            l_rnd_state = np.random.RandomState (sample_rnd_seed)
+                            out_sample = np.take (img, l_rnd_state.permutation(img.shape[-1]), axis=-1)
+                        elif channel_type == SPCT.BGR_RANDOM_HSV_SHIFT:
+                            l_rnd_state = np.random.RandomState (sample_rnd_seed)
+                            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
+                            h, s, v = cv2.split(hsv)

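BGR_RANDOM_HSV_SHIFT seeds a local RandomState with sample_rnd_seed, so every output of the same sample shifts identically across generators. A reduced standalone sketch of the value-channel part (the elided context presumably perturbs h and s similarly):

    import cv2
    import numpy as np

    img = np.random.rand(96, 96, 3).astype(np.float32)   # stand-in BGR in [0,1]
    rnd = np.random.RandomState(1234)                    # per-sample seed

    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    v = np.clip(v + rnd.random() - 0.5, 0, 1)            # random brightness shift
    out = np.clip(cv2.cvtColor(cv2.merge([h, s, v]), cv2.COLOR_HSV2BGR), 0, 1)
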
@@ -272,31 +197,50 @@ class SampleProcessor(object):
                             v = np.clip ( v + l_rnd_state.random()-0.5, 0, 1 )
                             hsv = cv2.merge([h, s, v])
                             out_sample = np.clip( cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) , 0, 1 )
-                    elif mode_type == SPTF.MODE_BGR_RANDOM_RGB_LEVELS:
+                        elif channel_type == SPCT.BGR_RANDOM_RGB_LEVELS:
                             l_rnd_state = np.random.RandomState (sample_rnd_seed)
                             np_rnd = l_rnd_state.rand

                             inBlack = np.array([np_rnd()*0.25 , np_rnd()*0.25 , np_rnd()*0.25], dtype=np.float32)
                             inWhite = np.array([1.0-np_rnd()*0.25, 1.0-np_rnd()*0.25, 1.0-np_rnd()*0.25], dtype=np.float32)
                             inGamma = np.array([0.5+np_rnd(), 0.5+np_rnd(), 0.5+np_rnd()], dtype=np.float32)
                             outBlack = np.array([0.0, 0.0, 0.0], dtype=np.float32)
                             outWhite = np.array([1.0, 1.0, 1.0], dtype=np.float32)

                             out_sample = np.clip( (img - inBlack) / (inWhite - inBlack), 0, 1 )
                             out_sample = ( out_sample ** (1/inGamma) ) * (outWhite - outBlack) + outBlack
                             out_sample = np.clip(out_sample, 0, 1)
-                    elif mode_type == SPTF.MODE_G:
+                        elif channel_type == SPCT.G:
                             out_sample = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[...,None]
-                    elif mode_type == SPTF.MODE_GGG:
+                        elif channel_type == SPCT.GGG:
                             out_sample = np.repeat ( np.expand_dims(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),-1), (3,), -1)

+                        # Final transformations
                         if not debug:
                             if normalize_tanh:
                                 out_sample = np.clip (out_sample * 2.0 - 1.0, -1.0, 1.0)

                             if data_format == "NCHW":
                                 out_sample = np.transpose(out_sample, (2,0,1) )
+                    #else:
+                    #    img = imagelib.warp_by_params (params, img, warp, transform, can_flip=True, border_replicate=True)
+                    #    img = cv2.resize( img, (resolution,resolution), cv2.INTER_CUBIC )
+
+                elif sample_type == SPST.LANDMARKS_ARRAY:
+                    l = sample.landmarks
+                    l = np.concatenate ( [ np.expand_dims(l[:,0] / w,-1), np.expand_dims(l[:,1] / h,-1) ], -1 )
+                    l = np.clip(l, 0.0, 1.0)
+                    out_sample = l
+
+                elif sample_type == SPST.PITCH_YAW_ROLL or sample_type == SPST.PITCH_YAW_ROLL_SIGMOID:
+                    pitch_yaw_roll = sample.get_pitch_yaw_roll()
+
+                    if params['flip']:
+                        yaw = -yaw
+
+                    if sample_type == SPST.PITCH_YAW_ROLL_SIGMOID:
+                        pitch = np.clip( (pitch / math.pi) / 2.0 + 0.5, 0, 1)
+                        yaw = np.clip( (yaw / math.pi) / 2.0 + 0.5, 0, 1)
+                        roll = np.clip( (roll / math.pi) / 2.0 + 0.5, 0, 1)
+
+                    out_sample = (pitch, yaw, roll)
+                else:
+                    raise ValueError ('expected sample_type')

                 outputs_sample.append ( out_sample )
             outputs += [outputs_sample]
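The "# Final transformations" block closes the FACE_IMAGE branch: optional tanh-range normalization maps [0,1] to [-1,1], and NCHW moves channels first. A standalone sketch of those two steps:

    import numpy as np

    out_sample = np.random.rand(96, 96, 3).astype(np.float32)    # HWC, [0,1]

    normalize_tanh = True
    if normalize_tanh:
        out_sample = np.clip(out_sample * 2.0 - 1.0, -1.0, 1.0)  # -> [-1,1]

    data_format = 'NCHW'
    if data_format == 'NCHW':
        out_sample = np.transpose(out_sample, (2, 0, 1))         # HWC -> CHW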