diff --git a/models/Model_DEV_FANSEG/Model.py b/models/Model_DEV_FANSEG/Model.py
index f84345c..6184a43 100644
--- a/models/Model_DEV_FANSEG/Model.py
+++ b/models/Model_DEV_FANSEG/Model.py
@@ -41,19 +41,19 @@ class Model(ModelBase):
                             training=True)
 
         if self.is_training_mode:
-            f = SampleProcessor.TypeFlags
-            face_type = f.FACE_TYPE_FULL if self.options['face_type'] == 'f' else f.FACE_TYPE_HALF
+            t = SampleProcessor.Types
+            face_type = t.FACE_TYPE_FULL if self.options['face_type'] == 'f' else t.FACE_TYPE_HALF
 
             self.set_training_data_generators ([
                     SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
-                        sample_process_options=SampleProcessor.Options(random_flip=True, motion_blur = [25, 1] ),
-                        output_sample_types=[ [f.WARPED_TRANSFORMED | face_type | f.MODE_BGR_SHUFFLE | f.OPT_APPLY_MOTION_BLUR, self.resolution],
-                                              [f.WARPED_TRANSFORMED | face_type | f.MODE_M | f.FACE_MASK_FULL, self.resolution],
+                        sample_process_options=SampleProcessor.Options(random_flip=True),
+                        output_sample_types=[ { 'types': (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR_SHUFFLE), 'resolution' : self.resolution, 'motion_blur':(25, 1) },
+                                              { 'types': (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_M, t.FACE_MASK_FULL), 'resolution': self.resolution },
                                             ]),
 
                     SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                         sample_process_options=SampleProcessor.Options(random_flip=True ),
-                        output_sample_types=[ [f.TRANSFORMED | face_type | f.MODE_BGR_SHUFFLE, self.resolution],
+                        output_sample_types=[ { 'types': (t.IMG_TRANSFORMED, face_type, t.MODE_BGR_SHUFFLE), 'resolution' : self.resolution},
                                             ])
                 ])
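Note: the same conversion recurs in every model below, so a compact before/after of a single output description may help. Names are taken from this diff; `face_type` and `self.resolution` are the model's own values, and the numbers are the FANSEG values above:

    t = SampleProcessor.Types

    # old: one OR-ed TypeFlags bitmask plus a positional size
    #   [f.WARPED_TRANSFORMED | face_type | f.MODE_BGR_SHUFFLE | f.OPT_APPLY_MOTION_BLUR, self.resolution]

    # new: one dict; image type, face type and mode are separate enum members,
    # and per-output options such as motion_blur become ordinary keys
    { 'types': (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR_SHUFFLE),
      'resolution': self.resolution,
      'motion_blur': (25, 1) }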
diff --git a/models/Model_DEV_POSEEST/Model.py b/models/Model_DEV_POSEEST/Model.py
index 6e097b1..35613d0 100644
--- a/models/Model_DEV_POSEEST/Model.py
+++ b/models/Model_DEV_POSEEST/Model.py
@@ -42,22 +42,22 @@ class Model(ModelBase):
                             training=True)
 
         if self.is_training_mode:
-            f = SampleProcessor.TypeFlags
-            face_type = f.FACE_TYPE_FULL if self.options['face_type'] == 'f' else f.FACE_TYPE_HALF
+            t = SampleProcessor.Types
+            face_type = t.FACE_TYPE_FULL if self.options['face_type'] == 'f' else t.FACE_TYPE_HALF
 
-            self.set_training_data_generators ([
-                    SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size, generators_count=4,
-                        sample_process_options=SampleProcessor.Options( rotation_range=[0,0], motion_blur = [25, 1] ), #random_flip=True,
-                        output_sample_types=[ [f.TRANSFORMED | face_type | f.MODE_BGR_SHUFFLE | f.OPT_APPLY_MOTION_BLUR, self.resolution ],
-                                              [f.PITCH_YAW_ROLL],
-                                            ]),
-
-                    SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size, generators_count=4,
-                        sample_process_options=SampleProcessor.Options( rotation_range=[0,0] ), #random_flip=True,
-                        output_sample_types=[ [f.TRANSFORMED | face_type | f.MODE_BGR_SHUFFLE, self.resolution ],
-                                              [f.PITCH_YAW_ROLL],
+            self.set_training_data_generators ([
+                    SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size, generators_count=4,
+                        sample_process_options=SampleProcessor.Options( rotation_range=[0,0] ), #random_flip=True,
+                        output_sample_types=[ {'types': (t.IMG_TRANSFORMED, face_type, t.MODE_BGR_SHUFFLE), 'resolution':self.resolution, 'motion_blur':(25, 1) },
+                                              {'types': (t.IMG_PITCH_YAW_ROLL,)}
+                                            ]),
+
+                    SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size, generators_count=4,
+                        sample_process_options=SampleProcessor.Options( rotation_range=[0,0] ), #random_flip=True,
+                        output_sample_types=[ {'types': (t.IMG_TRANSFORMED, face_type, t.MODE_BGR_SHUFFLE), 'resolution':self.resolution },
+                                              {'types': (t.IMG_PITCH_YAW_ROLL,)}
+                                            ])
                 ])
-                ])
 
     #override
     def onSave(self):
diff --git a/models/Model_DF/Model.py b/models/Model_DF/Model.py
index 308b074..879144a 100644
--- a/models/Model_DF/Model.py
+++ b/models/Model_DF/Model.py
@@ -44,20 +44,20 @@ class Model(ModelBase):
         self.convert = K.function([ae_input_layer], rec_src)
 
         if self.is_training_mode:
-            f = SampleProcessor.TypeFlags
+            t = SampleProcessor.Types
+            output_sample_types=[ { 'types': (t.IMG_WARPED_TRANSFORMED, t.FACE_TYPE_FULL, t.MODE_BGR), 'resolution':128},
+                                  { 'types': (t.IMG_TRANSFORMED, t.FACE_TYPE_FULL, t.MODE_BGR), 'resolution':128},
+                                  { 'types': (t.IMG_TRANSFORMED, t.FACE_TYPE_FULL, t.MODE_M, t.FACE_MASK_FULL), 'resolution':128} ]
+
             self.set_training_data_generators ([
                     SampleGeneratorFace(self.training_data_src_path, sort_by_yaw_target_samples_path=self.training_data_dst_path if self.sort_by_yaw else None,
                                         debug=self.is_debug(), batch_size=self.batch_size,
                         sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, scale_range=np.array([-0.05, 0.05])+self.src_scale_mod / 100.0 ),
-                        output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
-                                              [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
-                                              [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
+                        output_sample_types=output_sample_types),
 
                     SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                         sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
-                        output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
-                                              [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
-                                              [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
+                        output_sample_types=output_sample_types)
                 ])
 
     #override
    def onSave(self):
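Note: `motion_blur` moves from SampleProcessor.Options to a per-output key, so only the BGR output is blurred while the mask output stays sharp. A sketch of how the tuple is interpreted, mirroring the logic this diff adds to SampleProcessor.process further down ((25, 1) is the value used by FANSEG/POSEEST above):

    import numpy as np

    chance, mb_range = (25, 1)                            # 25% chance per sample
    chance = np.clip(chance, 0, 100)
    mb_range = [3, 5, 7, 9][:np.clip(mb_range, 0, 3)+1]   # power 1 -> kernel sizes [3, 5]
    if np.random.randint(100) < chance:
        dim = mb_range[np.random.randint(len(mb_range))]  # random kernel size, random angle
        # img = imagelib.LinearMotionBlur(img, dim, np.random.randint(180))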
diff --git a/models/Model_H128/Model.py b/models/Model_H128/Model.py
index befff31..11041e6 100644
--- a/models/Model_H128/Model.py
+++ b/models/Model_H128/Model.py
@@ -54,20 +54,20 @@ class Model(ModelBase):
         self.dst_view = K.function([input_dst_bgr],[rec_dst_bgr, rec_dst_mask])
 
         if self.is_training_mode:
-            f = SampleProcessor.TypeFlags
+            t = SampleProcessor.Types
+            output_sample_types=[ { 'types': (t.IMG_WARPED_TRANSFORMED, t.FACE_TYPE_HALF, t.MODE_BGR), 'resolution':128},
+                                  { 'types': (t.IMG_TRANSFORMED, t.FACE_TYPE_HALF, t.MODE_BGR), 'resolution':128},
+                                  { 'types': (t.IMG_TRANSFORMED, t.FACE_TYPE_HALF, t.MODE_M, t.FACE_MASK_FULL), 'resolution':128} ]
+
             self.set_training_data_generators ([
                    SampleGeneratorFace(self.training_data_src_path, sort_by_yaw_target_samples_path=self.training_data_dst_path if self.sort_by_yaw else None,
                                         debug=self.is_debug(), batch_size=self.batch_size,
                         sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, scale_range=np.array([-0.05, 0.05])+self.src_scale_mod / 100.0 ),
-                        output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 128],
-                                              [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 128],
-                                              [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
+                        output_sample_types=output_sample_types ),
 
                     SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                         sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
-                        output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 128],
-                                              [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 128],
-                                              [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
+                        output_sample_types=output_sample_types )
                 ])
 
     #override
diff --git a/models/Model_H64/Model.py b/models/Model_H64/Model.py
index 3b09d24..d37eaa3 100644
--- a/models/Model_H64/Model.py
+++ b/models/Model_H64/Model.py
@@ -55,20 +55,20 @@ class Model(ModelBase):
         self.dst_view = K.function([input_dst_bgr],[rec_dst_bgr, rec_dst_mask])
 
         if self.is_training_mode:
-            f = SampleProcessor.TypeFlags
+            t = SampleProcessor.Types
+            output_sample_types=[ { 'types': (t.IMG_WARPED_TRANSFORMED, t.FACE_TYPE_HALF, t.MODE_BGR), 'resolution':64},
+                                  { 'types': (t.IMG_TRANSFORMED, t.FACE_TYPE_HALF, t.MODE_BGR), 'resolution':64},
+                                  { 'types': (t.IMG_TRANSFORMED, t.FACE_TYPE_HALF, t.MODE_M, t.FACE_MASK_FULL), 'resolution':64} ]
+
             self.set_training_data_generators ([
                     SampleGeneratorFace(self.training_data_src_path, sort_by_yaw_target_samples_path=self.training_data_dst_path if self.sort_by_yaw else None,
                                         debug=self.is_debug(), batch_size=self.batch_size,
                         sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, scale_range=np.array([-0.05, 0.05])+self.src_scale_mod / 100.0 ),
-                        output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 64],
-                                              [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 64],
-                                              [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_M | f.FACE_MASK_FULL, 64] ] ),
+                        output_sample_types=output_sample_types),
 
                     SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                         sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
-                        output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 64],
-                                              [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_BGR, 64],
-                                              [f.TRANSFORMED | f.FACE_TYPE_HALF | f.MODE_M | f.FACE_MASK_FULL, 64] ] )
+                        output_sample_types=output_sample_types)
                 ])
 
     #override
diff --git a/models/Model_LIAEF128/Model.py b/models/Model_LIAEF128/Model.py
index 45ca7ca..2fe439a 100644
--- a/models/Model_LIAEF128/Model.py
+++ b/models/Model_LIAEF128/Model.py
@@ -49,22 +49,20 @@ class Model(ModelBase):
 
         if self.is_training_mode:
-            f = SampleProcessor.TypeFlags
+            t = SampleProcessor.Types
+            output_sample_types=[ { 'types': (t.IMG_WARPED_TRANSFORMED, t.FACE_TYPE_FULL, t.MODE_BGR), 'resolution':128},
+                                  { 'types': (t.IMG_TRANSFORMED, t.FACE_TYPE_FULL, t.MODE_BGR), 'resolution':128},
+                                  { 'types': (t.IMG_TRANSFORMED, t.FACE_TYPE_FULL, t.MODE_M, t.FACE_MASK_FULL), 'resolution':128} ]
+
             self.set_training_data_generators ([
-
-
                     SampleGeneratorFace(self.training_data_src_path, sort_by_yaw_target_samples_path=self.training_data_dst_path if self.sort_by_yaw else None,
                                         debug=self.is_debug(), batch_size=self.batch_size,
                         sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, scale_range=np.array([-0.05, 0.05])+self.src_scale_mod / 100.0 ),
-                        output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
-                                              [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
-                                              [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] ),
+                        output_sample_types=output_sample_types),
 
                     SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                         sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
-                        output_sample_types=[ [f.WARPED_TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
-                                              [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_BGR, 128],
-                                              [f.TRANSFORMED | f.FACE_TYPE_FULL | f.MODE_M | f.FACE_MASK_FULL, 128] ] )
+                        output_sample_types=output_sample_types)
                 ])
 
     #override
diff --git a/models/Model_RecycleGAN/Model.py b/models/Model_RecycleGAN/Model.py
index 8014b27..0bb6ee8 100644
--- a/models/Model_RecycleGAN/Model.py
+++ b/models/Model_RecycleGAN/Model.py
@@ -184,17 +184,19 @@ class RecycleGANModel(ModelBase):
 
         if self.is_training_mode:
-            f = SampleProcessor.TypeFlags
+            t = SampleProcessor.Types
+            output_sample_types=[ { 'types': (t.IMG_SOURCE, t.MODE_BGR), 'resolution':resolution} ]
+
             self.set_training_data_generators ([
                     SampleGeneratorImageTemporal(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
                         temporal_image_count=3,
                         sample_process_options=SampleProcessor.Options(random_flip = False, normalize_tanh = True),
-                        output_sample_types=[ [f.SOURCE | f.MODE_BGR, resolution] ] ),
+                        output_sample_types=output_sample_types ),
 
                     SampleGeneratorImageTemporal(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                         temporal_image_count=3,
                         sample_process_options=SampleProcessor.Options(random_flip = False, normalize_tanh = True),
-                        output_sample_types=[ [f.SOURCE | f.MODE_BGR, resolution] ] ),
+                        output_sample_types=output_sample_types ),
                 ])
         else:
             self.G_convert = K.function([real_B0],[fake_A0])
diff --git a/models/Model_SAE/Model.py b/models/Model_SAE/Model.py
index 1eb2c07..bc2e3d1 100644
--- a/models/Model_SAE/Model.py
+++ b/models/Model_SAE/Model.py
@@ -325,12 +325,12 @@ class SAEModel(ModelBase):
             self.src_sample_losses = []
             self.dst_sample_losses = []
 
-            f = SampleProcessor.TypeFlags
-            face_type = f.FACE_TYPE_FULL if self.options['face_type'] == 'f' else f.FACE_TYPE_HALF
+            t = SampleProcessor.Types
+            face_type = t.FACE_TYPE_FULL if self.options['face_type'] == 'f' else t.FACE_TYPE_HALF
 
-            output_sample_types=[ [f.WARPED_TRANSFORMED | face_type | f.MODE_BGR, resolution] ]
-            output_sample_types += [ [f.TRANSFORMED | face_type | f.MODE_BGR, resolution // (2**i) ] for i in range(ms_count)]
-            output_sample_types += [ [f.TRANSFORMED | face_type | f.MODE_M | f.FACE_MASK_FULL, resolution // (2**i) ] for i in range(ms_count)]
+            output_sample_types = [ {'types' : (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR), 'resolution':resolution} ]
+            output_sample_types += [ {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'resolution': resolution // (2**i) } for i in range(ms_count)]
+            output_sample_types += [ {'types' : (t.IMG_TRANSFORMED, face_type, t.MODE_M, t.FACE_MASK_FULL), 'resolution': resolution // (2**i) } for i in range(ms_count)]
 
             self.set_training_data_generators ([
                     SampleGeneratorFace(self.training_data_src_path, sort_by_yaw_target_samples_path=self.training_data_dst_path if self.sort_by_yaw else None,
diff --git a/nnlib/nnlib.py b/nnlib/nnlib.py
index 7279218..8a91a14 100644
--- a/nnlib/nnlib.py
+++ b/nnlib/nnlib.py
@@ -1009,7 +1009,7 @@ class CAInitializerMPSubprocessor(Subprocessor):
 
     #override
     def on_clients_initialized(self):
-        io.progress_bar ("Initializing", len (self.idx_shapes_list))
+        io.progress_bar ("Initializing CA weights", len (self.idx_shapes_list))
 
     #override
     def on_clients_finalized(self):
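Note: SAE builds its multiscale targets with the same dict format via the three comprehensions above. A worked expansion, assuming resolution=128 and ms_count=3 (one possible configuration; names as in the SAE hunk):

    output_sample_types = [
        {'types': (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR), 'resolution': 128},
        {'types': (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'resolution': 128},
        {'types': (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'resolution': 64},
        {'types': (t.IMG_TRANSFORMED, face_type, t.MODE_BGR), 'resolution': 32},
        {'types': (t.IMG_TRANSFORMED, face_type, t.MODE_M, t.FACE_MASK_FULL), 'resolution': 128},
        {'types': (t.IMG_TRANSFORMED, face_type, t.MODE_M, t.FACE_MASK_FULL), 'resolution': 64},
        {'types': (t.IMG_TRANSFORMED, face_type, t.MODE_M, t.FACE_MASK_FULL), 'resolution': 32},
    ]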
diff --git a/samplelib/SampleGeneratorImageTemporal.py b/samplelib/SampleGeneratorImageTemporal.py
index d370bda..190f98d 100644
--- a/samplelib/SampleGeneratorImageTemporal.py
+++ b/samplelib/SampleGeneratorImageTemporal.py
@@ -43,12 +43,11 @@ class SampleGeneratorImageTemporal(SampleGeneratorBase):
             raise ValueError('No training data provided.')
 
         mult_max = 4
-        l = samples_len - (self.temporal_image_count-1)*mult_max + 1
-        if l < 0:
+        samples_sub_len = samples_len - (self.temporal_image_count-1)*mult_max
+        if samples_sub_len <= 0:
             raise ValueError('Not enough samples to fit temporal line.')
 
         shuffle_idxs = []
-        samples_sub_len = samples_len - l + 1
 
         while True:
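Note: the old code kept two derived quantities, and its `samples_sub_len = samples_len - l + 1` collapsed to the constant `(temporal_image_count-1)*mult_max` regardless of the dataset size. The rewrite computes a single quantity with the intended meaning; a quick check of the arithmetic with the values used in this file:

    temporal_image_count, mult_max = 3, 4
    for samples_len in (8, 9, 20):
        samples_sub_len = samples_len - (temporal_image_count - 1) * mult_max
        print(samples_len, samples_sub_len, 'rejected' if samples_sub_len <= 0 else 'ok')
    # 8 -> 0 rejected, 9 -> 1 ok, 20 -> 12 ok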
diff --git a/samplelib/SampleProcessor.py b/samplelib/SampleProcessor.py
index 7939aaa..0627a5a 100644
--- a/samplelib/SampleProcessor.py
+++ b/samplelib/SampleProcessor.py
@@ -1,61 +1,95 @@
+import collections
 from enum import IntEnum
 
-import numpy as np
-import cv2
-import imagelib
-from facelib import LandmarksProcessor
-from facelib import FaceType
+import cv2
+import numpy as np
+
+import imagelib
+from facelib import FaceType, LandmarksProcessor
+
+
+"""
+output_sample_types = [
+                        opts,   #dict of options, one per output
+                        ...
+                      ]
+
+opts:
+    'types' : (S,S,...,S)
+        where S:
+            'IMG_SOURCE'
+            'IMG_WARPED'
+            'IMG_WARPED_TRANSFORMED'
+            'IMG_TRANSFORMED'
+            'IMG_LANDMARKS_ARRAY' #currently unused
+            'IMG_PITCH_YAW_ROLL'
+
+            'FACE_TYPE_HALF'
+            'FACE_TYPE_FULL'
+            'FACE_TYPE_HEAD' #currently unused
+            'FACE_TYPE_AVATAR' #currently unused
+
+            'FACE_MASK_FULL'
+            'FACE_MASK_EYES' #currently unused
+
+            'MODE_BGR' #BGR
+            'MODE_G' #Grayscale
+            'MODE_GGG' #3xGrayscale
+            'MODE_M' #mask only
+            'MODE_BGR_SHUFFLE' #BGR shuffle
+
+    'resolution' : N
+
+    'motion_blur' : (chance_int, range) - chance 0..100 to apply to the face (not the mask), and range [1..3] where 3 is the highest power of motion blur
+
+"""
 
 class SampleProcessor(object):
-    class TypeFlags(IntEnum):
-        SOURCE                = 0x00000001,
-        WARPED                = 0x00000002,
-        WARPED_TRANSFORMED    = 0x00000004,
-        TRANSFORMED           = 0x00000008,
-        LANDMARKS_ARRAY       = 0x00000010, #currently unused
-        PITCH_YAW_ROLL        = 0x00000020,
-
-        RANDOM_CLOSE          = 0x00000040, #currently unused
-        MORPH_TO_RANDOM_CLOSE = 0x00000080, #currently unused
-
-        FACE_TYPE_HALF        = 0x00000100,
-        FACE_TYPE_FULL        = 0x00000200,
-        FACE_TYPE_HEAD        = 0x00000400, #currently unused
-        FACE_TYPE_AVATAR      = 0x00000800, #currently unused
-
-        FACE_MASK_FULL        = 0x00001000,
-        FACE_MASK_EYES        = 0x00002000, #currently unused
-
-        MODE_BGR              = 0x00010000, #BGR
-        MODE_G                = 0x00020000, #Grayscale
-        MODE_GGG              = 0x00040000, #3xGrayscale
-        MODE_M                = 0x00080000, #mask only
-        MODE_BGR_SHUFFLE      = 0x00100000, #BGR shuffle
+    class Types(IntEnum):
+        NONE = 0
+
+        IMG_TYPE_BEGIN = 1
+        IMG_SOURCE = 1
+        IMG_WARPED = 2
+        IMG_WARPED_TRANSFORMED = 3
+        IMG_TRANSFORMED = 4
+        IMG_LANDMARKS_ARRAY = 5 #currently unused
+        IMG_PITCH_YAW_ROLL = 6
+        IMG_TYPE_END = 6
+
+        FACE_TYPE_BEGIN = 7
+        FACE_TYPE_HALF = 7
+        FACE_TYPE_FULL = 8
+        FACE_TYPE_HEAD = 9 #currently unused
+        FACE_TYPE_AVATAR = 10 #currently unused
+        FACE_TYPE_END = 10
+
+        FACE_MASK_BEGIN = 11
+        FACE_MASK_FULL = 11
+        FACE_MASK_EYES = 12 #currently unused
+        FACE_MASK_END = 12
+
+        MODE_BEGIN = 13
+        MODE_BGR = 13 #BGR
+        MODE_G = 14 #Grayscale
+        MODE_GGG = 15 #3xGrayscale
+        MODE_M = 16 #mask only
+        MODE_BGR_SHUFFLE = 17 #BGR shuffle
+        MODE_END = 17
 
-
-        OPT_APPLY_MOTION_BLUR = 0x10000000,
-
     class Options(object):
-        #motion_blur = [chance_int, range] - chance 0..100 to apply to face (not mask), and range [1..3] where 3 is highest power of motion blur
-        def __init__(self, random_flip = True, normalize_tanh = False, rotation_range=[-10,10], scale_range=[-0.05, 0.05], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05], motion_blur=None ):
+        def __init__(self, random_flip = True, normalize_tanh = False, rotation_range=[-10,10], scale_range=[-0.05, 0.05], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05] ):
             self.random_flip = random_flip
             self.normalize_tanh = normalize_tanh
             self.rotation_range = rotation_range
             self.scale_range = scale_range
             self.tx_range = tx_range
             self.ty_range = ty_range
-            self.motion_blur = motion_blur
-            if self.motion_blur is not None:
-                chance, range = self.motion_blur
-                chance = np.clip(chance, 0, 100)
-                range = [3,5,7,9][ : np.clip(range, 0, 3)+1 ]
-                self.motion_blur = (chance, range)
 
     @staticmethod
     def process (sample, sample_process_options, output_sample_types, debug):
-        SPTF = SampleProcessor.TypeFlags
+        SPTF = SampleProcessor.Types
 
         sample_bgr = sample.load_bgr()
         h,w,c = sample_bgr.shape
@@ -73,64 +107,49 @@ class SampleProcessor(object):
         params = imagelib.gen_warp_params(sample_bgr, sample_process_options.random_flip, rotation_range=sample_process_options.rotation_range, scale_range=sample_process_options.scale_range, tx_range=sample_process_options.tx_range, ty_range=sample_process_options.ty_range )
 
-        images = [[None]*3 for _ in range(30)]
-
+        cached_images = collections.defaultdict(dict)
+
+        sample_rnd_seed = np.random.randint(0x80000000)
+
+        SPTF_FACETYPE_TO_FACETYPE = { SPTF.FACE_TYPE_HALF   : FaceType.HALF,
+                                      SPTF.FACE_TYPE_FULL   : FaceType.FULL,
+                                      SPTF.FACE_TYPE_HEAD   : FaceType.HEAD,
+                                      SPTF.FACE_TYPE_AVATAR : FaceType.AVATAR }
+
         outputs = []
-        for sample_type in output_sample_types:
-            f = sample_type[0]
-            size = 0 if len (sample_type) < 2 else sample_type[1]
-            opts = {} if len (sample_type) < 3 else sample_type[2]
+        for opts in output_sample_types:
+
+            resolution = opts.get('resolution', 0)
+            types = opts.get('types', [] )
 
-            random_sub_size = opts.get('random_sub_size', 0)
+            random_sub_res = opts.get('random_sub_res', 0)
             normalize_std_dev = opts.get('normalize_std_dev', False)
             normalize_vgg = opts.get('normalize_vgg', False)
+            motion_blur = opts.get('motion_blur', None)
 
-            if f & SPTF.SOURCE != 0:
-                img_type = 0
-            elif f & SPTF.WARPED != 0:
-                img_type = 1
-            elif f & SPTF.WARPED_TRANSFORMED != 0:
-                img_type = 2
-            elif f & SPTF.TRANSFORMED != 0:
-                img_type = 3
-            elif f & SPTF.LANDMARKS_ARRAY != 0:
-                img_type = 4
-            elif f & SPTF.PITCH_YAW_ROLL != 0:
-                img_type = 5
-            else:
-                raise ValueError ('expected SampleTypeFlags type')
+            img_type = SPTF.NONE
+            target_face_type = SPTF.NONE
+            face_mask_type = SPTF.NONE
+            mode_type = SPTF.NONE
+            for t in types:
+                if t >= SPTF.IMG_TYPE_BEGIN and t <= SPTF.IMG_TYPE_END:
+                    img_type = t
+                elif t >= SPTF.FACE_TYPE_BEGIN and t <= SPTF.FACE_TYPE_END:
+                    target_face_type = t
+                elif t >= SPTF.FACE_MASK_BEGIN and t <= SPTF.FACE_MASK_END:
+                    face_mask_type = t
+                elif t >= SPTF.MODE_BEGIN and t <= SPTF.MODE_END:
+                    mode_type = t
+
+            if img_type == SPTF.NONE:
+                raise ValueError ('expected IMG_ type')
 
-            if f & SPTF.RANDOM_CLOSE != 0:
-                img_type += 10
-            elif f & SPTF.MORPH_TO_RANDOM_CLOSE != 0:
-                img_type += 20
-
-            face_mask_type = 0
-            if f & SPTF.FACE_MASK_FULL != 0:
-                face_mask_type = 1
-            elif f & SPTF.FACE_MASK_EYES != 0:
-                face_mask_type = 2
-
-            target_face_type = -1
-            if f & SPTF.FACE_TYPE_HALF != 0:
-                target_face_type = FaceType.HALF
-            elif f & SPTF.FACE_TYPE_FULL != 0:
-                target_face_type = FaceType.FULL
-            elif f & SPTF.FACE_TYPE_HEAD != 0:
-                target_face_type = FaceType.HEAD
-            elif f & SPTF.FACE_TYPE_AVATAR != 0:
-                target_face_type = FaceType.AVATAR
-
-            apply_motion_blur = f & SPTF.OPT_APPLY_MOTION_BLUR != 0
-
-            if img_type == 4:
+            if img_type == SPTF.IMG_LANDMARKS_ARRAY:
                 l = sample.landmarks
                 l = np.concatenate ( [ np.expand_dims(l[:,0] / w,-1), np.expand_dims(l[:,1] / h,-1) ], -1 )
                 l = np.clip(l, 0.0, 1.0)
                 img = l
-            elif img_type == 5:
+            elif img_type == SPTF.IMG_PITCH_YAW_ROLL:
                 pitch_yaw_roll = sample.pitch_yaw_roll
                 if pitch_yaw_roll is not None:
                     pitch, yaw, roll = pitch_yaw_roll
@@ -141,56 +160,26 @@ class SampleProcessor(object):
                     img = (pitch, yaw, roll)
             else:
-                if images[img_type][face_mask_type] is None:
-                    if img_type >= 10 and img_type <= 19: #RANDOM_CLOSE
-                        img_type -= 10
-                        img = close_sample_bgr
-                        cur_sample = close_sample
-
-                    elif img_type >= 20 and img_type <= 29: #MORPH_TO_RANDOM_CLOSE
-                        img_type -= 20
-                        res = sample.shape[0]
-
-                        s_landmarks = sample.landmarks.copy()
-                        d_landmarks = close_sample.landmarks.copy()
-                        idxs = list(range(len(s_landmarks)))
-                        #remove landmarks near boundaries
-                        for i in idxs[:]:
-                            s_l = s_landmarks[i]
-                            d_l = d_landmarks[i]
-                            if s_l[0] < 5 or s_l[1] < 5 or s_l[0] >= res-5 or s_l[1] >= res-5 or \
-                               d_l[0] < 5 or d_l[1] < 5 or d_l[0] >= res-5 or d_l[1] >= res-5:
-                                idxs.remove(i)
-                        #remove landmarks that close to each other in 5 dist
-                        for landmarks in [s_landmarks, d_landmarks]:
-                            for i in idxs[:]:
-                                s_l = landmarks[i]
-                                for j in idxs[:]:
-                                    if i == j:
-                                        continue
-                                    s_l_2 = landmarks[j]
-                                    diff_l = np.abs(s_l - s_l_2)
-                                    if np.sqrt(diff_l.dot(diff_l)) < 5:
-                                        idxs.remove(i)
-                                        break
-                        s_landmarks = s_landmarks[idxs]
-                        d_landmarks = d_landmarks[idxs]
-                        s_landmarks = np.concatenate ( [s_landmarks, [ [0,0], [ res // 2, 0], [ res-1, 0], [0, res//2], [res-1, res//2] ,[0,res-1] ,[res//2, res-1] ,[res-1,res-1] ] ] )
-                        d_landmarks = np.concatenate ( [d_landmarks, [ [0,0], [ res // 2, 0], [ res-1, 0], [0, res//2], [res-1, res//2] ,[0,res-1] ,[res//2, res-1] ,[res-1,res-1] ] ] )
-                        img = imagelib.morph_by_points (sample_bgr, s_landmarks, d_landmarks)
-                        cur_sample = close_sample
-                    else:
-                        img = sample_bgr
-                        cur_sample = sample
+                if mode_type == SPTF.NONE:
+                    raise ValueError ('expected MODE_ type')
+
+                img = cached_images.get(img_type, {}).get(face_mask_type, None)
+                if img is None:
+                    img = sample_bgr
+                    cur_sample = sample
 
                     if is_face_sample:
-                        if apply_motion_blur and sample_process_options.motion_blur is not None:
-                            chance, mb_range = sample_process_options.motion_blur
-                            if np.random.randint(100) < chance :
+                        if motion_blur is not None:
+                            chance, mb_range = motion_blur
+                            chance = np.clip(chance, 0, 100)
+
+                            if np.random.randint(100) < chance:
+                                mb_range = [3,5,7,9][ : np.clip(mb_range, 0, 3)+1 ]
                                 dim = mb_range[ np.random.randint(len(mb_range) ) ]
                                 img = imagelib.LinearMotionBlur (img, dim, np.random.randint(180) )
 
-                        if face_mask_type == 1:
+                        if face_mask_type == SPTF.FACE_MASK_FULL:
                             mask = cur_sample.load_fanseg_mask() #using fanseg_mask if exist
 
                             if mask is None:
@@ -200,26 +189,30 @@
                                 cur_sample.ie_polys.overlay_mask(mask)
 
                             img = np.concatenate( (img, mask ), -1 )
-                        elif face_mask_type == 2:
+                        elif face_mask_type == SPTF.FACE_MASK_EYES:
                             mask = LandmarksProcessor.get_image_eye_mask (img.shape, cur_sample.landmarks)
                             mask = np.expand_dims (cv2.blur (mask, ( w // 32, w // 32 ) ), -1)
                             mask[mask > 0.0] = 1.0
                             img = np.concatenate( (img, mask ), -1 )
 
+                    warp = (img_type==SPTF.IMG_WARPED or img_type==SPTF.IMG_WARPED_TRANSFORMED)
+                    transform = (img_type==SPTF.IMG_WARPED_TRANSFORMED or img_type==SPTF.IMG_TRANSFORMED)
+                    flip = img_type != SPTF.IMG_WARPED
+                    is_border_replicate = face_mask_type == SPTF.NONE
+
+                    img = cached_images[img_type][face_mask_type] = imagelib.warp_by_params (params, img, warp, transform, flip, is_border_replicate)
 
-                    images[img_type][face_mask_type] = imagelib.warp_by_params (params, img, (img_type==1 or img_type==2), (img_type==2 or img_type==3), img_type != 0, face_mask_type == 0)
-
-                img = images[img_type][face_mask_type]
-
-                if is_face_sample and target_face_type != -1:
-                    if target_face_type > sample.face_type:
-                        raise Exception ('sample %s type %s does not match model requirement %s. Consider extract necessary type of faces.' % (sample.filename, sample.face_type, target_face_type) )
-                    img = cv2.warpAffine( img, LandmarksProcessor.get_transform_mat (sample.landmarks, size, target_face_type), (size,size), flags=cv2.INTER_CUBIC )
+                if is_face_sample and target_face_type != SPTF.NONE:
+                    ft = SPTF_FACETYPE_TO_FACETYPE[target_face_type]
+                    if ft > sample.face_type:
+                        raise Exception ('sample %s type %s does not match model requirement %s. Consider extracting the necessary type of faces.' % (sample.filename, sample.face_type, ft) )
+                    img = cv2.warpAffine( img, LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, ft), (resolution,resolution), flags=cv2.INTER_CUBIC )
                 else:
-                    img = cv2.resize( img, (size,size), cv2.INTER_CUBIC )
+                    img = cv2.resize( img, (resolution,resolution), interpolation=cv2.INTER_CUBIC )
 
-                if random_sub_size != 0:
-                    sub_size = size - random_sub_size
-                    rnd_state = np.random.RandomState (sample_rnd_seed+random_sub_size)
+                if random_sub_res != 0:
+                    sub_size = resolution - random_sub_res
+                    rnd_state = np.random.RandomState (sample_rnd_seed+random_sub_res)
                     start_x = rnd_state.randint(sub_size+1)
                     start_y = rnd_state.randint(sub_size+1)
                     img = img[start_y:start_y+sub_size,start_x:start_x+sub_size,:]
@@ -235,22 +228,20 @@
                     img_bgr[:,:,1] -= 116.779
                     img_bgr[:,:,2] -= 123.68
 
-                if f & SPTF.MODE_BGR != 0:
+                if mode_type == SPTF.MODE_BGR:
                     img = img_bgr
-                elif f & SPTF.MODE_BGR_SHUFFLE != 0:
+                elif mode_type == SPTF.MODE_BGR_SHUFFLE:
                     rnd_state = np.random.RandomState (sample_rnd_seed)
                     img_bgr = np.take (img_bgr, rnd_state.permutation(img_bgr.shape[-1]), axis=-1)
                     img = np.concatenate ( (img_bgr,img_mask) , -1 )
-                elif f & SPTF.MODE_G != 0:
+                elif mode_type == SPTF.MODE_G:
                     img = np.concatenate ( (np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),-1),img_mask) , -1 )
-                elif f & SPTF.MODE_GGG != 0:
+                elif mode_type == SPTF.MODE_GGG:
                     img = np.concatenate ( ( np.repeat ( np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),-1), (3,), -1), img_mask), -1)
-                elif is_face_sample and f & SPTF.MODE_M != 0:
-                    if face_mask_type== 0:
+                elif mode_type == SPTF.MODE_M and is_face_sample:
+                    if face_mask_type == SPTF.NONE:
                         raise ValueError ('no face_mask_type defined')
                     img = img_mask
-                else:
-                    raise ValueError ('expected SampleTypeFlags mode')
 
             if not debug:
                 if sample_process_options.normalize_tanh:
@@ -272,3 +263,52 @@
             return result
         else:
             return outputs
+
+"""
+        RANDOM_CLOSE = 0x00000040, #currently unused
+        MORPH_TO_RANDOM_CLOSE = 0x00000080, #currently unused
+
+if f & SPTF.RANDOM_CLOSE != 0:
+    img_type += 10
+elif f & SPTF.MORPH_TO_RANDOM_CLOSE != 0:
+    img_type += 20
+
+if img_type >= 10 and img_type <= 19: #RANDOM_CLOSE
+    img_type -= 10
+    img = close_sample_bgr
+    cur_sample = close_sample
+
+elif img_type >= 20 and img_type <= 29: #MORPH_TO_RANDOM_CLOSE
+    img_type -= 20
+    res = sample.shape[0]
+
+    s_landmarks = sample.landmarks.copy()
+    d_landmarks = close_sample.landmarks.copy()
+    idxs = list(range(len(s_landmarks)))
+    #remove landmarks near boundaries
+    for i in idxs[:]:
+        s_l = s_landmarks[i]
+        d_l = d_landmarks[i]
+        if s_l[0] < 5 or s_l[1] < 5 or s_l[0] >= res-5 or s_l[1] >= res-5 or \
+           d_l[0] < 5 or d_l[1] < 5 or d_l[0] >= res-5 or d_l[1] >= res-5:
+            idxs.remove(i)
+    #remove landmarks that close to each other in 5 dist
+    for landmarks in [s_landmarks, d_landmarks]:
+        for i in idxs[:]:
+            s_l = landmarks[i]
+            for j in idxs[:]:
+                if i == j:
+                    continue
+                s_l_2 = landmarks[j]
+                diff_l = np.abs(s_l - s_l_2)
+                if np.sqrt(diff_l.dot(diff_l)) < 5:
+                    idxs.remove(i)
+                    break
+    s_landmarks = s_landmarks[idxs]
+    d_landmarks = d_landmarks[idxs]
+    s_landmarks = np.concatenate ( [s_landmarks, [ [0,0], [ res // 2, 0], [ res-1, 0], [0, res//2], [res-1, res//2] ,[0,res-1] ,[res//2, res-1] ,[res-1,res-1] ] ] )
+    d_landmarks = np.concatenate ( [d_landmarks, [ [0,0], [ res // 2, 0], [ res-1, 0], [0, res//2], [res-1, res//2] ,[0,res-1] ,[res//2, res-1] ,[res-1,res-1] ] ] )
+    img = imagelib.morph_by_points (sample_bgr, s_landmarks, d_landmarks)
+    cur_sample = close_sample
+else:
+"""
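Note: the contiguous BEGIN/END ranges in Types are what let process() classify each member of a 'types' tuple with plain comparisons instead of bit tests. A minimal, self-contained sketch of that dispatch (assuming samplelib re-exports SampleProcessor, as the model files above suggest; same logic as the loop added in this diff):

    from samplelib import SampleProcessor

    t = SampleProcessor.Types
    opts = {'types': (t.IMG_WARPED_TRANSFORMED, t.FACE_TYPE_FULL, t.MODE_BGR), 'resolution': 128}

    # each member falls into exactly one non-overlapping [BEGIN, END] range
    img_type = face_type = mask_type = mode_type = t.NONE
    for s in opts['types']:
        if   t.IMG_TYPE_BEGIN  <= s <= t.IMG_TYPE_END:  img_type  = s
        elif t.FACE_TYPE_BEGIN <= s <= t.FACE_TYPE_END: face_type = s
        elif t.FACE_MASK_BEGIN <= s <= t.FACE_MASK_END: mask_type = s
        elif t.MODE_BEGIN      <= s <= t.MODE_END:      mode_type = s

    assert (img_type, face_type, mode_type) == \
           (t.IMG_WARPED_TRANSFORMED, t.FACE_TYPE_FULL, t.MODE_BGR)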