From b333fcea4b74436bd17388d5fe2cad7bcfcf1163 Mon Sep 17 00:00:00 2001 From: iperov Date: Tue, 23 Mar 2021 15:00:24 +0400 Subject: [PATCH 01/47] dump_ckpt --- main.py | 2 ++ mainscripts/Trainer.py | 11 +++++-- models/Model_SAEHD/Model.py | 62 +++++++++++++++++++++++++++++++------ 3 files changed, 62 insertions(+), 13 deletions(-) diff --git a/main.py b/main.py index 2ba085f..cad3e92 100644 --- a/main.py +++ b/main.py @@ -127,6 +127,7 @@ if __name__ == "__main__": 'silent_start' : arguments.silent_start, 'execute_programs' : [ [int(x[0]), x[1] ] for x in arguments.execute_program ], 'debug' : arguments.debug, + 'dump_ckpt' : arguments.dump_ckpt, } from mainscripts import Trainer Trainer.main(**kwargs) @@ -144,6 +145,7 @@ if __name__ == "__main__": p.add_argument('--cpu-only', action="store_true", dest="cpu_only", default=False, help="Train on CPU.") p.add_argument('--force-gpu-idxs', dest="force_gpu_idxs", default=None, help="Force to choose GPU indexes separated by comma.") p.add_argument('--silent-start', action="store_true", dest="silent_start", default=False, help="Silent start. Automatically chooses Best GPU and last used model.") + p.add_argument('--dump-ckpt', action="store_true", dest="dump_ckpt", default=False, help="Dump the model to ckpt format.") p.add_argument('--execute-program', dest="execute_program", default=[], action='append', nargs='+') diff --git a/mainscripts/Trainer.py b/mainscripts/Trainer.py index 7d73e2f..4afc218 100644 --- a/mainscripts/Trainer.py +++ b/mainscripts/Trainer.py @@ -27,6 +27,7 @@ def trainerThread (s2c, c2s, e, silent_start=False, execute_programs = None, debug=False, + dump_ckpt=False, **kwargs): while True: try: @@ -44,7 +45,7 @@ def trainerThread (s2c, c2s, e, saved_models_path.mkdir(exist_ok=True, parents=True) model = models.import_model(model_class_name)( - is_training=True, + is_training=not dump_ckpt, saved_models_path=saved_models_path, training_data_src_path=training_data_src_path, training_data_dst_path=training_data_dst_path, @@ -55,9 +56,13 @@ def trainerThread (s2c, c2s, e, force_gpu_idxs=force_gpu_idxs, cpu_only=cpu_only, silent_start=silent_start, - debug=debug, - ) + debug=debug) + if dump_ckpt: + e.set() + model.dump_ckpt() + break + is_reached_goal = model.is_reached_iter_goal() shared_state = { 'after_save' : False } diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py index 380a053..0ef99a6 100644 --- a/models/Model_SAEHD/Model.py +++ b/models/Model_SAEHD/Model.py @@ -204,6 +204,8 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... archi_type, archi_opts = archi_split elif len(archi_split) == 1: archi_type, archi_opts = archi_split[0], None + + self.archi_type = archi_type ae_dims = self.options['ae_dims'] e_dims = self.options['e_dims'] @@ -236,22 +238,22 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... 
optimizer_vars_on_cpu = models_opt_device=='/CPU:0' input_ch=3 - bgr_shape = nn.get4Dshape(resolution,resolution,input_ch) + bgr_shape = self.bgr_shape = nn.get4Dshape(resolution,resolution,input_ch) mask_shape = nn.get4Dshape(resolution,resolution,1) self.model_filename_list = [] with tf.device ('/CPU:0'): #Place holders on CPU - self.warped_src = tf.placeholder (nn.floatx, bgr_shape) - self.warped_dst = tf.placeholder (nn.floatx, bgr_shape) + self.warped_src = tf.placeholder (nn.floatx, bgr_shape, name='warped_src') + self.warped_dst = tf.placeholder (nn.floatx, bgr_shape, name='warped_dst') - self.target_src = tf.placeholder (nn.floatx, bgr_shape) - self.target_dst = tf.placeholder (nn.floatx, bgr_shape) + self.target_src = tf.placeholder (nn.floatx, bgr_shape, name='target_src') + self.target_dst = tf.placeholder (nn.floatx, bgr_shape, name='target_dst') - self.target_srcm = tf.placeholder (nn.floatx, mask_shape) - self.target_srcm_em = tf.placeholder (nn.floatx, mask_shape) - self.target_dstm = tf.placeholder (nn.floatx, mask_shape) - self.target_dstm_em = tf.placeholder (nn.floatx, mask_shape) + self.target_srcm = tf.placeholder (nn.floatx, mask_shape, name='target_srcm') + self.target_srcm_em = tf.placeholder (nn.floatx, mask_shape, name='target_srcm_em') + self.target_dstm = tf.placeholder (nn.floatx, mask_shape, name='target_dstm') + self.target_dstm_em = tf.placeholder (nn.floatx, mask_shape, name='target_dstm_em') # Initializing model classes model_archi = nn.DeepFakeArchi(resolution, opts=archi_opts) @@ -609,7 +611,10 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... if do_init: model.init_weights() - + + + ############### + # initializing sample generators if self.is_training: training_data_src_path = self.training_data_src_path if not self.pretrain else self.get_pretraining_data_path() @@ -650,7 +655,44 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... 
if self.pretrain_just_disabled: self.update_sample_for_preview(force_new=True) + + def dump_ckpt(self): + tf = nn.tf + + + with tf.device ('/CPU:0'): + warped_dst = tf.placeholder (nn.floatx, (None, self.resolution, self.resolution, 3), name='in_face') + warped_dst = tf.transpose(warped_dst, (0,3,1,2)) + + + if 'df' in self.archi_type: + gpu_dst_code = self.inter(self.encoder(warped_dst)) + gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder_src(gpu_dst_code) + _, gpu_pred_dst_dstm = self.decoder_dst(gpu_dst_code) + elif 'liae' in self.archi_type: + gpu_dst_code = self.encoder (warped_dst) + gpu_dst_inter_B_code = self.inter_B (gpu_dst_code) + gpu_dst_inter_AB_code = self.inter_AB (gpu_dst_code) + gpu_dst_code = tf.concat([gpu_dst_inter_B_code,gpu_dst_inter_AB_code], nn.conv2d_ch_axis) + gpu_src_dst_code = tf.concat([gpu_dst_inter_AB_code,gpu_dst_inter_AB_code], nn.conv2d_ch_axis) + + gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder(gpu_src_dst_code) + _, gpu_pred_dst_dstm = self.decoder(gpu_dst_code) + + gpu_pred_src_dst = tf.transpose(gpu_pred_src_dst, (0,2,3,1)) + gpu_pred_dst_dstm = tf.transpose(gpu_pred_dst_dstm, (0,2,3,1)) + gpu_pred_src_dstm = tf.transpose(gpu_pred_src_dstm, (0,2,3,1)) + + + saver = tf.train.Saver() + tf.identity(gpu_pred_dst_dstm, name='out_face_mask') + tf.identity(gpu_pred_src_dst, name='out_celeb_face') + tf.identity(gpu_pred_src_dstm, name='out_celeb_face_mask') + + saver.save(nn.tf_sess, self.get_strpath_storage_for_file('.ckpt') ) + + #override def get_model_filename_list(self): return self.model_filename_list From e47b602ec8acaf7ad175afe4dbf3be3ec9192015 Mon Sep 17 00:00:00 2001 From: iperov Date: Tue, 23 Mar 2021 15:13:10 +0400 Subject: [PATCH 02/47] SAEHD: random flip replaced with random SRC flip(default False) and random DST flip(default True) --- models/ModelBase.py | 12 +++++++++++- models/Model_SAEHD/Model.py | 7 ++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/models/ModelBase.py b/models/ModelBase.py index 3cb88c5..ce219ab 100644 --- a/models/ModelBase.py +++ b/models/ModelBase.py @@ -185,7 +185,9 @@ class ModelBase(object): self.write_preview_history = self.options.get('write_preview_history', False) self.target_iter = self.options.get('target_iter',0) self.random_flip = self.options.get('random_flip',True) - + self.random_src_flip = self.options.get('random_src_flip', False) + self.random_dst_flip = self.options.get('random_dst_flip', True) + self.on_initialize() self.options['batch_size'] = self.batch_size @@ -297,6 +299,14 @@ class ModelBase(object): def ask_random_flip(self): default_random_flip = self.load_or_def_option('random_flip', True) self.options['random_flip'] = io.input_bool("Flip faces randomly", default_random_flip, help_message="Predicted face will look more naturally without this option, but src faceset should cover all face directions as dst faceset.") + + def ask_random_src_flip(self): + default_random_src_flip = self.load_or_def_option('random_src_flip', False) + self.options['random_src_flip'] = io.input_bool("Flip SRC faces randomly", default_random_src_flip, help_message="Random horizontal flip SRC faceset. Covers more angles, but the face may look less naturally.") + + def ask_random_dst_flip(self): + default_random_dst_flip = self.load_or_def_option('random_dst_flip', True) + self.options['random_dst_flip'] = io.input_bool("Flip DST faces randomly", default_random_dst_flip, help_message="Random horizontal flip DST faceset. 
Makes generalization of src->dst better, if src random flip is not enabled.") def ask_batch_size(self, suggest_batch_size=None, range=None): default_batch_size = self.load_or_def_option('batch_size', suggest_batch_size or self.batch_size) diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py index 0ef99a6..2772870 100644 --- a/models/Model_SAEHD/Model.py +++ b/models/Model_SAEHD/Model.py @@ -65,7 +65,8 @@ class SAEHDModel(ModelBase): self.ask_autobackup_hour() self.ask_write_preview_history() self.ask_target_iter() - self.ask_random_flip() + self.ask_random_src_flip() + self.ask_random_dst_flip() self.ask_batch_size(suggest_batch_size) if self.is_first_run(): @@ -630,7 +631,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... self.set_training_data_generators ([ SampleGeneratorFace(training_data_src_path, random_ct_samples_path=random_ct_samples_path, debug=self.is_debug(), batch_size=self.get_batch_size(), - sample_process_options=SampleProcessor.Options(random_flip=self.random_flip), + sample_process_options=SampleProcessor.Options(random_flip=self.random_src_flip), output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':random_warp, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, @@ -640,7 +641,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... generators_count=src_generators_count ), SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(), - sample_process_options=SampleProcessor.Options(random_flip=self.random_flip), + sample_process_options=SampleProcessor.Options(random_flip=self.random_dst_flip), output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':random_warp, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, From f387179cbaa11ef607d4b92662454c1fc0458499 Mon Sep 17 00:00:00 2001 From: iperov Date: Tue, 23 Mar 2021 16:01:24 +0400 Subject: [PATCH 03/47] Added faceset resize tool via MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 4.2) data_src util faceset resize.bat 5.2) data_dst util faceset resize.bat Resize faceset to match model resolution to reduce CPU load during training. Don’t forget to keep original faceset. 
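The bookkeeping this tool relies on: every pixel-space annotation stored in a DFL image (landmarks, XSeg polygons, the image-to-face matrix) must be rescaled together with the pixels, which is what patch 06's FacesetResizer does with `lmrks /= scale` and `seg_ie_polys.mult_points(1.0/scale)`. A minimal standalone sketch of that invariant, assuming square aligned images; `rescale_landmarks` is an illustrative helper, not part of the DFLIMG API:

import numpy as np

def rescale_landmarks(landmarks, old_size, new_size):
    # Same convention as the resizer: scale = old / new, coordinates divided,
    # so annotations stay aligned with the resized pixels.
    scale = old_size / new_size
    return landmarks / scale

lmrks = np.float32([[100, 200], [300, 400]])
print(rescale_landmarks(lmrks, 1024, 512))  # every coordinate is halved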
--- core/imagelib/SegIEPolys.py | 8 +++++++- main.py | 10 ++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/core/imagelib/SegIEPolys.py b/core/imagelib/SegIEPolys.py index e658711..1a4c3d2 100644 --- a/core/imagelib/SegIEPolys.py +++ b/core/imagelib/SegIEPolys.py @@ -77,6 +77,8 @@ class SegIEPoly(): self.pts = np.array(pts) self.n_max = self.n = len(pts) + def mult_points(self, val): + self.pts *= val @@ -136,7 +138,11 @@ class SegIEPolys(): def dump(self): return {'polys' : [ poly.dump() for poly in self.polys ] } - + + def mult_points(self, val): + for poly in self.polys: + poly.mult_points(val) + @staticmethod def load(data=None): ie_polys = SegIEPolys() diff --git a/main.py b/main.py index cad3e92..d9fd4fd 100644 --- a/main.py +++ b/main.py @@ -255,6 +255,16 @@ if __name__ == "__main__": p.set_defaults(func=process_faceset_enhancer) + + p = facesettool_parser.add_parser ("resize", help="Resize DFL faceset.") + p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory of aligned faces.") + + def process_faceset_resizer(arguments): + osex.set_process_lowest_prio() + from mainscripts import FacesetResizer + FacesetResizer.process_folder ( Path(arguments.input_dir) ) + p.set_defaults(func=process_faceset_resizer) + def process_dev_test(arguments): osex.set_process_lowest_prio() from mainscripts import dev_misc From 8d46cd94fdcb0b962ec7cb081636163de4ece11c Mon Sep 17 00:00:00 2001 From: iperov Date: Tue, 23 Mar 2021 16:07:50 +0400 Subject: [PATCH 04/47] upd pretrain option help --- models/Model_SAEHD/Model.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py index 2772870..79329ff 100644 --- a/models/Model_SAEHD/Model.py +++ b/models/Model_SAEHD/Model.py @@ -170,7 +170,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change color distribution of src samples close to dst samples. Try all modes to find the best.") self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.") - self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with large amount of various faces. After that, model can be used to train the fakes more quickly.") + self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with large amount of various faces. After that, model can be used to train the fakes more quickly. Forces random_warp=Y, random_flips=Y, gan_power=0.0, lr_dropout=N, styles=0.0, uniform_yaw=Y") if self.options['pretrain'] and self.get_pretraining_data_path() is None: raise Exception("pretraining_data_path is not defined") @@ -220,7 +220,9 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... self.gan_power = gan_power = 0.0 if self.pretrain else self.options['gan_power'] random_warp = False if self.pretrain else self.options['random_warp'] - + random_src_flip = self.random_src_flip if not self.pretrain else True + random_dst_flip = self.random_dst_flip if not self.pretrain else True + if self.pretrain: self.options_show_override['gan_power'] = 0.0 self.options_show_override['random_warp'] = False @@ -631,7 +633,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... 
self.set_training_data_generators ([ SampleGeneratorFace(training_data_src_path, random_ct_samples_path=random_ct_samples_path, debug=self.is_debug(), batch_size=self.get_batch_size(), - sample_process_options=SampleProcessor.Options(random_flip=self.random_src_flip), + sample_process_options=SampleProcessor.Options(random_flip=random_src_flip), output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':random_warp, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, @@ -641,7 +643,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... generators_count=src_generators_count ), SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(), - sample_process_options=SampleProcessor.Options(random_flip=self.random_dst_flip), + sample_process_options=SampleProcessor.Options(random_flip=random_dst_flip), output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':random_warp, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, From 0ad94211017aeb754c9b971368f51307934db47a Mon Sep 17 00:00:00 2001 From: iperov Date: Tue, 23 Mar 2021 17:22:42 +0400 Subject: [PATCH 05/47] upd windows magnet link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 42b5383..614d798 100644 --- a/README.md +++ b/README.md @@ -194,7 +194,7 @@ Unfortunately, there is no "make everything ok" button in DeepFaceLab. You shoul -Windows (magnet link) +Windows (magnet link) Last release. Use torrent client to download. 
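For reference, the resize tool registered in patch 03 (its worker module arrives in the next patch) would be invoked through main.py roughly as follows — the `facesettool` group name is inferred from the `facesettool_parser` variable, the workspace path is illustrative, and the target image size is prompted for interactively:

python main.py facesettool resize --input-dir workspace/data_src/aligned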
From b7245d888b4ba2b5b5d5a786403a5e9bd23b18b5 Mon Sep 17 00:00:00 2001 From: iperov Date: Wed, 24 Mar 2021 13:00:47 +0400 Subject: [PATCH 06/47] missing file --- mainscripts/FacesetResizer.py | 147 ++++++++++++++++++++++++++++++++++ 1 file changed, 147 insertions(+) create mode 100644 mainscripts/FacesetResizer.py diff --git a/mainscripts/FacesetResizer.py b/mainscripts/FacesetResizer.py new file mode 100644 index 0000000..f2ec391 --- /dev/null +++ b/mainscripts/FacesetResizer.py @@ -0,0 +1,147 @@ +import multiprocessing +import shutil + +from core import pathex +from core.cv2ex import * +from core.interact import interact as io +from core.joblib import Subprocessor +from DFLIMG import * +from facelib import LandmarksProcessor, FaceType + + +class FacesetResizerSubprocessor(Subprocessor): + + #override + def __init__(self, image_paths, output_dirpath, image_size): + self.image_paths = image_paths + self.output_dirpath = output_dirpath + self.image_size = image_size + self.result = [] + + super().__init__('FacesetResizer', FacesetResizerSubprocessor.Cli, 600) + + #override + def on_clients_initialized(self): + io.progress_bar (None, len (self.image_paths)) + + #override + def on_clients_finalized(self): + io.progress_bar_close() + + #override + def process_info_generator(self): + base_dict = {'output_dirpath':self.output_dirpath, 'image_size':self.image_size} + + for device_idx in range( min(8, multiprocessing.cpu_count()) ): + client_dict = base_dict.copy() + device_name = f'CPU #{device_idx}' + client_dict['device_name'] = device_name + yield device_name, {}, client_dict + + #override + def get_data(self, host_dict): + if len (self.image_paths) > 0: + return self.image_paths.pop(0) + + #override + def on_data_return (self, host_dict, data): + self.image_paths.insert(0, data) + + #override + def on_result (self, host_dict, data, result): + io.progress_bar_inc(1) + if result[0] == 1: + self.result +=[ (result[1], result[2]) ] + + #override + def get_result(self): + return self.result + + class Cli(Subprocessor.Cli): + + #override + def on_initialize(self, client_dict): + self.output_dirpath = client_dict['output_dirpath'] + self.image_size = client_dict['image_size'] + self.log_info (f"Running on { client_dict['device_name'] }") + + #override + def process_data(self, filepath): + try: + dflimg = DFLIMG.load (filepath) + if dflimg is None or not dflimg.has_data(): + self.log_err (f"{filepath.name} is not a dfl image file") + else: + dfl_dict = dflimg.get_dict() + + img = cv2_imread(filepath) + h,w = img.shape[:2] + if h != w: + raise Exception(f'w != h in {filepath}') + + image_size = self.image_size + scale = w / image_size + + img = cv2.resize(img, (image_size, image_size), interpolation=cv2.INTER_LANCZOS4) + + output_filepath = self.output_dirpath / filepath.name + cv2_imwrite ( str(output_filepath), img, [int(cv2.IMWRITE_JPEG_QUALITY), 100] ) + + dflimg = DFLIMG.load (output_filepath) + dflimg.set_dict(dfl_dict) + + + lmrks = dflimg.get_landmarks() + lmrks /= scale + dflimg.set_landmarks(lmrks) + + seg_ie_polys = dflimg.get_seg_ie_polys() + seg_ie_polys.mult_points( 1.0 / scale) + dflimg.set_seg_ie_polys(seg_ie_polys) + + mat = dflimg.get_image_to_face_mat() + if mat is not None: + face_type = FaceType.fromString ( dflimg.get_face_type() ) + mat = LandmarksProcessor.get_transform_mat ( dflimg.get_source_landmarks(), image_size, face_type ) + dflimg.set_image_to_face_mat(mat) + dflimg.save() + + return (1, filepath, output_filepath) + except: + self.log_err (f"Exception occured while 
processing file {filepath}. Error: {traceback.format_exc()}") + + return (0, filepath, None) + +def process_folder ( dirpath): + + image_size = io.input_int(f"New image size", 512, valid_range=[256,2048]) + + + output_dirpath = dirpath.parent / (dirpath.name + '_resized') + output_dirpath.mkdir (exist_ok=True, parents=True) + + dirpath_parts = '/'.join( dirpath.parts[-2:]) + output_dirpath_parts = '/'.join( output_dirpath.parts[-2:] ) + io.log_info (f"Resizing faceset in {dirpath_parts}") + io.log_info ( f"Processing to {output_dirpath_parts}") + + output_images_paths = pathex.get_image_paths(output_dirpath) + if len(output_images_paths) > 0: + for filename in output_images_paths: + Path(filename).unlink() + + image_paths = [Path(x) for x in pathex.get_image_paths( dirpath )] + result = FacesetResizerSubprocessor ( image_paths, output_dirpath, image_size).run() + + is_merge = io.input_bool (f"\r\nMerge {output_dirpath_parts} to {dirpath_parts} ?", True) + if is_merge: + io.log_info (f"Copying processed files to {dirpath_parts}") + + for (filepath, output_filepath) in result: + try: + shutil.copy (output_filepath, filepath) + except: + pass + + io.log_info (f"Removing {output_dirpath_parts}") + shutil.rmtree(output_dirpath) From 1652bffeb0bb242dc06516880f754abfea1e207e Mon Sep 17 00:00:00 2001 From: iperov Date: Wed, 24 Mar 2021 13:42:57 +0400 Subject: [PATCH 07/47] change in xseg apply and fetch --- mainscripts/XSegUtil.py | 65 ++++++++++++++++++++++++++++++++--------- 1 file changed, 51 insertions(+), 14 deletions(-) diff --git a/mainscripts/XSegUtil.py b/mainscripts/XSegUtil.py index 9f9f893..b95bb2c 100644 --- a/mainscripts/XSegUtil.py +++ b/mainscripts/XSegUtil.py @@ -10,7 +10,7 @@ from core.cv2ex import * from core.interact import interact as io from core.leras import nn from DFLIMG import * -from facelib import XSegNet +from facelib import XSegNet, LandmarksProcessor, FaceType def apply_xseg(input_path, model_path): @@ -19,7 +19,16 @@ def apply_xseg(input_path, model_path): if not model_path.exists(): raise ValueError(f'{model_path} not found. Please ensure it exists.') - + + face_type = io.input_str ("XSeg model face type", 'same', ['h','mf','f','wf','head','same'], help_message="Specify face type of trained XSeg model. For example if XSeg model trained as WF, but faceset is HEAD, specify WF to apply xseg only on WF part of HEAD. 
Default is 'same'").lower() + if face_type == 'same': + face_type = None + else: + face_type = {'h' : FaceType.HALF, + 'mf' : FaceType.MID_FULL, + 'f' : FaceType.FULL, + 'wf' : FaceType.WHOLE_FACE, + 'head' : FaceType.HEAD}[face_type] io.log_info(f'Applying trained XSeg model to {input_path.name}/ folder.') device_config = nn.DeviceConfig.ask_choose_device(choose_only_one=True) @@ -30,7 +39,7 @@ def apply_xseg(input_path, model_path): weights_file_root=model_path, data_format=nn.data_format, raise_on_no_model_files=True) - res = xseg.get_resolution() + xseg_res = xseg.get_resolution() images_paths = pathex.get_image_paths(input_path, return_Path_class=True) @@ -42,15 +51,36 @@ def apply_xseg(input_path, model_path): img = cv2_imread(filepath).astype(np.float32) / 255.0 h,w,c = img.shape - if w != res: - img = cv2.resize( img, (res,res), interpolation=cv2.INTER_CUBIC ) - if len(img.shape) == 2: - img = img[...,None] - - mask = xseg.extract(img) - mask[mask < 0.5]=0 - mask[mask >= 0.5]=1 + img_face_type = FaceType.fromString( dflimg.get_face_type() ) + if face_type is not None and img_face_type != face_type: + lmrks = dflimg.get_source_landmarks() + + fmat = LandmarksProcessor.get_transform_mat(lmrks, w, face_type) + imat = LandmarksProcessor.get_transform_mat(lmrks, w, img_face_type) + + g_p = LandmarksProcessor.transform_points (np.float32([(0,0),(w,0),(0,w) ]), fmat, True) + g_p2 = LandmarksProcessor.transform_points (g_p, imat) + + mat = cv2.getAffineTransform( g_p2, np.float32([(0,0),(w,0),(0,w) ]) ) + + img = cv2.warpAffine(img, mat, (w, w), cv2.INTER_LANCZOS4) + img = cv2.resize(img, (xseg_res, xseg_res), interpolation=cv2.INTER_LANCZOS4) + else: + if w != xseg_res: + img = cv2.resize( img, (xseg_res,xseg_res), interpolation=cv2.INTER_LANCZOS4 ) + + if len(img.shape) == 2: + img = img[...,None] + + mask = xseg.extract(img) + + if face_type is not None and img_face_type != face_type: + mask = cv2.resize(mask, (w, w), interpolation=cv2.INTER_LANCZOS4) + mask = cv2.warpAffine( mask, mat, (w,w), np.zeros( (h,w,c), dtype=np.float), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4) + mask = cv2.resize(mask, (xseg_res, xseg_res), interpolation=cv2.INTER_LANCZOS4) + mask[mask < 0.5]=0 + mask[mask >= 0.5]=1 dflimg.set_xseg_mask(mask) dflimg.save() @@ -67,7 +97,8 @@ def fetch_xseg(input_path): images_paths = pathex.get_image_paths(input_path, return_Path_class=True) - files_copied = 0 + + files_copied = [] for filepath in io.progress_bar_generator(images_paths, "Processing"): dflimg = DFLIMG.load(filepath) if dflimg is None or not dflimg.has_data(): @@ -77,10 +108,16 @@ def fetch_xseg(input_path): ie_polys = dflimg.get_seg_ie_polys() if ie_polys.has_polys(): - files_copied += 1 + files_copied.append(filepath) shutil.copy ( str(filepath), str(output_path / filepath.name) ) - io.log_info(f'Files copied: {files_copied}') + io.log_info(f'Files copied: {len(files_copied)}') + + is_delete = io.input_bool (f"\r\nDelete original files?", True) + if is_delete: + for filepath in files_copied: + Path(filepath).unlink() + def remove_xseg(input_path): if not input_path.exists(): From 30ef9c08088e371bfddb691edb429372125f3e6d Mon Sep 17 00:00:00 2001 From: iperov Date: Sun, 28 Mar 2021 20:03:38 +0400 Subject: [PATCH 08/47] removing alipay donation --- README.md | 4 ---- doc/Alipay_donation.jpg | Bin 64136 -> 0 bytes 2 files changed, 4 deletions(-) delete mode 100644 doc/Alipay_donation.jpg diff --git a/README.md b/README.md index 614d798..701917a 100644 --- a/README.md +++ b/README.md @@ -333,10 +333,6 @@ QQ 
951138799

bitcoin:bc1qkhh7h0gwwhxgg6h6gpllfgstkd645fefrd5s6z

-
-Alipay 捐款
-
-

### Collect facesets

diff --git a/doc/Alipay_donation.jpg b/doc/Alipay_donation.jpg
deleted file mode 100644
index 48781e17c4bd9f08f8c1b97e8c69c46d8b64c51a..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 64136
[base85 payload of the deleted 64136-byte JPEG omitted]
z+oV@$-?WPhr5$(IR)u!vXiJEQ*ifuaXr&PWVo}pkuosavt7ZN@JWO9bpNq8`E6zdG zNk!$xnXNNA$Bm8o#GD)Vl4V#hyN{TvHFASN)WWf?V)iN${PX!N(E?vx32};bb`K73xMEXt4EZGzc zlxZ-bSCrDQN>#cre+r<)r{KkKBR8RgP5@QBogO)NI{{2BlI zkeC4)^%i1B?#0zj^tAmJ@a~X3V&DuJt z=vwFiQ`#j-A_r_i>?m-dShiH*5!N0PcrOQwB;}JX^+t$irN`slqlULypC78T zou##?F7a^h3EQe8;oouq z9P4W4xnPWF}MsUEl^-JU{))V!)2or);>p&H)J#8VXX#PFLFk zq&adgjmF3O-X*;ihVpfFO*%&#rVRw~cxr3EcQt#nZ6PgC_1VC5_+yJ3{b&c$AaAZf z9N%Gx*eC!4Nu|TpP^2@$MoSl{VzlqAP`8k1fUAi|^%`EY!_)*NSC(Hc-854Wy<(ks z+K^=&TFFs7BXR;6lref7zN9fI!PkH#Ogj-jll4M0*8`4|tGz;G(U)LB5kJFsuYY=TIQl- zAli@Cu`@L)1>FiIZZIWd{OHh)Nz6A1=?()9OjGh&{t==KpM++JaAUDX2KU5cL?ECu z)r1aK%yyOjO6Md8ph1${5@spxtZoIo7V(TM+m1B;IVsl#W1uO2&!ZY6n-dm{BytE{U1ywx;#!RpXJBTg}9 iwMK8v_&n*-iZ`@1wKH=V*ARU*T^go=-b=c_mj4$3eyZ>Q From d63294a548f885b737da8dc6c66b6f38afc4cedf Mon Sep 17 00:00:00 2001 From: iperov Date: Mon, 29 Mar 2021 09:33:50 +0400 Subject: [PATCH 09/47] fix 'raw-rgb' merge --- merger/MergeMasked.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/merger/MergeMasked.py b/merger/MergeMasked.py index 1d3de4a..4e94160 100644 --- a/merger/MergeMasked.py +++ b/merger/MergeMasked.py @@ -142,7 +142,9 @@ def MergeMaskedFace (predictor_func, predictor_input_shape, elif 'raw' in cfg.mode: if cfg.mode == 'raw-rgb': - out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC ) + out_img_face = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.empty_like(img_bgr), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC) + out_img_face_mask = cv2.warpAffine( np.ones_like(prd_face_bgr), face_output_mat, img_size, np.empty_like(img_bgr), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC) + out_img = img_bgr*(1-out_img_face_mask) + out_img_face*out_img_face_mask out_merging_mask_a = img_face_mask_a elif cfg.mode == 'raw-predict': out_img = prd_face_bgr From bcfc794a1b47ec0b59a6403528646ceb24ff3e71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98?= <79362520+legitnull@users.noreply.github.com> Date: Fri, 2 Apr 2021 06:13:30 -0600 Subject: [PATCH 10/47] tensorflow-gpu 2.4.0 depends on h5py~=2.10.0 (#5301) ERROR: Cannot install -r ./DeepFaceLab/requirements-cuda.txt (line 9) and h5py==2.9.0 because these package versions have conflicting dependencies. 
From bcfc794a1b47ec0b59a6403528646ceb24ff3e71 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=98?= <79362520+legitnull@users.noreply.github.com>
Date: Fri, 2 Apr 2021 06:13:30 -0600
Subject: [PATCH 10/47] tensorflow-gpu 2.4.0 depends on h5py~=2.10.0 (#5301)

ERROR: Cannot install -r ./DeepFaceLab/requirements-cuda.txt (line 9) and h5py==2.9.0 because these package versions have conflicting dependencies.

The conflict is caused by:
    The user requested h5py==2.9.0
    tensorflow-gpu 2.4.0 depends on h5py~=2.10.0
---
 requirements-cuda.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements-cuda.txt b/requirements-cuda.txt
index 5fb89ee..e31bbba 100644
--- a/requirements-cuda.txt
+++ b/requirements-cuda.txt
@@ -1,10 +1,10 @@
 tqdm
 numpy==1.19.3
-h5py==2.9.0
+h5py==2.10.0
 opencv-python==4.1.0.25
 ffmpeg-python==0.1.17
 scikit-image==0.14.2
 scipy==1.4.1
 colorama
 tensorflow-gpu==2.4.0
-pyqt5
\ No newline at end of file
+pyqt5

From 243f73fafc369bce21fc57cacecaace3bb6a33ad Mon Sep 17 00:00:00 2001
From: iperov
Date: Sat, 10 Apr 2021 09:58:14 +0400
Subject: [PATCH 11/47] core.imagelib: update

---
 core/imagelib/__init__.py    |   4 +-
 core/imagelib/filters.py     |  32 ++++++-
 core/imagelib/sd/__init__.py |   2 +-
 core/imagelib/sd/draw.py     | 180 ++++++++++++++++++++++++++++++++---
 core/imagelib/warp.py        |   2 +-
 5 files changed, 201 insertions(+), 19 deletions(-)

diff --git a/core/imagelib/__init__.py b/core/imagelib/__init__.py
index 4e8b83d..682bd65 100644
--- a/core/imagelib/__init__.py
+++ b/core/imagelib/__init__.py
@@ -24,4 +24,6 @@ from .filters import apply_random_rgb_levels, \
                      apply_random_hsv_shift, \
                      apply_random_motion_blur, \
                      apply_random_gaussian_blur, \
-                     apply_random_bilinear_resize
+                     apply_random_nearest_resize, \
+                     apply_random_bilinear_resize, \
+                     apply_random_jpeg_compress

diff --git a/core/imagelib/filters.py b/core/imagelib/filters.py
index ba51e07..19655bf 100644
--- a/core/imagelib/filters.py
+++ b/core/imagelib/filters.py
@@ -66,8 +66,7 @@ def apply_random_gaussian_blur( img, chance, kernel_max_size, mask=None, rnd_sta
 
     return result
 
-
-def apply_random_bilinear_resize( img, chance, max_size_per, mask=None, rnd_state=None ):
+def apply_random_resize( img, chance, max_size_per, interpolation=cv2.INTER_LINEAR, mask=None, rnd_state=None ):
     if rnd_state is None:
         rnd_state = np.random
 
@@ -79,9 +78,34 @@ def apply_random_bilinear_resize( img, chance, max_size_per, mask=None, rnd_stat
         rw = w - int( trg * int(w*(max_size_per/100.0)) )
         rh = h - int( trg * int(h*(max_size_per/100.0)) )
 
-        result = cv2.resize (result, (rw,rh), interpolation=cv2.INTER_LINEAR )
-        result = cv2.resize (result, (w,h), interpolation=cv2.INTER_LINEAR )
+        result = cv2.resize (result, (rw,rh), interpolation=interpolation )
+        result = cv2.resize (result, (w,h), interpolation=interpolation )
         if mask is not None:
             result = img*(1-mask) + result*mask
 
+    return result
+
+def apply_random_nearest_resize( img, chance, max_size_per, mask=None, rnd_state=None ):
+    return apply_random_resize( img, chance, max_size_per, interpolation=cv2.INTER_NEAREST, mask=mask, rnd_state=rnd_state )
+
+def apply_random_bilinear_resize( img, chance, max_size_per, mask=None, rnd_state=None ):
+    return apply_random_resize( img, chance, max_size_per, interpolation=cv2.INTER_LINEAR, mask=mask, rnd_state=rnd_state )
+
+def apply_random_jpeg_compress( img, chance, mask=None, rnd_state=None ):
+    if rnd_state is None:
+        rnd_state = np.random
+
+    result = img
+    if rnd_state.randint(100) < np.clip(chance, 0, 100):
+        h,w,c = result.shape
+
+        quality = rnd_state.randint(10,101)
+
+        ret, result = cv2.imencode('.jpg', np.clip(img*255, 0,255).astype(np.uint8), [int(cv2.IMWRITE_JPEG_QUALITY), quality] )
+        if ret == True:
+            result = cv2.imdecode(result, flags=cv2.IMREAD_UNCHANGED)
+            result = result.astype(np.float32) / 255.0
+            if mask is not None:
+                result = img*(1-mask) + result*mask
+
+    return result
\ No newline at end of file
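The helpers above share one pattern: fire with probability chance (a percentage), degrade the image, then optionally confine the effect to a float blend mask. A short usage sketch under the same conventions as the rest of core.imagelib, float32 BGR in [0..1]; the random image is a stand-in for a face sample, and the snippet itself is not part of the patch:

    import numpy as np
    from core import imagelib

    img = np.random.rand(256, 256, 3).astype(np.float32)

    # ~50% of calls: shrink by up to 75% and scale back up, bilinear both ways
    img = imagelib.apply_random_bilinear_resize(img, 50, 75)

    # ~50% of calls: in-memory JPEG round-trip at a random quality from 10 to 100
    img = imagelib.apply_random_jpeg_compress(img, 50)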
diff --git a/core/imagelib/sd/__init__.py b/core/imagelib/sd/__init__.py
index 2eafd4c..1cddc19 100644
--- a/core/imagelib/sd/__init__.py
+++ b/core/imagelib/sd/__init__.py
@@ -1,2 +1,2 @@
-from .draw import *
+from .draw import circle_faded, random_circle_faded, bezier, random_bezier_split_faded, random_faded
 from .calc import *
\ No newline at end of file
diff --git a/core/imagelib/sd/draw.py b/core/imagelib/sd/draw.py
index 77e9a46..711ad33 100644
--- a/core/imagelib/sd/draw.py
+++ b/core/imagelib/sd/draw.py
@@ -1,23 +1,36 @@
 """
 Signed distance drawing functions using numpy.
 """
+import math
 import numpy as np
 from numpy import linalg as npla
 
-def circle_faded( hw, center, fade_dists ):
+
+def vector2_dot(a,b):
+    return a[...,0]*b[...,0]+a[...,1]*b[...,1]
+
+def vector2_dot2(a):
+    return a[...,0]*a[...,0]+a[...,1]*a[...,1]
+
+def vector2_cross(a,b):
+    return a[...,0]*b[...,1]-a[...,1]*b[...,0]
+
+
+def circle_faded( wh, center, fade_dists ):
     """
     returns drawn circle in [h,w,1] output range [0..1.0] float32
-    hw = [h,w] resolution
-    center = [y,x] center of circle
+    wh = [w,h] resolution
+    center = [x,y] center of circle
     fade_dists = [fade_start, fade_end] fade values
     """
-    h,w = hw
+    w,h = wh
     pts = np.empty( (h,w,2), dtype=np.float32 )
-    pts[...,1] = np.arange(h)[None,:]
     pts[...,0] = np.arange(w)[:,None]
+    pts[...,1] = np.arange(h)[None,:]
+
     pts = pts.reshape ( (h*w, -1) )
 
     pts_dists = np.abs ( npla.norm(pts-center, axis=-1) )
@@ -30,15 +43,158 @@
     pts_dists = np.clip( 1-pts_dists, 0, 1)
 
     return pts_dists.reshape ( (h,w,1) ).astype(np.float32)
+
+
+def bezier( wh, A, B, C ):
+    """
+    returns drawn bezier in [h,w,1] output range float32,
+    every pixel contains signed distance to bezier line
+
+    wh [w,h] resolution
+    A,B,C points [x,y]
+    """
 
-def random_circle_faded ( hw, rnd_state=None ):
+    width,height = wh
+
+    A = np.float32(A)
+    B = np.float32(B)
+    C = np.float32(C)
+
+
+    pos = np.empty( (height,width,2), dtype=np.float32 )
+    pos[...,0] = np.arange(width)[:,None]
+    pos[...,1] = np.arange(height)[None,:]
+
+
+    a = B-A
+    b = A - 2.0*B + C
+    c = a * 2.0
+    d = A - pos
+
+    b_dot = vector2_dot(b,b)
+    if b_dot == 0.0:
+        return np.zeros( (height,width), dtype=np.float32 )
+
+    kk = 1.0 / b_dot
+
+    kx = kk * vector2_dot(a,b)
+    ky = kk * (2.0*vector2_dot(a,a)+vector2_dot(d,b))/3.0;
+    kz = kk * vector2_dot(d,a);
+
+    res = 0.0;
+    sgn = 0.0;
+
+    p = ky - kx*kx;
+
+    p3 = p*p*p;
+    q = kx*(2.0*kx*kx - 3.0*ky) + kz;
+    h = q*q + 4.0*p3;
+
+    hp_sel = h >= 0.0
+
+    hp_p = h[hp_sel]
+    hp_p = np.sqrt(hp_p)
+
+    hp_x = ( np.stack( (hp_p,-hp_p), -1) -q[hp_sel,None] ) / 2.0
+    hp_uv = np.sign(hp_x) * np.power( np.abs(hp_x), [1.0/3.0, 1.0/3.0] )
+    hp_t = np.clip( hp_uv[...,0] + hp_uv[...,1] - kx, 0.0, 1.0 )
+
+    hp_t = hp_t[...,None]
+    hp_q = d[hp_sel]+(c+b*hp_t)*hp_t
+    hp_res = vector2_dot2(hp_q)
+    hp_sgn = vector2_cross(c+2.0*b*hp_t,hp_q)
+
+    hl_sel = h < 0.0
+
+    hl_q = q[hl_sel]
+    hl_p = p[hl_sel]
+    hl_z = np.sqrt(-hl_p)
+    hl_v = np.arccos( hl_q / (hl_p*hl_z*2.0)) / 3.0
+
+    hl_m = np.cos(hl_v)
+    hl_n = np.sin(hl_v)*1.732050808;
+
+    hl_t = np.clip( np.stack( (hl_m+hl_m,-hl_n-hl_m,hl_n-hl_m), -1)*hl_z[...,None]-kx, 0.0, 1.0 );
+
+    hl_d = d[hl_sel]
+
+    hl_qx = hl_d+(c+b*hl_t[...,0:1])*hl_t[...,0:1]
+
+    hl_dx = vector2_dot2(hl_qx)
+    hl_sx = vector2_cross(c+2.0*b*hl_t[...,0:1], hl_qx)
+
+    hl_qy = hl_d+(c+b*hl_t[...,1:2])*hl_t[...,1:2]
+    hl_dy = vector2_dot2(hl_qy)
+    hl_sy = vector2_cross(c+2.0*b*hl_t[...,1:2],hl_qy);
+
+    hl_dx_l_dy = hl_dx < hl_dy
+    hl_dx_ge_dy = hl_dx >= hl_dy
+
+    hl_res = np.empty_like(hl_dx)
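+    # note: the two branches above solve the per-pixel closest-point cubic for
+    # the quadratic bezier; h >= 0 yields one real root (Cardano's formula),
+    # h < 0 yields three real roots (trigonometric form, 1.732050808 ~ sqrt(3)),
+    # of which only the first two candidates are evaluated; the nearer of the
+    # squared distances hl_dx/hl_dy is selected below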
hl_res[hl_dx_l_dy] = hl_dx[hl_dx_l_dy] + hl_res[hl_dx_ge_dy] = hl_dy[hl_dx_ge_dy] + + hl_sgn = np.empty_like(hl_sx) + hl_sgn[hl_dx_l_dy] = hl_sx[hl_dx_l_dy] + hl_sgn[hl_dx_ge_dy] = hl_sy[hl_dx_ge_dy] + + res = np.empty( (height, width), np.float32 ) + res[hp_sel] = hp_res + res[hl_sel] = hl_res + + sgn = np.empty( (height, width), np.float32 ) + sgn[hp_sel] = hp_sgn + sgn[hl_sel] = hl_sgn + + sgn = np.sign(sgn) + res = np.sqrt(res)*sgn + + return res[...,None] + +def random_faded(wh): + """ + apply one of them: + random_circle_faded + random_bezier_split_faded + """ + rnd = np.random.randint(2) + if rnd == 0: + return random_circle_faded(wh) + elif rnd == 1: + return random_bezier_split_faded(wh) + +def random_circle_faded ( wh, rnd_state=None ): if rnd_state is None: rnd_state = np.random - h,w = hw - hw_max = max(h,w) - fade_start = rnd_state.randint(hw_max) - fade_end = fade_start + rnd_state.randint(hw_max- fade_start) + w,h = wh + wh_max = max(w,h) + fade_start = rnd_state.randint(wh_max) + fade_end = fade_start + rnd_state.randint(wh_max- fade_start) - return circle_faded (hw, [ rnd_state.randint(h), rnd_state.randint(w) ], - [fade_start, fade_end] ) \ No newline at end of file + return circle_faded (wh, [ rnd_state.randint(h), rnd_state.randint(w) ], + [fade_start, fade_end] ) + +def random_bezier_split_faded( wh ): + width, height = wh + + degA = np.random.randint(360) + degB = np.random.randint(360) + degC = np.random.randint(360) + + deg_2_rad = math.pi / 180.0 + + center = np.float32([width / 2.0, height / 2.0]) + + radius = max(width, height) + + A = center + radius*np.float32([ math.sin( degA * deg_2_rad), math.cos( degA * deg_2_rad) ] ) + B = center + np.random.randint(radius)*np.float32([ math.sin( degB * deg_2_rad), math.cos( degB * deg_2_rad) ] ) + C = center + radius*np.float32([ math.sin( degC * deg_2_rad), math.cos( degC * deg_2_rad) ] ) + + x = bezier( (width,height), A, B, C ) + + x = x / (1+np.random.randint(radius)) + 0.5 + + x = np.clip(x, 0, 1) + return x diff --git a/core/imagelib/warp.py b/core/imagelib/warp.py index ac4f324..37abc36 100644 --- a/core/imagelib/warp.py +++ b/core/imagelib/warp.py @@ -2,7 +2,7 @@ import numpy as np import cv2 from core import randomex -def gen_warp_params (w, flip, rotation_range=[-10,10], scale_range=[-0.5, 0.5], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05], rnd_state=None ): +def gen_warp_params (w, flip=False, rotation_range=[-10,10], scale_range=[-0.5, 0.5], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05], rnd_state=None ): if rnd_state is None: rnd_state = np.random From 457a39c0939e9b195d5f7b48b56ed93e21217521 Mon Sep 17 00:00:00 2001 From: iperov Date: Sat, 10 Apr 2021 09:58:33 +0400 Subject: [PATCH 12/47] mathlib update --- core/mathlib/__init__.py | 74 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 73 insertions(+), 1 deletion(-) diff --git a/core/mathlib/__init__.py b/core/mathlib/__init__.py index a11e725..7e5fa13 100644 --- a/core/mathlib/__init__.py +++ b/core/mathlib/__init__.py @@ -1,7 +1,12 @@ -import numpy as np import math + +import cv2 +import numpy as np +import numpy.linalg as npla + from .umeyama import umeyama + def get_power_of_two(x): i = 0 while (1 << i) < x: @@ -23,3 +28,70 @@ def rotationMatrixToEulerAngles(R) : def polygon_area(x,y): return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1))) + +def rotate_point(origin, point, deg): + """ + Rotate a point counterclockwise by a given angle around a given origin. + + The angle should be given in radians. 
+ """ + ox, oy = origin + px, py = point + + rad = deg * math.pi / 180.0 + qx = ox + math.cos(rad) * (px - ox) - math.sin(rad) * (py - oy) + qy = oy + math.sin(rad) * (px - ox) + math.cos(rad) * (py - oy) + return np.float32([qx, qy]) + +def transform_points(points, mat, invert=False): + if invert: + mat = cv2.invertAffineTransform (mat) + points = np.expand_dims(points, axis=1) + points = cv2.transform(points, mat, points.shape) + points = np.squeeze(points) + return points + + +def transform_mat(mat, res, tx, ty, rotation, scale): + """ + transform mat in local space of res + scale -> translate -> rotate + + tx,ty float + rotation int degrees + scale float + """ + + + lt, rt, lb, ct = transform_points ( np.float32([(0,0),(res,0),(0,res),(res / 2, res/2) ]),mat, True) + + hor_v = (rt-lt).astype(np.float32) + hor_size = npla.norm(hor_v) + hor_v /= hor_size + + ver_v = (lb-lt).astype(np.float32) + ver_size = npla.norm(ver_v) + ver_v /= ver_size + + bt_diag_vec = (rt-ct).astype(np.float32) + half_diag_len = npla.norm(bt_diag_vec) + bt_diag_vec /= half_diag_len + + tb_diag_vec = np.float32( [ -bt_diag_vec[1], bt_diag_vec[0] ] ) + + rt = ct + bt_diag_vec*half_diag_len*scale + lb = ct - bt_diag_vec*half_diag_len*scale + lt = ct - tb_diag_vec*half_diag_len*scale + + rt[0] += tx*hor_size + lb[0] += tx*hor_size + lt[0] += tx*hor_size + rt[1] += ty*ver_size + lb[1] += ty*ver_size + lt[1] += ty*ver_size + + rt = rotate_point(ct, rt, rotation) + lb = rotate_point(ct, lb, rotation) + lt = rotate_point(ct, lt, rotation) + + return cv2.getAffineTransform( np.float32([lt, rt, lb]), np.float32([ [0,0], [res,0], [0,res] ]) ) From d676a365f78ab0cb3e270dd81979cd1200d2a929 Mon Sep 17 00:00:00 2001 From: iperov Date: Sat, 10 Apr 2021 09:59:49 +0400 Subject: [PATCH 13/47] XSeg training generator: added additional sample augmentations --- samplelib/SampleGeneratorFaceXSeg.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/samplelib/SampleGeneratorFaceXSeg.py b/samplelib/SampleGeneratorFaceXSeg.py index 9c7cf6e..9b15e28 100644 --- a/samplelib/SampleGeneratorFaceXSeg.py +++ b/samplelib/SampleGeneratorFaceXSeg.py @@ -79,6 +79,7 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): random_bilinear_resize_chance, random_bilinear_resize_max_size_per = 25,75 motion_blur_chance, motion_blur_mb_max_size = 25, 5 gaussian_blur_chance, gaussian_blur_kernel_max_size = 25, 5 + random_jpeg_compress_chance = 25 def gen_img_mask(sample): img = sample.load_bgr() @@ -130,14 +131,15 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): bg_img, bg_mask = gen_img_mask(bg_sample) bg_wp = imagelib.gen_warp_params(resolution, True, rotation_range=[-180,180], scale_range=[-0.10, 0.10], tx_range=[-0.10, 0.10], ty_range=[-0.10, 0.10] ) - bg_img = imagelib.warp_by_params (bg_wp, bg_img, can_warp=False, can_transform=True, can_flip=True, border_replicate=False) + bg_img = imagelib.warp_by_params (bg_wp, bg_img, can_warp=False, can_transform=True, can_flip=True, border_replicate=True) bg_mask = imagelib.warp_by_params (bg_wp, bg_mask, can_warp=False, can_transform=True, can_flip=True, border_replicate=False) - c_mask = (1-bg_mask) * (1-mask) - img = img*(1-c_mask) + bg_img * c_mask + c_mask = 1.0 - (1-bg_mask) * (1-mask) + rnd = np.random.uniform() + img = img*(c_mask) + img*(1-c_mask)*rnd + bg_img*(1-c_mask)*(1-rnd) warp_params = imagelib.gen_warp_params(resolution, random_flip, rotation_range=rotation_range, scale_range=scale_range, tx_range=tx_range, ty_range=ty_range ) - img = 
imagelib.warp_by_params (warp_params, img, can_warp=True, can_transform=True, can_flip=True, border_replicate=False) + img = imagelib.warp_by_params (warp_params, img, can_warp=True, can_transform=True, can_flip=True, border_replicate=True) mask = imagelib.warp_by_params (warp_params, mask, can_warp=True, can_transform=True, can_flip=True, border_replicate=False) img = np.clip(img.astype(np.float32), 0, 1) @@ -152,8 +154,14 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): img = imagelib.apply_random_motion_blur( img, motion_blur_chance, motion_blur_mb_max_size, mask=sd.random_circle_faded ([resolution,resolution])) img = imagelib.apply_random_gaussian_blur( img, gaussian_blur_chance, gaussian_blur_kernel_max_size, mask=sd.random_circle_faded ([resolution,resolution])) - img = imagelib.apply_random_bilinear_resize( img, random_bilinear_resize_chance, random_bilinear_resize_max_size_per, mask=sd.random_circle_faded ([resolution,resolution])) - + + if np.random.randint(2) == 0: + img = imagelib.apply_random_nearest_resize( img, random_bilinear_resize_chance, random_bilinear_resize_max_size_per, mask=sd.random_circle_faded ([resolution,resolution])) + else: + img = imagelib.apply_random_bilinear_resize( img, random_bilinear_resize_chance, random_bilinear_resize_max_size_per, mask=sd.random_circle_faded ([resolution,resolution])) + + img = imagelib.apply_random_jpeg_compress( img, random_jpeg_compress_chance, mask=sd.random_circle_faded ([resolution,resolution])) + if data_format == "NCHW": img = np.transpose(img, (2,0,1) ) mask = np.transpose(mask, (2,0,1) ) From bee8628d77a825808594db340994ec8e8d4d66fb Mon Sep 17 00:00:00 2001 From: iperov Date: Mon, 12 Apr 2021 16:52:39 +0400 Subject: [PATCH 14/47] xseg sample generator: additional sample augmentation --- samplelib/SampleGeneratorFaceXSeg.py | 78 +++++++++++++++++++++++----- 1 file changed, 66 insertions(+), 12 deletions(-) diff --git a/samplelib/SampleGeneratorFaceXSeg.py b/samplelib/SampleGeneratorFaceXSeg.py index 9b15e28..f29653d 100644 --- a/samplelib/SampleGeneratorFaceXSeg.py +++ b/samplelib/SampleGeneratorFaceXSeg.py @@ -6,7 +6,7 @@ from enum import IntEnum import cv2 import numpy as np - +from pathlib import Path from core import imagelib, mplib, pathex from core.imagelib import sd from core.cv2ex import * @@ -31,7 +31,7 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): if len(seg_sample_idxs) == 0: raise Exception(f"No segmented faces found.") else: - io.log_info(f"Using {len(seg_sample_idxs)} xseg labeled samples.") + io.log_info(f"Using {len(seg_sample_idxs)} xseg labeled samples.") else: io.log_info(f"Using {len(seg_sample_idxs)} segmented samples.") @@ -40,11 +40,11 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): else: self.generators_count = max(1, generators_count) + args = (samples, seg_sample_idxs, resolution, face_type, data_format) if self.debug: - self.generators = [ThisThreadGenerator ( self.batch_func, (samples, seg_sample_idxs, resolution, face_type, data_format) )] + self.generators = [ThisThreadGenerator ( self.batch_func, args )] else: - self.generators = [SubprocessGenerator ( self.batch_func, (samples, seg_sample_idxs, resolution, face_type, data_format), start_now=False ) \ - for i in range(self.generators_count) ] + self.generators = [SubprocessGenerator ( self.batch_func, args, start_now=False ) for i in range(self.generators_count) ] SubprocessGenerator.start_in_parallel( self.generators ) @@ -84,11 +84,11 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): def gen_img_mask(sample): img = 
sample.load_bgr() h,w,c = img.shape - + if sample.seg_ie_polys.has_polys(): mask = np.zeros ((h,w,1), dtype=np.float32) sample.seg_ie_polys.overlay_mask(mask) - elif sample.has_xseg_mask(): + elif sample.has_xseg_mask(): mask = sample.get_xseg_mask() mask[mask < 0.5] = 0.0 mask[mask >= 0.5] = 1.0 @@ -122,7 +122,6 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): img, mask = gen_img_mask(sample) if np.random.randint(2) == 0: - if len(bg_shuffle_idxs) == 0: bg_shuffle_idxs = seg_sample_idxs.copy() np.random.shuffle(bg_shuffle_idxs) @@ -133,6 +132,11 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): bg_wp = imagelib.gen_warp_params(resolution, True, rotation_range=[-180,180], scale_range=[-0.10, 0.10], tx_range=[-0.10, 0.10], ty_range=[-0.10, 0.10] ) bg_img = imagelib.warp_by_params (bg_wp, bg_img, can_warp=False, can_transform=True, can_flip=True, border_replicate=True) bg_mask = imagelib.warp_by_params (bg_wp, bg_mask, can_warp=False, can_transform=True, can_flip=True, border_replicate=False) + bg_img = bg_img*(1-bg_mask) + if np.random.randint(2) == 0: + bg_img = imagelib.apply_random_hsv_shift(bg_img) + else: + bg_img = imagelib.apply_random_rgb_levels(bg_img) c_mask = 1.0 - (1-bg_mask) * (1-mask) rnd = np.random.uniform() @@ -152,16 +156,22 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): else: img = imagelib.apply_random_rgb_levels(img, mask=sd.random_circle_faded ([resolution,resolution])) + if np.random.randint(2) == 0: + # random face flare + krn = np.random.randint( resolution//4, resolution ) + krn = krn - krn % 2 + 1 + img = img + cv2.GaussianBlur(img*mask, (krn,krn), 0) + img = imagelib.apply_random_motion_blur( img, motion_blur_chance, motion_blur_mb_max_size, mask=sd.random_circle_faded ([resolution,resolution])) img = imagelib.apply_random_gaussian_blur( img, gaussian_blur_chance, gaussian_blur_kernel_max_size, mask=sd.random_circle_faded ([resolution,resolution])) - if np.random.randint(2) == 0: img = imagelib.apply_random_nearest_resize( img, random_bilinear_resize_chance, random_bilinear_resize_max_size_per, mask=sd.random_circle_faded ([resolution,resolution])) else: img = imagelib.apply_random_bilinear_resize( img, random_bilinear_resize_chance, random_bilinear_resize_max_size_per, mask=sd.random_circle_faded ([resolution,resolution])) - + img = np.clip(img, 0, 1) + img = imagelib.apply_random_jpeg_compress( img, random_jpeg_compress_chance, mask=sd.random_circle_faded ([resolution,resolution])) - + if data_format == "NCHW": img = np.transpose(img, (2,0,1) ) mask = np.transpose(mask, (2,0,1) ) @@ -229,4 +239,48 @@ class SegmentedSampleFilterSubprocessor(Subprocessor): if self.count_xseg_mask: return idx, self.samples[idx].has_xseg_mask() else: - return idx, self.samples[idx].seg_ie_polys.get_pts_count() != 0 \ No newline at end of file + return idx, self.samples[idx].seg_ie_polys.get_pts_count() != 0 + +""" + bg_path = None + for path in paths: + bg_path = Path(path) / 'backgrounds' + if bg_path.exists(): + + break + if bg_path is None: + io.log_info(f'Random backgrounds will not be used. Place no face jpg images to aligned\backgrounds folder. 
') + bg_pathes = None + else: + bg_pathes = pathex.get_image_paths(bg_path, image_extensions=['.jpg'], return_Path_class=True) + io.log_info(f'Using {len(bg_pathes)} random backgrounds from {bg_path}') + +if bg_pathes is not None: + bg_path = bg_pathes[ np.random.randint(len(bg_pathes)) ] + + bg_img = cv2_imread(bg_path) + if bg_img is not None: + bg_img = bg_img.astype(np.float32) / 255.0 + bg_img = imagelib.normalize_channels(bg_img, 3) + + bg_img = imagelib.random_crop(bg_img, resolution, resolution) + bg_img = cv2.resize(bg_img, (resolution, resolution), interpolation=cv2.INTER_LINEAR) + + if np.random.randint(2) == 0: + bg_img = imagelib.apply_random_hsv_shift(bg_img) + else: + bg_img = imagelib.apply_random_rgb_levels(bg_img) + + bg_wp = imagelib.gen_warp_params(resolution, True, rotation_range=[-180,180], scale_range=[0,0], tx_range=[0,0], ty_range=[0,0]) + bg_img = imagelib.warp_by_params (bg_wp, bg_img, can_warp=False, can_transform=True, can_flip=True, border_replicate=True) + + bg = img*(1-mask) + fg = img*mask + + c_mask = sd.random_circle_faded ([resolution,resolution]) + bg = ( bg_img*c_mask + bg*(1-c_mask) )*(1-mask) + + img = fg+bg + + else: +""" \ No newline at end of file From 65432d0c3dc03b014e53c8e3d6547996364a7f4b Mon Sep 17 00:00:00 2001 From: iperov Date: Thu, 15 Apr 2021 21:43:18 +0400 Subject: [PATCH 15/47] imagelib: random crop func (unused) --- core/imagelib/__init__.py | 2 +- core/imagelib/common.py | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/core/imagelib/__init__.py b/core/imagelib/__init__.py index 682bd65..23fb889 100644 --- a/core/imagelib/__init__.py +++ b/core/imagelib/__init__.py @@ -14,7 +14,7 @@ from .reduce_colors import reduce_colors from .color_transfer import color_transfer, color_transfer_mix, color_transfer_sot, color_transfer_mkl, color_transfer_idt, color_hist_match, reinhard_color_transfer, linear_color_transfer -from .common import normalize_channels, cut_odd_image, overlay_alpha_image +from .common import random_crop, normalize_channels, cut_odd_image, overlay_alpha_image from .SegIEPolys import * diff --git a/core/imagelib/common.py b/core/imagelib/common.py index 6566819..4219d7d 100644 --- a/core/imagelib/common.py +++ b/core/imagelib/common.py @@ -1,5 +1,16 @@ import numpy as np +def random_crop(img, w, h): + height, width = img.shape[:2] + + h_rnd = height - h + w_rnd = width - w + + y = np.random.randint(0, h_rnd) if h_rnd > 0 else 0 + x = np.random.randint(0, w_rnd) if w_rnd > 0 else 0 + + return img[y:y+height, x:x+width] + def normalize_channels(img, target_channels): img_shape_len = len(img.shape) if img_shape_len == 2: From af0b3904fcd3269832b7f622785fd1dc45291e29 Mon Sep 17 00:00:00 2001 From: iperov Date: Thu, 15 Apr 2021 21:43:47 +0400 Subject: [PATCH 16/47] XSeg trainer: additional dssim loss. 
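nn.dssim is assumed here to follow the usual definition dssim(a, b) = (1 - SSIM(a, b)) / 2, so the per-sample loss below becomes roughly mean(sigmoid_cross_entropy(target, logits)) + 0.1 * mean(dssim(target, logits, max_val=1.0, filter_size=3)): a structural penalty over 3x3 windows (local means, variances and covariance) on top of the per-pixel cross-entropy. The added term is computed on the raw logits rather than the sigmoid output, and the next patch in the series reverts this change.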
--- models/Model_XSeg/Model.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/models/Model_XSeg/Model.py b/models/Model_XSeg/Model.py index 4ab23fc..a257c11 100644 --- a/models/Model_XSeg/Model.py +++ b/models/Model_XSeg/Model.py @@ -95,6 +95,9 @@ class XSegModel(ModelBase): gpu_pred_list.append(gpu_pred_t) gpu_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=gpu_target_t, logits=gpu_pred_logits_t), axis=[1,2,3]) + gpu_loss += tf.reduce_mean ( 0.1*nn.dssim(gpu_target_t, gpu_pred_logits_t, max_val=1.0, filter_size=3), axis=[1]) + + gpu_losses += [gpu_loss] gpu_loss_gvs += [ nn.gradients ( gpu_loss, self.model.get_weights() ) ] From 93fe480eca2bb3346cac3e6eb02bc26013d38994 Mon Sep 17 00:00:00 2001 From: iperov Date: Mon, 19 Apr 2021 13:35:14 +0400 Subject: [PATCH 17/47] revert --- models/Model_XSeg/Model.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/models/Model_XSeg/Model.py b/models/Model_XSeg/Model.py index a257c11..cd6eea3 100644 --- a/models/Model_XSeg/Model.py +++ b/models/Model_XSeg/Model.py @@ -95,9 +95,7 @@ class XSegModel(ModelBase): gpu_pred_list.append(gpu_pred_t) gpu_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=gpu_target_t, logits=gpu_pred_logits_t), axis=[1,2,3]) - gpu_loss += tf.reduce_mean ( 0.1*nn.dssim(gpu_target_t, gpu_pred_logits_t, max_val=1.0, filter_size=3), axis=[1]) - - + gpu_losses += [gpu_loss] gpu_loss_gvs += [ nn.gradients ( gpu_loss, self.model.get_weights() ) ] From 7a08c0c1d325e92fc5f400c5ed99bdbf5e3d32b0 Mon Sep 17 00:00:00 2001 From: iperov Date: Tue, 20 Apr 2021 16:45:20 +0400 Subject: [PATCH 18/47] Extractor: extract from dflimg is now the same as from any image --- mainscripts/Extractor.py | 106 +++++++++++++++------------------------ 1 file changed, 40 insertions(+), 66 deletions(-) diff --git a/mainscripts/Extractor.py b/mainscripts/Extractor.py index 1d5c80f..365804f 100644 --- a/mainscripts/Extractor.py +++ b/mainscripts/Extractor.py @@ -97,9 +97,6 @@ class ExtractSubprocessor(Subprocessor): h, w, c = image.shape - dflimg = DFLIMG.load (filepath) - extract_from_dflimg = (h == w and (dflimg is not None and dflimg.has_data()) ) - if 'rects' in self.type or self.type == 'all': data = ExtractSubprocessor.Cli.rects_stage (data=data, image=image, @@ -110,7 +107,6 @@ class ExtractSubprocessor(Subprocessor): if 'landmarks' in self.type or self.type == 'all': data = ExtractSubprocessor.Cli.landmarks_stage (data=data, image=image, - extract_from_dflimg=extract_from_dflimg, landmarks_extractor=self.landmarks_extractor, rects_extractor=self.rects_extractor, ) @@ -121,7 +117,6 @@ class ExtractSubprocessor(Subprocessor): face_type=self.face_type, image_size=self.image_size, jpeg_quality=self.jpeg_quality, - extract_from_dflimg=extract_from_dflimg, output_debug_path=self.output_debug_path, final_output_path=self.final_output_path, ) @@ -161,7 +156,6 @@ class ExtractSubprocessor(Subprocessor): @staticmethod def landmarks_stage(data, image, - extract_from_dflimg, landmarks_extractor, rects_extractor, ): @@ -176,7 +170,7 @@ class ExtractSubprocessor(Subprocessor): elif data.rects_rotation == 270: rotated_image = image.swapaxes( 0,1 )[::-1,:,:] - data.landmarks = landmarks_extractor.extract (rotated_image, data.rects, rects_extractor if (not extract_from_dflimg and data.landmarks_accurate) else None, is_bgr=True) + data.landmarks = landmarks_extractor.extract (rotated_image, data.rects, rects_extractor if (data.landmarks_accurate) else None, is_bgr=True) if data.rects_rotation != 0: for i, 
(rect, lmrks) in enumerate(zip(data.rects, data.landmarks)): new_rect, new_lmrks = rect, lmrks @@ -207,7 +201,6 @@ class ExtractSubprocessor(Subprocessor): face_type, image_size, jpeg_quality, - extract_from_dflimg = False, output_debug_path=None, final_output_path=None, ): @@ -219,72 +212,53 @@ class ExtractSubprocessor(Subprocessor): if output_debug_path is not None: debug_image = image.copy() - if extract_from_dflimg and len(rects) != 1: - #if re-extracting from dflimg and more than 1 or zero faces detected - dont process and just copy it - print("extract_from_dflimg and len(rects) != 1", filepath ) - output_filepath = final_output_path / filepath.name - if filepath != str(output_file): - shutil.copy ( str(filepath), str(output_filepath) ) - data.final_output_files.append (output_filepath) - else: - face_idx = 0 - for rect, image_landmarks in zip( rects, landmarks ): + face_idx = 0 + for rect, image_landmarks in zip( rects, landmarks ): + if image_landmarks is None: + continue - if extract_from_dflimg and face_idx > 1: - #cannot extract more than 1 face from dflimg - break + rect = np.array(rect) - if image_landmarks is None: + if face_type == FaceType.MARK_ONLY: + image_to_face_mat = None + face_image = image + face_image_landmarks = image_landmarks + else: + image_to_face_mat = LandmarksProcessor.get_transform_mat (image_landmarks, image_size, face_type) + + face_image = cv2.warpAffine(image, image_to_face_mat, (image_size, image_size), cv2.INTER_LANCZOS4) + face_image_landmarks = LandmarksProcessor.transform_points (image_landmarks, image_to_face_mat) + + landmarks_bbox = LandmarksProcessor.transform_points ( [ (0,0), (0,image_size-1), (image_size-1, image_size-1), (image_size-1,0) ], image_to_face_mat, True) + + rect_area = mathlib.polygon_area(np.array(rect[[0,2,2,0]]).astype(np.float32), np.array(rect[[1,1,3,3]]).astype(np.float32)) + landmarks_area = mathlib.polygon_area(landmarks_bbox[:,0].astype(np.float32), landmarks_bbox[:,1].astype(np.float32) ) + + if not data.manual and face_type <= FaceType.FULL_NO_ALIGN and landmarks_area > 4*rect_area: #get rid of faces which umeyama-landmark-area > 4*detector-rect-area continue - rect = np.array(rect) + if output_debug_path is not None: + LandmarksProcessor.draw_rect_landmarks (debug_image, rect, image_landmarks, face_type, image_size, transparent_mask=True) - if face_type == FaceType.MARK_ONLY: - image_to_face_mat = None - face_image = image - face_image_landmarks = image_landmarks - else: - image_to_face_mat = LandmarksProcessor.get_transform_mat (image_landmarks, image_size, face_type) + output_path = final_output_path + if data.force_output_path is not None: + output_path = data.force_output_path - face_image = cv2.warpAffine(image, image_to_face_mat, (image_size, image_size), cv2.INTER_LANCZOS4) - face_image_landmarks = LandmarksProcessor.transform_points (image_landmarks, image_to_face_mat) + output_filepath = output_path / f"{filepath.stem}_{face_idx}.jpg" + cv2_imwrite(output_filepath, face_image, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality ] ) - landmarks_bbox = LandmarksProcessor.transform_points ( [ (0,0), (0,image_size-1), (image_size-1, image_size-1), (image_size-1,0) ], image_to_face_mat, True) + dflimg = DFLJPG.load(output_filepath) + dflimg.set_face_type(FaceType.toString(face_type)) + dflimg.set_landmarks(face_image_landmarks.tolist()) + dflimg.set_source_filename(filepath.name) + dflimg.set_source_rect(rect) + dflimg.set_source_landmarks(image_landmarks.tolist()) + dflimg.set_image_to_face_mat(image_to_face_mat) + 
dflimg.save() - rect_area = mathlib.polygon_area(np.array(rect[[0,2,2,0]]).astype(np.float32), np.array(rect[[1,1,3,3]]).astype(np.float32)) - landmarks_area = mathlib.polygon_area(landmarks_bbox[:,0].astype(np.float32), landmarks_bbox[:,1].astype(np.float32) ) - - if not data.manual and face_type <= FaceType.FULL_NO_ALIGN and landmarks_area > 4*rect_area: #get rid of faces which umeyama-landmark-area > 4*detector-rect-area - continue - - if output_debug_path is not None: - LandmarksProcessor.draw_rect_landmarks (debug_image, rect, image_landmarks, face_type, image_size, transparent_mask=True) - - output_path = final_output_path - if data.force_output_path is not None: - output_path = data.force_output_path - - if extract_from_dflimg and filepath.suffix == '.jpg': - #if extracting from dflimg and jpg copy it in order not to lose quality - output_filepath = output_path / filepath.name - if filepath != output_filepath: - shutil.copy ( str(filepath), str(output_filepath) ) - else: - output_filepath = output_path / f"{filepath.stem}_{face_idx}.jpg" - cv2_imwrite(output_filepath, face_image, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality ] ) - - dflimg = DFLJPG.load(output_filepath) - dflimg.set_face_type(FaceType.toString(face_type)) - dflimg.set_landmarks(face_image_landmarks.tolist()) - dflimg.set_source_filename(filepath.name) - dflimg.set_source_rect(rect) - dflimg.set_source_landmarks(image_landmarks.tolist()) - dflimg.set_image_to_face_mat(image_to_face_mat) - dflimg.save() - - data.final_output_files.append (output_filepath) - face_idx += 1 - data.faces_detected = face_idx + data.final_output_files.append (output_filepath) + face_idx += 1 + data.faces_detected = face_idx if output_debug_path is not None: cv2_imwrite( output_debug_path / (filepath.stem+'.jpg'), debug_image, [int(cv2.IMWRITE_JPEG_QUALITY), 50] ) From dcf146cc16f84b5d690abe1a8589a574ccdb1406 Mon Sep 17 00:00:00 2001 From: iperov Date: Thu, 22 Apr 2021 18:16:42 +0400 Subject: [PATCH 19/47] imagelib : apply_random_sharpen --- core/imagelib/__init__.py | 1 + core/imagelib/filters.py | 74 ++++++++++++++++++++++++--------------- 2 files changed, 47 insertions(+), 28 deletions(-) diff --git a/core/imagelib/__init__.py b/core/imagelib/__init__.py index 23fb889..a4f1482 100644 --- a/core/imagelib/__init__.py +++ b/core/imagelib/__init__.py @@ -22,6 +22,7 @@ from .blursharpen import LinearMotionBlur, blursharpen from .filters import apply_random_rgb_levels, \ apply_random_hsv_shift, \ + apply_random_sharpen, \ apply_random_motion_blur, \ apply_random_gaussian_blur, \ apply_random_nearest_resize, \ diff --git a/core/imagelib/filters.py b/core/imagelib/filters.py index 19655bf..eb45ecb 100644 --- a/core/imagelib/filters.py +++ b/core/imagelib/filters.py @@ -1,47 +1,65 @@ import numpy as np -from .blursharpen import LinearMotionBlur +from .blursharpen import LinearMotionBlur, blursharpen import cv2 def apply_random_rgb_levels(img, mask=None, rnd_state=None): if rnd_state is None: rnd_state = np.random np_rnd = rnd_state.rand - + inBlack = np.array([np_rnd()*0.25 , np_rnd()*0.25 , np_rnd()*0.25], dtype=np.float32) inWhite = np.array([1.0-np_rnd()*0.25, 1.0-np_rnd()*0.25, 1.0-np_rnd()*0.25], dtype=np.float32) inGamma = np.array([0.5+np_rnd(), 0.5+np_rnd(), 0.5+np_rnd()], dtype=np.float32) - + outBlack = np.array([np_rnd()*0.25 , np_rnd()*0.25 , np_rnd()*0.25], dtype=np.float32) outWhite = np.array([1.0-np_rnd()*0.25, 1.0-np_rnd()*0.25, 1.0-np_rnd()*0.25], dtype=np.float32) result = np.clip( (img - inBlack) / (inWhite - inBlack), 0, 1 ) 
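     # levels mapping, as in an image editor: the line above normalizes the
     # random input range [inBlack..inWhite] to [0..1]; the next line raises to
     # the power 1/inGamma per channel and remaps to [outBlack..outWhite]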
result = ( result ** (1/inGamma) ) * (outWhite - outBlack) + outBlack result = np.clip(result, 0, 1) - + if mask is not None: result = img*(1-mask) + result*mask - + return result - + def apply_random_hsv_shift(img, mask=None, rnd_state=None): if rnd_state is None: rnd_state = np.random - + h, s, v = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) h = ( h + rnd_state.randint(360) ) % 360 s = np.clip ( s + rnd_state.random()-0.5, 0, 1 ) - v = np.clip ( v + rnd_state.random()-0.5, 0, 1 ) - + v = np.clip ( v + rnd_state.random()-0.5, 0, 1 ) + result = np.clip( cv2.cvtColor(cv2.merge([h, s, v]), cv2.COLOR_HSV2BGR) , 0, 1 ) if mask is not None: result = img*(1-mask) + result*mask - + return result - + +def apply_random_sharpen( img, chance, kernel_max_size, mask=None, rnd_state=None ): + if rnd_state is None: + rnd_state = np.random + + sharp_rnd_kernel = rnd_state.randint(kernel_max_size)+1 + + result = img + if rnd_state.randint(100) < np.clip(chance, 0, 100): + if rnd_state.randint(2) == 0: + result = blursharpen(result, 1, sharp_rnd_kernel, rnd_state.randint(10) ) + else: + result = blursharpen(result, 2, sharp_rnd_kernel, rnd_state.randint(50) ) + + if mask is not None: + result = img*(1-mask) + result*mask + + return result + def apply_random_motion_blur( img, chance, mb_max_size, mask=None, rnd_state=None ): if rnd_state is None: rnd_state = np.random - + mblur_rnd_kernel = rnd_state.randint(mb_max_size)+1 mblur_rnd_deg = rnd_state.randint(360) @@ -50,22 +68,22 @@ def apply_random_motion_blur( img, chance, mb_max_size, mask=None, rnd_state=Non result = LinearMotionBlur (result, mblur_rnd_kernel, mblur_rnd_deg ) if mask is not None: result = img*(1-mask) + result*mask - + return result - + def apply_random_gaussian_blur( img, chance, kernel_max_size, mask=None, rnd_state=None ): if rnd_state is None: rnd_state = np.random - + result = img if rnd_state.randint(100) < np.clip(chance, 0, 100): gblur_rnd_kernel = rnd_state.randint(kernel_max_size)*2+1 result = cv2.GaussianBlur(result, (gblur_rnd_kernel,)*2 , 0) if mask is not None: result = img*(1-mask) + result*mask - + return result - + def apply_random_resize( img, chance, max_size_per, interpolation=cv2.INTER_LINEAR, mask=None, rnd_state=None ): if rnd_state is None: rnd_state = np.random @@ -73,24 +91,24 @@ def apply_random_resize( img, chance, max_size_per, interpolation=cv2.INTER_LINE result = img if rnd_state.randint(100) < np.clip(chance, 0, 100): h,w,c = result.shape - + trg = rnd_state.rand() - rw = w - int( trg * int(w*(max_size_per/100.0)) ) - rh = h - int( trg * int(h*(max_size_per/100.0)) ) - + rw = w - int( trg * int(w*(max_size_per/100.0)) ) + rh = h - int( trg * int(h*(max_size_per/100.0)) ) + result = cv2.resize (result, (rw,rh), interpolation=interpolation ) result = cv2.resize (result, (w,h), interpolation=interpolation ) if mask is not None: result = img*(1-mask) + result*mask - + return result - + def apply_random_nearest_resize( img, chance, max_size_per, mask=None, rnd_state=None ): return apply_random_resize( img, chance, max_size_per, interpolation=cv2.INTER_NEAREST, mask=mask, rnd_state=rnd_state ) - + def apply_random_bilinear_resize( img, chance, max_size_per, mask=None, rnd_state=None ): return apply_random_resize( img, chance, max_size_per, interpolation=cv2.INTER_LINEAR, mask=mask, rnd_state=rnd_state ) - + def apply_random_jpeg_compress( img, chance, mask=None, rnd_state=None ): if rnd_state is None: rnd_state = np.random @@ -98,14 +116,14 @@ def apply_random_jpeg_compress( img, chance, mask=None, 
rnd_state=None ): result = img if rnd_state.randint(100) < np.clip(chance, 0, 100): h,w,c = result.shape - + quality = rnd_state.randint(10,101) - + ret, result = cv2.imencode('.jpg', np.clip(img*255, 0,255).astype(np.uint8), [int(cv2.IMWRITE_JPEG_QUALITY), quality] ) if ret == True: result = cv2.imdecode(result, flags=cv2.IMREAD_UNCHANGED) result = result.astype(np.float32) / 255.0 if mask is not None: result = img*(1-mask) + result*mask - + return result \ No newline at end of file From fc4a49c3e7feac2bcc51e986e10eff0e4f2d12dd Mon Sep 17 00:00:00 2001 From: iperov Date: Thu, 22 Apr 2021 18:18:02 +0400 Subject: [PATCH 20/47] improved xseg sample generator : added random sharpen --- samplelib/SampleGeneratorFaceXSeg.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/samplelib/SampleGeneratorFaceXSeg.py b/samplelib/SampleGeneratorFaceXSeg.py index f29653d..b2019f4 100644 --- a/samplelib/SampleGeneratorFaceXSeg.py +++ b/samplelib/SampleGeneratorFaceXSeg.py @@ -77,6 +77,7 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): ty_range=[-0.05, 0.05] random_bilinear_resize_chance, random_bilinear_resize_max_size_per = 25,75 + sharpen_chance, sharpen_kernel_max_size = 25, 5 motion_blur_chance, motion_blur_mb_max_size = 25, 5 gaussian_blur_chance, gaussian_blur_kernel_max_size = 25, 5 random_jpeg_compress_chance = 25 @@ -162,8 +163,12 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): krn = krn - krn % 2 + 1 img = img + cv2.GaussianBlur(img*mask, (krn,krn), 0) - img = imagelib.apply_random_motion_blur( img, motion_blur_chance, motion_blur_mb_max_size, mask=sd.random_circle_faded ([resolution,resolution])) - img = imagelib.apply_random_gaussian_blur( img, gaussian_blur_chance, gaussian_blur_kernel_max_size, mask=sd.random_circle_faded ([resolution,resolution])) + if np.random.randint(2) == 0: + img = imagelib.apply_random_sharpen( img, sharpen_chance, sharpen_kernel_max_size, mask=sd.random_circle_faded ([resolution,resolution])) + else: + img = imagelib.apply_random_motion_blur( img, motion_blur_chance, motion_blur_mb_max_size, mask=sd.random_circle_faded ([resolution,resolution])) + img = imagelib.apply_random_gaussian_blur( img, gaussian_blur_chance, gaussian_blur_kernel_max_size, mask=sd.random_circle_faded ([resolution,resolution])) + if np.random.randint(2) == 0: img = imagelib.apply_random_nearest_resize( img, random_bilinear_resize_chance, random_bilinear_resize_max_size_per, mask=sd.random_circle_faded ([resolution,resolution])) else: From fdb143ff473ca90fdf3d743df1794cbc007c368f Mon Sep 17 00:00:00 2001 From: iperov Date: Thu, 22 Apr 2021 18:19:15 +0400 Subject: [PATCH 21/47] added AMD/Intel cards support via DirectX12 ( DirectML backend ) --- core/leras/device.py | 203 ++++++++++++++++++++++------------ core/leras/nn.py | 41 +++---- facelib/FaceEnhancer.py | 4 +- facelib/XSegNet.py | 4 +- models/Model_Quick96/Model.py | 6 +- models/Model_SAEHD/Model.py | 20 ++-- models/Model_XSeg/Model.py | 4 +- 7 files changed, 166 insertions(+), 116 deletions(-) diff --git a/core/leras/device.py b/core/leras/device.py index 4d157f0..2f65382 100644 --- a/core/leras/device.py +++ b/core/leras/device.py @@ -1,12 +1,19 @@ import sys import ctypes import os +import multiprocessing +import json +import time +from pathlib import Path +from core.interact import interact as io + class Device(object): - def __init__(self, index, name, total_mem, free_mem, cc=0): + def __init__(self, index, tf_dev_type, name, total_mem, free_mem): self.index = index + self.tf_dev_type = tf_dev_type 
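+        # tf_dev_type is the TensorFlow device class reported by device_lib
+        # ('GPU' for CUDA devices, 'DML' for the DirectML backend this patch
+        # adds); later code builds device strings from it, e.g. f'/{tf_dev_type}:0'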
self.name = name - self.cc = cc + self.total_mem = total_mem self.total_mem_gb = total_mem / 1024**3 self.free_mem = free_mem @@ -82,12 +89,134 @@ class Devices(object): result.append (device) return Devices(result) + @staticmethod + def _get_tf_devices_proc(q : multiprocessing.Queue): + + compute_cache_path = Path(os.environ['APPDATA']) / 'NVIDIA' / ('ComputeCache_ALL') + os.environ['CUDA_CACHE_PATH'] = str(compute_cache_path) + if not compute_cache_path.exists(): + io.log_info("Caching GPU kernels...") + compute_cache_path.mkdir(parents=True, exist_ok=True) + + import tensorflow + + tf_version = tensorflow.version.VERSION + #if tf_version is None: + # tf_version = tensorflow.version.GIT_VERSION + if tf_version[0] == 'v': + tf_version = tf_version[1:] + if tf_version[0] == '2': + tf = tensorflow.compat.v1 + else: + tf = tensorflow + + import logging + # Disable tensorflow warnings + tf_logger = logging.getLogger('tensorflow') + tf_logger.setLevel(logging.ERROR) + + from tensorflow.python.client import device_lib + + devices = [] + + physical_devices = device_lib.list_local_devices() + physical_devices_f = {} + for dev in physical_devices: + dev_type = dev.device_type + dev_tf_name = dev.name + dev_tf_name = dev_tf_name[ dev_tf_name.index(dev_type) : ] + + dev_idx = int(dev_tf_name.split(':')[-1]) + + if dev_type in ['GPU','DML']: + dev_name = dev_tf_name + + dev_desc = dev.physical_device_desc + if len(dev_desc) != 0: + if dev_desc[0] == '{': + dev_desc_json = json.loads(dev_desc) + dev_desc_json_name = dev_desc_json.get('name',None) + if dev_desc_json_name is not None: + dev_name = dev_desc_json_name + else: + for param, value in ( v.split(':') for v in dev_desc.split(',') ): + param = param.strip() + value = value.strip() + if param == 'name': + dev_name = value + break + + physical_devices_f[dev_idx] = (dev_type, dev_name, dev.memory_limit) + + q.put(physical_devices_f) + time.sleep(0.1) + + @staticmethod def initialize_main_env(): - os.environ['NN_DEVICES_INITIALIZED'] = '1' - os.environ['NN_DEVICES_COUNT'] = '0' + if int(os.environ.get("NN_DEVICES_INITIALIZED", 0)) != 0: + return + + if 'CUDA_VISIBLE_DEVICES' in os.environ.keys(): + os.environ.pop('CUDA_VISIBLE_DEVICES') os.environ['CUDA_​CACHE_​MAXSIZE'] = '2147483647' + os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '2' + os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # tf log errors only + + q = multiprocessing.Queue() + p = multiprocessing.Process(target=Devices._get_tf_devices_proc, args=(q,), daemon=True) + p.start() + p.join() + + visible_devices = q.get() + + os.environ['NN_DEVICES_INITIALIZED'] = '1' + os.environ['NN_DEVICES_COUNT'] = str(len(visible_devices)) + + for i in visible_devices: + dev_type, name, total_mem = visible_devices[i] + + os.environ[f'NN_DEVICE_{i}_TF_DEV_TYPE'] = dev_type + os.environ[f'NN_DEVICE_{i}_NAME'] = name + os.environ[f'NN_DEVICE_{i}_TOTAL_MEM'] = str(total_mem) + os.environ[f'NN_DEVICE_{i}_FREE_MEM'] = str(total_mem) + + + + @staticmethod + def getDevices(): + if Devices.all_devices is None: + if int(os.environ.get("NN_DEVICES_INITIALIZED", 0)) != 1: + raise Exception("nn devices are not initialized. 
Run initialize_main_env() in main process.") + devices = [] + for i in range ( int(os.environ['NN_DEVICES_COUNT']) ): + devices.append ( Device(index=i, + tf_dev_type=os.environ[f'NN_DEVICE_{i}_TF_DEV_TYPE'], + name=os.environ[f'NN_DEVICE_{i}_NAME'], + total_mem=int(os.environ[f'NN_DEVICE_{i}_TOTAL_MEM']), + free_mem=int(os.environ[f'NN_DEVICE_{i}_FREE_MEM']), ) + ) + Devices.all_devices = Devices(devices) + + return Devices.all_devices + +""" + + + # {'name' : name.split(b'\0', 1)[0].decode(), + # 'total_mem' : totalMem.value + # } + + + + + + return + + + + min_cc = int(os.environ.get("TF_MIN_REQ_CAP", 35)) libnames = ('libcuda.so', 'libcuda.dylib', 'nvcuda.dll') for libname in libnames: @@ -139,70 +268,4 @@ class Devices(object): os.environ[f'NN_DEVICE_{i}_TOTAL_MEM'] = str(device['total_mem']) os.environ[f'NN_DEVICE_{i}_FREE_MEM'] = str(device['free_mem']) os.environ[f'NN_DEVICE_{i}_CC'] = str(device['cc']) - - @staticmethod - def getDevices(): - if Devices.all_devices is None: - if int(os.environ.get("NN_DEVICES_INITIALIZED", 0)) != 1: - raise Exception("nn devices are not initialized. Run initialize_main_env() in main process.") - devices = [] - for i in range ( int(os.environ['NN_DEVICES_COUNT']) ): - devices.append ( Device(index=i, - name=os.environ[f'NN_DEVICE_{i}_NAME'], - total_mem=int(os.environ[f'NN_DEVICE_{i}_TOTAL_MEM']), - free_mem=int(os.environ[f'NN_DEVICE_{i}_FREE_MEM']), - cc=int(os.environ[f'NN_DEVICE_{i}_CC']) )) - Devices.all_devices = Devices(devices) - - return Devices.all_devices - -""" -if Devices.all_devices is None: - min_cc = int(os.environ.get("TF_MIN_REQ_CAP", 35)) - - libnames = ('libcuda.so', 'libcuda.dylib', 'nvcuda.dll') - for libname in libnames: - try: - cuda = ctypes.CDLL(libname) - except: - continue - else: - break - else: - return Devices([]) - - nGpus = ctypes.c_int() - name = b' ' * 200 - cc_major = ctypes.c_int() - cc_minor = ctypes.c_int() - freeMem = ctypes.c_size_t() - totalMem = ctypes.c_size_t() - - result = ctypes.c_int() - device = ctypes.c_int() - context = ctypes.c_void_p() - error_str = ctypes.c_char_p() - - devices = [] - - if cuda.cuInit(0) == 0 and \ - cuda.cuDeviceGetCount(ctypes.byref(nGpus)) == 0: - for i in range(nGpus.value): - if cuda.cuDeviceGet(ctypes.byref(device), i) != 0 or \ - cuda.cuDeviceGetName(ctypes.c_char_p(name), len(name), device) != 0 or \ - cuda.cuDeviceComputeCapability(ctypes.byref(cc_major), ctypes.byref(cc_minor), device) != 0: - continue - - if cuda.cuCtxCreate_v2(ctypes.byref(context), 0, device) == 0: - if cuda.cuMemGetInfo_v2(ctypes.byref(freeMem), ctypes.byref(totalMem)) == 0: - cc = cc_major.value * 10 + cc_minor.value - if cc >= min_cc: - devices.append ( Device(index=i, - name=name.split(b'\0', 1)[0].decode(), - total_mem=totalMem.value, - free_mem=freeMem.value, - cc=cc) ) - cuda.cuCtxDetach(context) - Devices.all_devices = Devices(devices) - return Devices.all_devices """ \ No newline at end of file diff --git a/core/leras/nn.py b/core/leras/nn.py index ef5c2c9..f392aaf 100644 --- a/core/leras/nn.py +++ b/core/leras/nn.py @@ -33,8 +33,8 @@ class nn(): tf = None tf_sess = None tf_sess_config = None - tf_default_device = None - + tf_default_device_name = None + data_format = None conv2d_ch_axis = None conv2d_spatial_axes = None @@ -50,9 +50,6 @@ class nn(): nn.setCurrentDeviceConfig(device_config) # Manipulate environment variables before import tensorflow - - if 'CUDA_VISIBLE_DEVICES' in os.environ.keys(): - os.environ.pop('CUDA_VISIBLE_DEVICES') first_run = False if len(device_config.devices) != 
0: @@ -68,22 +65,19 @@ class nn(): compute_cache_path = Path(os.environ['APPDATA']) / 'NVIDIA' / ('ComputeCache' + devices_str) if not compute_cache_path.exists(): first_run = True + compute_cache_path.mkdir(parents=True, exist_ok=True) os.environ['CUDA_CACHE_PATH'] = str(compute_cache_path) - - os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '2' - os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # tf log errors only - + if first_run: io.log_info("Caching GPU kernels...") import tensorflow - - tf_version = getattr(tensorflow,'VERSION', None) - if tf_version is None: - tf_version = tensorflow.version.GIT_VERSION - if tf_version[0] == 'v': - tf_version = tf_version[1:] - + + tf_version = tensorflow.version.VERSION + #if tf_version is None: + # tf_version = tensorflow.version.GIT_VERSION + if tf_version[0] == 'v': + tf_version = tf_version[1:] if tf_version[0] == '2': tf = tensorflow.compat.v1 else: @@ -108,13 +102,14 @@ class nn(): # Configure tensorflow session-config if len(device_config.devices) == 0: - nn.tf_default_device = "/CPU:0" config = tf.ConfigProto(device_count={'GPU': 0}) + nn.tf_default_device_name = '/CPU:0' else: - nn.tf_default_device = "/GPU:0" + nn.tf_default_device_name = f'/{device_config.devices[0].tf_dev_type}:0' + config = tf.ConfigProto() config.gpu_options.visible_device_list = ','.join([str(device.index) for device in device_config.devices]) - + config.gpu_options.force_gpu_compatible = True config.gpu_options.allow_growth = True nn.tf_sess_config = config @@ -202,14 +197,6 @@ class nn(): nn.tf_sess.close() nn.tf_sess = None - @staticmethod - def get_current_device(): - # Undocumented access to last tf.device(...) - objs = nn.tf.get_default_graph()._device_function_stack.peek_objs() - if len(objs) != 0: - return objs[0].display_name - return nn.tf_default_device - @staticmethod def ask_choose_device_idxs(choose_only_one=False, allow_cpu=True, suggest_best_multi_gpu=False, suggest_all_gpu=False): devices = Devices.getDevices() diff --git a/facelib/FaceEnhancer.py b/facelib/FaceEnhancer.py index 0b5ced3..1dc0dd9 100644 --- a/facelib/FaceEnhancer.py +++ b/facelib/FaceEnhancer.py @@ -161,11 +161,11 @@ class FaceEnhancer(object): if not model_path.exists(): raise Exception("Unable to load FaceEnhancer.npy") - with tf.device ('/CPU:0' if place_model_on_cpu else '/GPU:0'): + with tf.device ('/CPU:0' if place_model_on_cpu else nn.tf_default_device_name): self.model = FaceEnhancer() self.model.load_weights (model_path) - with tf.device ('/CPU:0' if run_on_cpu else '/GPU:0'): + with tf.device ('/CPU:0' if run_on_cpu else nn.tf_default_device_name): self.model.build_for_run ([ (tf.float32, nn.get4Dshape (192,192,3) ), (tf.float32, (None,1,) ), (tf.float32, (None,1,) ), diff --git a/facelib/XSegNet.py b/facelib/XSegNet.py index 761ab94..5621a65 100644 --- a/facelib/XSegNet.py +++ b/facelib/XSegNet.py @@ -39,7 +39,7 @@ class XSegNet(object): self.target_t = tf.placeholder (nn.floatx, nn.get4Dshape(resolution,resolution,1) ) # Initializing model classes - with tf.device ('/CPU:0' if place_model_on_cpu else '/GPU:0'): + with tf.device ('/CPU:0' if place_model_on_cpu else nn.tf_default_device_name): self.model = nn.XSeg(3, 32, 1, name=name) self.model_weights = self.model.get_weights() if training: @@ -53,7 +53,7 @@ class XSegNet(object): self.model_filename_list += [ [self.model, f'{model_name}.npy'] ] if not training: - with tf.device ('/CPU:0' if run_on_cpu else '/GPU:0'): + with tf.device ('/CPU:0' if run_on_cpu else nn.tf_default_device_name): _, pred = self.model(self.input_t) def 
net_run(input_np): diff --git a/models/Model_Quick96/Model.py b/models/Model_Quick96/Model.py index 3c39e46..fa139e5 100644 --- a/models/Model_Quick96/Model.py +++ b/models/Model_Quick96/Model.py @@ -31,7 +31,7 @@ class QModel(ModelBase): masked_training = True models_opt_on_gpu = len(devices) >= 1 and all([dev.total_mem_gb >= 4 for dev in devices]) - models_opt_device = '/GPU:0' if models_opt_on_gpu and self.is_training else '/CPU:0' + models_opt_device = nn.tf_default_device_name if models_opt_on_gpu and self.is_training else '/CPU:0' optimizer_vars_on_cpu = models_opt_device=='/CPU:0' input_ch = 3 @@ -96,7 +96,7 @@ class QModel(ModelBase): gpu_src_dst_loss_gvs = [] for gpu_id in range(gpu_count): - with tf.device( f'/GPU:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ): + with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ): batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu ) with tf.device(f'/CPU:0'): # slice on CPU, otherwise all batch data will be transfered to GPU first @@ -190,7 +190,7 @@ class QModel(ModelBase): self.AE_view = AE_view else: # Initializing merge function - with tf.device( f'/GPU:0' if len(devices) != 0 else f'/CPU:0'): + with tf.device( nn.tf_default_device_name if len(devices) != 0 else f'/CPU:0'): gpu_dst_code = self.inter(self.encoder(self.warped_dst)) gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder_src(gpu_dst_code) _, gpu_pred_dst_dstm = self.decoder_dst(gpu_dst_code) diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py index 79329ff..eb89172 100644 --- a/models/Model_SAEHD/Model.py +++ b/models/Model_SAEHD/Model.py @@ -235,9 +235,10 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... ct_mode = self.options['ct_mode'] if ct_mode == 'none': ct_mode = None - + + models_opt_on_gpu = False if len(devices) == 0 else self.options['models_opt_on_gpu'] - models_opt_device = '/GPU:0' if models_opt_on_gpu and self.is_training else '/CPU:0' + models_opt_device = nn.tf_default_device_name if models_opt_on_gpu and self.is_training else '/CPU:0' optimizer_vars_on_cpu = models_opt_device=='/CPU:0' input_ch=3 @@ -336,7 +337,6 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... bs_per_gpu = max(1, self.get_batch_size() // gpu_count) self.set_batch_size( gpu_count*bs_per_gpu) - # Compute losses per GPU gpu_pred_src_src_list = [] gpu_pred_dst_dst_list = [] @@ -350,9 +350,9 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... gpu_G_loss_gvs = [] gpu_D_code_loss_gvs = [] gpu_D_src_dst_loss_gvs = [] + for gpu_id in range(gpu_count): - with tf.device( f'/GPU:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ): - + with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ): with tf.device(f'/CPU:0'): # slice on CPU, otherwise all batch data will be transfered to GPU first batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu ) @@ -360,10 +360,10 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... 
gpu_warped_dst = self.warped_dst [batch_slice,:,:,:] gpu_target_src = self.target_src [batch_slice,:,:,:] gpu_target_dst = self.target_dst [batch_slice,:,:,:] - gpu_target_srcm = self.target_srcm[batch_slice,:,:,:] - gpu_target_srcm_em = self.target_srcm_em[batch_slice,:,:,:] - gpu_target_dstm = self.target_dstm[batch_slice,:,:,:] - gpu_target_dstm_em = self.target_dstm_em[batch_slice,:,:,:] + gpu_target_srcm = self.target_srcm[batch_slice,:,:,:] + gpu_target_srcm_em = self.target_srcm_em[batch_slice,:,:,:] + gpu_target_dstm = self.target_dstm[batch_slice,:,:,:] + gpu_target_dstm_em = self.target_dstm_em[batch_slice,:,:,:] # process model tensors if 'df' in archi_type: @@ -571,7 +571,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... self.AE_view = AE_view else: # Initializing merge function - with tf.device( f'/GPU:0' if len(devices) != 0 else f'/CPU:0'): + with tf.device( nn.tf_default_device_name if len(devices) != 0 else f'/CPU:0'): if 'df' in archi_type: gpu_dst_code = self.inter(self.encoder(self.warped_dst)) gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder_src(gpu_dst_code) diff --git a/models/Model_XSeg/Model.py b/models/Model_XSeg/Model.py index cd6eea3..567a1f2 100644 --- a/models/Model_XSeg/Model.py +++ b/models/Model_XSeg/Model.py @@ -52,7 +52,7 @@ class XSegModel(ModelBase): 'head' : FaceType.HEAD}[ self.options['face_type'] ] place_model_on_cpu = len(devices) == 0 - models_opt_device = '/CPU:0' if place_model_on_cpu else '/GPU:0' + models_opt_device = '/CPU:0' if place_model_on_cpu else nn.tf_default_device_name bgr_shape = nn.get4Dshape(resolution,resolution,3) mask_shape = nn.get4Dshape(resolution,resolution,1) @@ -83,7 +83,7 @@ class XSegModel(ModelBase): for gpu_id in range(gpu_count): - with tf.device( f'/GPU:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ): + with tf.device(f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ): with tf.device(f'/CPU:0'): # slice on CPU, otherwise all batch data will be transfered to GPU first batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu ) From 26b4b6adef97b85e43af7da3e6e22d71834e5519 Mon Sep 17 00:00:00 2001 From: iperov Date: Thu, 22 Apr 2021 18:30:23 +0400 Subject: [PATCH 22/47] upd readme --- README.md | 4 ++-- doc/logo_directx.png | Bin 0 -> 25907 bytes 2 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 doc/logo_directx.png diff --git a/README.md b/README.md index 701917a..bfdaa3d 100644 --- a/README.md +++ b/README.md @@ -19,9 +19,9 @@ https://arxiv.org/abs/2005.05535

-![](doc/logo_cuda.png) ![](doc/logo_tensorflow.png) -![](doc/logo_python.png) +![](doc/logo_cuda.png) +![](doc/logo_directx.png)

diff --git a/doc/logo_directx.png b/doc/logo_directx.png
new file mode 100644
index 0000000000000000000000000000000000000000..f9fb10ae89ef6ef465703bf46e194802c3420988
Binary files /dev/null and b/doc/logo_directx.png differ

From 78f12de819848f12adfb11924999ece044d82d14 Mon Sep 17 00:00:00 2001
From: iperov
Date: Thu, 22 Apr 2021 19:01:26 +0400
Subject: [PATCH 23/47] upd windows magnet link

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index bfdaa3d..b413ff3 100644
--- a/README.md
+++ b/README.md
@@ -194,7 +194,7 @@ Unfortunately, there is no "make everything ok" button in DeepFaceLab. You shoul
 
-Windows (magnet link)
+Windows (magnet link)
 Last release. Use torrent client to download.

From 23130cd56a9ab3aa2ebac94d4a518fb3b4f467fa Mon Sep 17 00:00:00 2001 From: iperov Date: Thu, 22 Apr 2021 21:12:42 +0400 Subject: [PATCH 24/47] fix for colab --- core/leras/device.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/core/leras/device.py b/core/leras/device.py index 2f65382..31d2f88 100644 --- a/core/leras/device.py +++ b/core/leras/device.py @@ -92,12 +92,13 @@ class Devices(object): @staticmethod def _get_tf_devices_proc(q : multiprocessing.Queue): - compute_cache_path = Path(os.environ['APPDATA']) / 'NVIDIA' / ('ComputeCache_ALL') - os.environ['CUDA_CACHE_PATH'] = str(compute_cache_path) - if not compute_cache_path.exists(): - io.log_info("Caching GPU kernels...") - compute_cache_path.mkdir(parents=True, exist_ok=True) - + if sys.platform[0:3] == 'win': + compute_cache_path = Path(os.environ['APPDATA']) / 'NVIDIA' / ('ComputeCache_ALL') + os.environ['CUDA_CACHE_PATH'] = str(compute_cache_path) + if not compute_cache_path.exists(): + io.log_info("Caching GPU kernels...") + compute_cache_path.mkdir(parents=True, exist_ok=True) + import tensorflow tf_version = tensorflow.version.VERSION @@ -156,7 +157,7 @@ class Devices(object): def initialize_main_env(): if int(os.environ.get("NN_DEVICES_INITIALIZED", 0)) != 0: return - + if 'CUDA_VISIBLE_DEVICES' in os.environ.keys(): os.environ.pop('CUDA_VISIBLE_DEVICES') From e53d1b1820f7ce5bef0681dcaf098d2a5a469c3b Mon Sep 17 00:00:00 2001 From: iperov Date: Mon, 26 Apr 2021 10:51:06 +0400 Subject: [PATCH 25/47] XSeg trainer: added random relighting sample augmentation to improve generalization --- core/imagelib/__init__.py | 3 +- core/imagelib/filters.py | 94 ++++++++++++++++++++++++++++ samplelib/SampleGeneratorFaceXSeg.py | 8 ++- 3 files changed, 103 insertions(+), 2 deletions(-) diff --git a/core/imagelib/__init__.py b/core/imagelib/__init__.py index a4f1482..6049cb8 100644 --- a/core/imagelib/__init__.py +++ b/core/imagelib/__init__.py @@ -27,4 +27,5 @@ from .filters import apply_random_rgb_levels, \ apply_random_gaussian_blur, \ apply_random_nearest_resize, \ apply_random_bilinear_resize, \ - apply_random_jpeg_compress + apply_random_jpeg_compress, \ + apply_random_relight diff --git a/core/imagelib/filters.py b/core/imagelib/filters.py index eb45ecb..149b510 100644 --- a/core/imagelib/filters.py +++ b/core/imagelib/filters.py @@ -126,4 +126,98 @@ def apply_random_jpeg_compress( img, chance, mask=None, rnd_state=None ): if mask is not None: result = img*(1-mask) + result*mask + return result + + +def _min_resize(x, m): + if x.shape[0] < x.shape[1]: + s0 = m + s1 = int(float(m) / float(x.shape[0]) * float(x.shape[1])) + else: + s0 = int(float(m) / float(x.shape[1]) * float(x.shape[0])) + s1 = m + new_max = min(s1, s0) + raw_max = min(x.shape[0], x.shape[1]) + return cv2.resize(x, (s1, s0), interpolation=cv2.INTER_LANCZOS4) + +def _d_resize(x, d, fac=1.0): + new_min = min(int(d[1] * fac), int(d[0] * fac)) + raw_min = min(x.shape[0], x.shape[1]) + if new_min < raw_min: + interpolation = cv2.INTER_AREA + else: + interpolation = cv2.INTER_LANCZOS4 + y = cv2.resize(x, (int(d[1] * fac), int(d[0] * fac)), interpolation=interpolation) + return y + +def _get_image_gradient(dist): + cols = cv2.filter2D(dist, cv2.CV_32F, np.array([[-1, 0, +1], [-2, 0, +2], [-1, 0, +1]])) + rows = cv2.filter2D(dist, cv2.CV_32F, np.array([[-1, -2, -1], [0, 0, 0], [+1, +2, +1]])) + return cols, rows + +def _generate_lighting_effects(content): + h512 = content + h256 = cv2.pyrDown(h512) + h128 = cv2.pyrDown(h256) + h64 = 
cv2.pyrDown(h128) + h32 = cv2.pyrDown(h64) + h16 = cv2.pyrDown(h32) + c512, r512 = _get_image_gradient(h512) + c256, r256 = _get_image_gradient(h256) + c128, r128 = _get_image_gradient(h128) + c64, r64 = _get_image_gradient(h64) + c32, r32 = _get_image_gradient(h32) + c16, r16 = _get_image_gradient(h16) + c = c16 + c = _d_resize(cv2.pyrUp(c), c32.shape) * 4.0 + c32 + c = _d_resize(cv2.pyrUp(c), c64.shape) * 4.0 + c64 + c = _d_resize(cv2.pyrUp(c), c128.shape) * 4.0 + c128 + c = _d_resize(cv2.pyrUp(c), c256.shape) * 4.0 + c256 + c = _d_resize(cv2.pyrUp(c), c512.shape) * 4.0 + c512 + r = r16 + r = _d_resize(cv2.pyrUp(r), r32.shape) * 4.0 + r32 + r = _d_resize(cv2.pyrUp(r), r64.shape) * 4.0 + r64 + r = _d_resize(cv2.pyrUp(r), r128.shape) * 4.0 + r128 + r = _d_resize(cv2.pyrUp(r), r256.shape) * 4.0 + r256 + r = _d_resize(cv2.pyrUp(r), r512.shape) * 4.0 + r512 + coarse_effect_cols = c + coarse_effect_rows = r + EPS = 1e-10 + + max_effect = np.max((coarse_effect_cols**2 + coarse_effect_rows**2)**0.5, axis=0, keepdims=True, ).max(1, keepdims=True) + coarse_effect_cols = (coarse_effect_cols + EPS) / (max_effect + EPS) + coarse_effect_rows = (coarse_effect_rows + EPS) / (max_effect + EPS) + + return np.stack([ np.zeros_like(coarse_effect_rows), coarse_effect_rows, coarse_effect_cols], axis=-1) + +def apply_random_relight(img, mask=None, rnd_state=None): + if rnd_state is None: + rnd_state = np.random + + def_img = img + + if rnd_state.randint(2) == 0: + light_pos_y = 1.0 if rnd_state.randint(2) == 0 else -1.0 + light_pos_x = rnd_state.uniform()*2-1.0 + else: + light_pos_y = rnd_state.uniform()*2-1.0 + light_pos_x = 1.0 if rnd_state.randint(2) == 0 else -1.0 + + light_source_height = 0.3*rnd_state.uniform()*0.7 + light_intensity = 1.0+rnd_state.uniform() + ambient_intensity = 0.5 + + light_source_location = np.array([[[light_source_height, light_pos_y, light_pos_x ]]], dtype=np.float32) + light_source_direction = light_source_location / np.sqrt(np.sum(np.square(light_source_location))) + + lighting_effect = _generate_lighting_effects(img) + lighting_effect = np.sum(lighting_effect * light_source_direction, axis=-1).clip(0, 1) + lighting_effect = np.mean(lighting_effect, axis=-1, keepdims=True) + + result = def_img * (ambient_intensity + lighting_effect * light_intensity) #light_source_color + result = np.clip(result, 0, 1) + + if mask is not None: + result = def_img*(1-mask) + result*mask + return result \ No newline at end of file diff --git a/samplelib/SampleGeneratorFaceXSeg.py b/samplelib/SampleGeneratorFaceXSeg.py index b2019f4..a1276f7 100644 --- a/samplelib/SampleGeneratorFaceXSeg.py +++ b/samplelib/SampleGeneratorFaceXSeg.py @@ -138,6 +138,8 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): bg_img = imagelib.apply_random_hsv_shift(bg_img) else: bg_img = imagelib.apply_random_rgb_levels(bg_img) + + c_mask = 1.0 - (1-bg_mask) * (1-mask) rnd = np.random.uniform() @@ -151,12 +153,16 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): mask[mask < 0.5] = 0.0 mask[mask >= 0.5] = 1.0 mask = np.clip(mask, 0, 1) + + if np.random.randint(4) < 3: + img = imagelib.apply_random_relight(img) if np.random.randint(2) == 0: img = imagelib.apply_random_hsv_shift(img, mask=sd.random_circle_faded ([resolution,resolution])) else: img = imagelib.apply_random_rgb_levels(img, mask=sd.random_circle_faded ([resolution,resolution])) - + + if np.random.randint(2) == 0: # random face flare krn = np.random.randint( resolution//4, resolution ) From 87030bdcdfdf226dbcd6d0039533bad8b5dfceaa Mon Sep 17 00:00:00 2001 
From: iperov Date: Tue, 27 Apr 2021 19:22:11 +0400 Subject: [PATCH 26/47] revert for more tests --- samplelib/SampleGeneratorFaceXSeg.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/samplelib/SampleGeneratorFaceXSeg.py b/samplelib/SampleGeneratorFaceXSeg.py index a1276f7..6eb29cf 100644 --- a/samplelib/SampleGeneratorFaceXSeg.py +++ b/samplelib/SampleGeneratorFaceXSeg.py @@ -154,8 +154,8 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): mask[mask >= 0.5] = 1.0 mask = np.clip(mask, 0, 1) - if np.random.randint(4) < 3: - img = imagelib.apply_random_relight(img) + #if np.random.randint(4) < 3: + # img = imagelib.apply_random_relight(img) if np.random.randint(2) == 0: img = imagelib.apply_random_hsv_shift(img, mask=sd.random_circle_faded ([resolution,resolution])) From 081d8faa4569976949cb7debf7c5e104c96203b4 Mon Sep 17 00:00:00 2001 From: iperov Date: Fri, 30 Apr 2021 16:49:32 +0400 Subject: [PATCH 27/47] XSeg sample generator: additional sample augmentation for training --- core/imagelib/__init__.py | 1 + core/imagelib/filters.py | 22 ++++++++++++++++++++++ samplelib/SampleGeneratorFaceXSeg.py | 10 +++------- 3 files changed, 26 insertions(+), 7 deletions(-) diff --git a/core/imagelib/__init__.py b/core/imagelib/__init__.py index 6049cb8..11234a5 100644 --- a/core/imagelib/__init__.py +++ b/core/imagelib/__init__.py @@ -21,6 +21,7 @@ from .SegIEPolys import * from .blursharpen import LinearMotionBlur, blursharpen from .filters import apply_random_rgb_levels, \ + apply_random_overlay_triangle, \ apply_random_hsv_shift, \ apply_random_sharpen, \ apply_random_motion_blur, \ diff --git a/core/imagelib/filters.py b/core/imagelib/filters.py index 149b510..6b69576 100644 --- a/core/imagelib/filters.py +++ b/core/imagelib/filters.py @@ -128,7 +128,29 @@ def apply_random_jpeg_compress( img, chance, mask=None, rnd_state=None ): return result +def apply_random_overlay_triangle( img, max_alpha, mask=None, rnd_state=None ): + if rnd_state is None: + rnd_state = np.random + h,w,c = img.shape + pt1 = [rnd_state.randint(w), rnd_state.randint(h) ] + pt2 = [rnd_state.randint(w), rnd_state.randint(h) ] + pt3 = [rnd_state.randint(w), rnd_state.randint(h) ] + + alpha = rnd_state.uniform()*max_alpha + + tri_mask = cv2.fillPoly( np.zeros_like(img), [ np.array([pt1,pt2,pt3], np.int32) ], (alpha,)*c ) + + if rnd_state.randint(2) == 0: + result = np.clip(img+tri_mask, 0, 1) + else: + result = np.clip(img-tri_mask, 0, 1) + + if mask is not None: + result = img*(1-mask) + result*mask + + return result + def _min_resize(x, m): if x.shape[0] < x.shape[1]: s0 = m diff --git a/samplelib/SampleGeneratorFaceXSeg.py b/samplelib/SampleGeneratorFaceXSeg.py index 6eb29cf..c5fbc47 100644 --- a/samplelib/SampleGeneratorFaceXSeg.py +++ b/samplelib/SampleGeneratorFaceXSeg.py @@ -138,11 +138,9 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): bg_img = imagelib.apply_random_hsv_shift(bg_img) else: bg_img = imagelib.apply_random_rgb_levels(bg_img) - - c_mask = 1.0 - (1-bg_mask) * (1-mask) - rnd = np.random.uniform() + rnd = 0.15 + np.random.uniform()*0.85 img = img*(c_mask) + img*(1-c_mask)*rnd + bg_img*(1-c_mask)*(1-rnd) warp_params = imagelib.gen_warp_params(resolution, random_flip, rotation_range=rotation_range, scale_range=scale_range, tx_range=tx_range, ty_range=ty_range ) @@ -153,15 +151,13 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): mask[mask < 0.5] = 0.0 mask[mask >= 0.5] = 1.0 mask = np.clip(mask, 0, 1) - - #if np.random.randint(4) < 3: - # img = imagelib.apply_random_relight(img) 
+ img = imagelib.apply_random_overlay_triangle(img, max_alpha=0.25, mask=sd.random_circle_faded ([resolution,resolution])) + if np.random.randint(2) == 0: img = imagelib.apply_random_hsv_shift(img, mask=sd.random_circle_faded ([resolution,resolution])) else: img = imagelib.apply_random_rgb_levels(img, mask=sd.random_circle_faded ([resolution,resolution])) - if np.random.randint(2) == 0: # random face flare From 6f5bccaa15d8c9062b84beaed95ed1954447f615 Mon Sep 17 00:00:00 2001 From: iperov Date: Fri, 30 Apr 2021 18:58:43 +0400 Subject: [PATCH 28/47] XSegEditor: added delete button --- XSegEditor/QIconDB.py | 1 + XSegEditor/QStringDB.py | 5 +++++ XSegEditor/XSegEditor.py | 19 +++++++++++++++++-- XSegEditor/gfx/icons/trashcan.png | Bin 0 -> 3285 bytes 4 files changed, 23 insertions(+), 2 deletions(-) create mode 100644 XSegEditor/gfx/icons/trashcan.png diff --git a/XSegEditor/QIconDB.py b/XSegEditor/QIconDB.py index a2427c3..1fd9e3e 100644 --- a/XSegEditor/QIconDB.py +++ b/XSegEditor/QIconDB.py @@ -17,6 +17,7 @@ class QIconDB(): QIconDB.poly_type_exclude = QIcon ( str(icon_path / 'poly_type_exclude.png') ) QIconDB.left = QIcon ( str(icon_path / 'left.png') ) QIconDB.right = QIcon ( str(icon_path / 'right.png') ) + QIconDB.trashcan = QIcon ( str(icon_path / 'trashcan.png') ) QIconDB.pt_edit_mode = QIcon ( str(icon_path / 'pt_edit_mode.png') ) QIconDB.view_lock_center = QIcon ( str(icon_path / 'view_lock_center.png') ) QIconDB.view_baked = QIcon ( str(icon_path / 'view_baked.png') ) diff --git a/XSegEditor/QStringDB.py b/XSegEditor/QStringDB.py index 632419e..475b97d 100644 --- a/XSegEditor/QStringDB.py +++ b/XSegEditor/QStringDB.py @@ -85,6 +85,11 @@ class QStringDB(): 'zh' : '保存并转到下一张图片\n按住SHIFT : 加快\n按住CTRL : 跳过未标记的\n', }[lang] + QStringDB.btn_delete_image_tip = { 'en' : 'Delete and Next image\n', + 'ru' : 'Удалить и следующее изображение\n', + 'zh' : '清除并转到下一张图片', + }[lang] + QStringDB.loading_tip = {'en' : 'Loading', 'ru' : 'Загрузка', 'zh' : '正在载入', diff --git a/XSegEditor/XSegEditor.py b/XSegEditor/XSegEditor.py index c9cb6aa..d934850 100644 --- a/XSegEditor/XSegEditor.py +++ b/XSegEditor/XSegEditor.py @@ -1341,7 +1341,15 @@ class MainWindow(QXMainWindow): self.update_cached_images() self.update_preview_bar() - + + def delete_current_image(self): + self.process_next_image() + + img_path = self.image_paths_done.pop(-1) + Path(img_path).unlink() + self.update_cached_images() + self.update_preview_bar() + def initialize_ui(self): self.canvas = QCanvas() @@ -1356,12 +1364,19 @@ class MainWindow(QXMainWindow): btn_next_image = QXIconButton(QIconDB.right, QStringDB.btn_next_image_tip, shortcut='D', click_func=self.process_next_image) btn_next_image.setIconSize(QUIConfig.preview_bar_icon_q_size) - + btn_delete_image = QXIconButton(QIconDB.trashcan, QStringDB.btn_delete_image_tip, shortcut='X', click_func=self.delete_current_image) + btn_delete_image.setIconSize(QUIConfig.preview_bar_icon_q_size) + + pad_image = QWidget() + pad_image.setFixedSize(QUIConfig.preview_bar_icon_q_size) + preview_image_bar_frame_l = QHBoxLayout() preview_image_bar_frame_l.setContentsMargins(0,0,0,0) + preview_image_bar_frame_l.addWidget ( pad_image, alignment=Qt.AlignCenter) preview_image_bar_frame_l.addWidget ( btn_prev_image, alignment=Qt.AlignCenter) preview_image_bar_frame_l.addWidget ( image_bar) preview_image_bar_frame_l.addWidget ( btn_next_image, alignment=Qt.AlignCenter) + preview_image_bar_frame_l.addWidget ( btn_delete_image, alignment=Qt.AlignCenter) preview_image_bar_frame = QFrame() 
preview_image_bar_frame.setSizePolicy ( QSizePolicy.Fixed, QSizePolicy.Fixed )
diff --git a/XSegEditor/gfx/icons/trashcan.png b/XSegEditor/gfx/icons/trashcan.png
new file mode 100644
index 0000000000000000000000000000000000000000..a31285b12b79836107a336ccc0c7eb4e67c01fbc
Binary files /dev/null and b/XSegEditor/gfx/icons/trashcan.png differ

From d204e049d166b0861cc81c23a072e55d74efaf69 Mon Sep 17 00:00:00 2001
From: iperov
Date: Tue, 4 May 2021 23:16:47 +0400
Subject: [PATCH 29/47] revert

---
 samplelib/SampleGeneratorFaceXSeg.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/samplelib/SampleGeneratorFaceXSeg.py b/samplelib/SampleGeneratorFaceXSeg.py
index c5fbc47..669b044 100644
--- a/samplelib/SampleGeneratorFaceXSeg.py
+++ b/samplelib/SampleGeneratorFaceXSeg.py
@@ -152,8 +152,6 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase):
             mask[mask >= 0.5] = 1.0
             mask = np.clip(mask, 0, 1)
 
-            img = imagelib.apply_random_overlay_triangle(img, max_alpha=0.25, mask=sd.random_circle_faded ([resolution,resolution]))
-
             if np.random.randint(2) == 0:
                 img = imagelib.apply_random_hsv_shift(img, mask=sd.random_circle_faded 
([resolution,resolution])) else: From aa26089032d5679838f489a53239eed0701aaebb Mon Sep 17 00:00:00 2001 From: iperov Date: Thu, 6 May 2021 10:52:04 +0400 Subject: [PATCH 30/47] FacesetResizer now supports changing face type --- mainscripts/FacesetResizer.py | 121 +++++++++++++++++++++++++--------- 1 file changed, 91 insertions(+), 30 deletions(-) diff --git a/mainscripts/FacesetResizer.py b/mainscripts/FacesetResizer.py index f2ec391..2c58e77 100644 --- a/mainscripts/FacesetResizer.py +++ b/mainscripts/FacesetResizer.py @@ -1,21 +1,23 @@ import multiprocessing import shutil +import cv2 from core import pathex from core.cv2ex import * from core.interact import interact as io from core.joblib import Subprocessor from DFLIMG import * -from facelib import LandmarksProcessor, FaceType +from facelib import FaceType, LandmarksProcessor class FacesetResizerSubprocessor(Subprocessor): #override - def __init__(self, image_paths, output_dirpath, image_size): + def __init__(self, image_paths, output_dirpath, image_size, face_type=None): self.image_paths = image_paths self.output_dirpath = output_dirpath self.image_size = image_size + self.face_type = face_type self.result = [] super().__init__('FacesetResizer', FacesetResizerSubprocessor.Cli, 600) @@ -30,7 +32,7 @@ class FacesetResizerSubprocessor(Subprocessor): #override def process_info_generator(self): - base_dict = {'output_dirpath':self.output_dirpath, 'image_size':self.image_size} + base_dict = {'output_dirpath':self.output_dirpath, 'image_size':self.image_size, 'face_type':self.face_type} for device_idx in range( min(8, multiprocessing.cpu_count()) ): client_dict = base_dict.copy() @@ -63,6 +65,7 @@ class FacesetResizerSubprocessor(Subprocessor): def on_initialize(self, client_dict): self.output_dirpath = client_dict['output_dirpath'] self.image_size = client_dict['image_size'] + self.face_type = client_dict['face_type'] self.log_info (f"Running on { client_dict['device_name'] }") #override @@ -72,39 +75,87 @@ class FacesetResizerSubprocessor(Subprocessor): if dflimg is None or not dflimg.has_data(): self.log_err (f"{filepath.name} is not a dfl image file") else: - dfl_dict = dflimg.get_dict() - img = cv2_imread(filepath) h,w = img.shape[:2] if h != w: raise Exception(f'w != h in {filepath}') image_size = self.image_size - scale = w / image_size - - img = cv2.resize(img, (image_size, image_size), interpolation=cv2.INTER_LANCZOS4) - + face_type = self.face_type output_filepath = self.output_dirpath / filepath.name - cv2_imwrite ( str(output_filepath), img, [int(cv2.IMWRITE_JPEG_QUALITY), 100] ) + + if face_type is not None: + lmrks = dflimg.get_landmarks() + mat = LandmarksProcessor.get_transform_mat(lmrks, image_size, face_type) + + img = cv2.warpAffine(img, mat, (image_size, image_size), flags=cv2.INTER_LANCZOS4 ) + img = np.clip(img, 0, 255).astype(np.uint8) + + cv2_imwrite ( str(output_filepath), img, [int(cv2.IMWRITE_JPEG_QUALITY), 100] ) - dflimg = DFLIMG.load (output_filepath) - dflimg.set_dict(dfl_dict) - - - lmrks = dflimg.get_landmarks() - lmrks /= scale - dflimg.set_landmarks(lmrks) - - seg_ie_polys = dflimg.get_seg_ie_polys() - seg_ie_polys.mult_points( 1.0 / scale) - dflimg.set_seg_ie_polys(seg_ie_polys) - - mat = dflimg.get_image_to_face_mat() - if mat is not None: - face_type = FaceType.fromString ( dflimg.get_face_type() ) - mat = LandmarksProcessor.get_transform_mat ( dflimg.get_source_landmarks(), image_size, face_type ) - dflimg.set_image_to_face_mat(mat) - dflimg.save() + dfl_dict = dflimg.get_dict() + dflimg = DFLIMG.load 
(output_filepath) + dflimg.set_dict(dfl_dict) + + xseg_mask = dflimg.get_xseg_mask() + if xseg_mask is not None: + xseg_res = 256 + + xseg_lmrks = lmrks.copy() + xseg_lmrks *= (xseg_res / w) + xseg_mat = LandmarksProcessor.get_transform_mat(xseg_lmrks, xseg_res, face_type) + + xseg_mask = cv2.warpAffine(xseg_mask, xseg_mat, (xseg_res, xseg_res), flags=cv2.INTER_LANCZOS4 ) + xseg_mask[xseg_mask < 0.5] = 0 + xseg_mask[xseg_mask >= 0.5] = 1 + + dflimg.set_xseg_mask(xseg_mask) + + seg_ie_polys = dflimg.get_seg_ie_polys() + + for poly in seg_ie_polys.get_polys(): + poly_pts = poly.get_pts() + poly_pts = LandmarksProcessor.transform_points(poly_pts, mat) + poly.set_points(poly_pts) + + dflimg.set_seg_ie_polys(seg_ie_polys) + + lmrks = LandmarksProcessor.transform_points(lmrks, mat) + dflimg.set_landmarks(lmrks) + + image_to_face_mat = dflimg.get_image_to_face_mat() + if image_to_face_mat is not None: + image_to_face_mat = LandmarksProcessor.get_transform_mat ( dflimg.get_source_landmarks(), image_size, face_type ) + dflimg.set_image_to_face_mat(image_to_face_mat) + dflimg.save() + + else: + dfl_dict = dflimg.get_dict() + + scale = w / image_size + + img = cv2.resize(img, (image_size, image_size), interpolation=cv2.INTER_LANCZOS4) + + cv2_imwrite ( str(output_filepath), img, [int(cv2.IMWRITE_JPEG_QUALITY), 100] ) + + dflimg = DFLIMG.load (output_filepath) + dflimg.set_dict(dfl_dict) + + lmrks = dflimg.get_landmarks() + lmrks /= scale + dflimg.set_landmarks(lmrks) + + seg_ie_polys = dflimg.get_seg_ie_polys() + seg_ie_polys.mult_points( 1.0 / scale) + dflimg.set_seg_ie_polys(seg_ie_polys) + + image_to_face_mat = dflimg.get_image_to_face_mat() + + if image_to_face_mat is not None: + face_type = FaceType.fromString ( dflimg.get_face_type() ) + image_to_face_mat = LandmarksProcessor.get_transform_mat ( dflimg.get_source_landmarks(), image_size, face_type ) + dflimg.set_image_to_face_mat(image_to_face_mat) + dflimg.save() return (1, filepath, output_filepath) except: @@ -115,7 +166,17 @@ class FacesetResizerSubprocessor(Subprocessor): def process_folder ( dirpath): image_size = io.input_int(f"New image size", 512, valid_range=[256,2048]) - + + face_type = io.input_str ("Change face type", 'same', ['h','mf','f','wf','head','same']).lower() + if face_type == 'same': + face_type = None + else: + face_type = {'h' : FaceType.HALF, + 'mf' : FaceType.MID_FULL, + 'f' : FaceType.FULL, + 'wf' : FaceType.WHOLE_FACE, + 'head' : FaceType.HEAD}[face_type] + output_dirpath = dirpath.parent / (dirpath.name + '_resized') output_dirpath.mkdir (exist_ok=True, parents=True) @@ -131,7 +192,7 @@ def process_folder ( dirpath): Path(filename).unlink() image_paths = [Path(x) for x in pathex.get_image_paths( dirpath )] - result = FacesetResizerSubprocessor ( image_paths, output_dirpath, image_size).run() + result = FacesetResizerSubprocessor ( image_paths, output_dirpath, image_size, face_type).run() is_merge = io.input_bool (f"\r\nMerge {output_dirpath_parts} to {dirpath_parts} ?", True) if is_merge: From b15cdd96a1934e39c87238b7127876e200a8cb25 Mon Sep 17 00:00:00 2001 From: iperov Date: Sun, 9 May 2021 20:07:36 +0400 Subject: [PATCH 31/47] fix FacesetResizer --- mainscripts/FacesetResizer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mainscripts/FacesetResizer.py b/mainscripts/FacesetResizer.py index 2c58e77..acc4ea4 100644 --- a/mainscripts/FacesetResizer.py +++ b/mainscripts/FacesetResizer.py @@ -127,6 +127,7 @@ class FacesetResizerSubprocessor(Subprocessor): if image_to_face_mat is not None: image_to_face_mat = 
LandmarksProcessor.get_transform_mat ( dflimg.get_source_landmarks(), image_size, face_type ) dflimg.set_image_to_face_mat(image_to_face_mat) + dflimg.set_face_type( FaceType.toString(face_type) ) dflimg.save() else: From 8b90ca0dacec05d95f4df8a75b85cbc9e701fe2b Mon Sep 17 00:00:00 2001 From: iperov Date: Wed, 12 May 2021 09:41:53 +0400 Subject: [PATCH 32/47] XSegUtil apply xseg now checks model face type --- mainscripts/XSegUtil.py | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/mainscripts/XSegUtil.py b/mainscripts/XSegUtil.py index b95bb2c..c75a14a 100644 --- a/mainscripts/XSegUtil.py +++ b/mainscripts/XSegUtil.py @@ -11,7 +11,7 @@ from core.interact import interact as io from core.leras import nn from DFLIMG import * from facelib import XSegNet, LandmarksProcessor, FaceType - +import pickle def apply_xseg(input_path, model_path): if not input_path.exists(): @@ -19,21 +19,37 @@ def apply_xseg(input_path, model_path): if not model_path.exists(): raise ValueError(f'{model_path} not found. Please ensure it exists.') - - face_type = io.input_str ("XSeg model face type", 'same', ['h','mf','f','wf','head','same'], help_message="Specify face type of trained XSeg model. For example if XSeg model trained as WF, but faceset is HEAD, specify WF to apply xseg only on WF part of HEAD. Default is 'same'").lower() - if face_type == 'same': - face_type = None - else: + + face_type = None + + model_dat = model_path / 'XSeg_data.dat' + if model_dat.exists(): + dat = pickle.loads( model_dat.read_bytes() ) + dat_options = dat.get('options', None) + if dat_options is not None: + face_type = dat_options.get('face_type', None) + + + + if face_type is None: + face_type = io.input_str ("XSeg model face type", 'same', ['h','mf','f','wf','head','same'], help_message="Specify face type of trained XSeg model. For example if XSeg model trained as WF, but faceset is HEAD, specify WF to apply xseg only on WF part of HEAD. 
Default is 'same'").lower() + if face_type == 'same': + face_type = None + + if face_type is not None: face_type = {'h' : FaceType.HALF, 'mf' : FaceType.MID_FULL, 'f' : FaceType.FULL, 'wf' : FaceType.WHOLE_FACE, 'head' : FaceType.HEAD}[face_type] + io.log_info(f'Applying trained XSeg model to {input_path.name}/ folder.') device_config = nn.DeviceConfig.ask_choose_device(choose_only_one=True) nn.initialize(device_config) + + xseg = XSegNet(name='XSeg', load_weights=True, weights_file_root=model_path, From 65a703c02434055a3a3b6bb6e0a5ae38ba7afa47 Mon Sep 17 00:00:00 2001 From: iperov Date: Wed, 12 May 2021 09:42:29 +0400 Subject: [PATCH 33/47] SampleGeneratorFaceXSeg: additional sample augmentation for better generalization --- samplelib/SampleGeneratorFaceXSeg.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/samplelib/SampleGeneratorFaceXSeg.py b/samplelib/SampleGeneratorFaceXSeg.py index 669b044..7e38e64 100644 --- a/samplelib/SampleGeneratorFaceXSeg.py +++ b/samplelib/SampleGeneratorFaceXSeg.py @@ -151,11 +151,6 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): mask[mask < 0.5] = 0.0 mask[mask >= 0.5] = 1.0 mask = np.clip(mask, 0, 1) - - if np.random.randint(2) == 0: - img = imagelib.apply_random_hsv_shift(img, mask=sd.random_circle_faded ([resolution,resolution])) - else: - img = imagelib.apply_random_rgb_levels(img, mask=sd.random_circle_faded ([resolution,resolution])) if np.random.randint(2) == 0: # random face flare @@ -163,6 +158,17 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase): krn = krn - krn % 2 + 1 img = img + cv2.GaussianBlur(img*mask, (krn,krn), 0) + if np.random.randint(2) == 0: + # random bg flare + krn = np.random.randint( resolution//4, resolution ) + krn = krn - krn % 2 + 1 + img = img + cv2.GaussianBlur(img*(1-mask), (krn,krn), 0) + + if np.random.randint(2) == 0: + img = imagelib.apply_random_hsv_shift(img, mask=sd.random_circle_faded ([resolution,resolution])) + else: + img = imagelib.apply_random_rgb_levels(img, mask=sd.random_circle_faded ([resolution,resolution])) + if np.random.randint(2) == 0: img = imagelib.apply_random_sharpen( img, sharpen_chance, sharpen_kernel_max_size, mask=sd.random_circle_faded ([resolution,resolution])) else: From 66bb72f1642305fdbd2223b550ef64b0f9ff714a Mon Sep 17 00:00:00 2001 From: iperov Date: Wed, 12 May 2021 13:28:48 +0400 Subject: [PATCH 34/47] XSeg model has been changed to work better with large amount of various faces, thus you should retrain existing xseg model. Windows build: Added Generic XSeg model pretrained on various faces. It is most suitable for src faceset because it contains clean faces, but also can be applied on dst footage without complex face obstructions. 
--- core/leras/models/XSeg.py | 55 +++++++++++++++++++++++++++------------ 1 file changed, 38 insertions(+), 17 deletions(-) diff --git a/core/leras/models/XSeg.py b/core/leras/models/XSeg.py index 0ba19a6..e6bde65 100644 --- a/core/leras/models/XSeg.py +++ b/core/leras/models/XSeg.py @@ -28,11 +28,12 @@ class XSeg(nn.ModelBase): x = self.frn(x) x = self.tlu(x) return x + + self.base_ch = base_ch self.conv01 = ConvBlock(in_ch, base_ch) self.conv02 = ConvBlock(base_ch, base_ch) - self.bp0 = nn.BlurPool (filt_size=3) - + self.bp0 = nn.BlurPool (filt_size=4) self.conv11 = ConvBlock(base_ch, base_ch*2) self.conv12 = ConvBlock(base_ch*2, base_ch*2) @@ -40,19 +41,30 @@ class XSeg(nn.ModelBase): self.conv21 = ConvBlock(base_ch*2, base_ch*4) self.conv22 = ConvBlock(base_ch*4, base_ch*4) - self.conv23 = ConvBlock(base_ch*4, base_ch*4) - self.bp2 = nn.BlurPool (filt_size=3) - + self.bp2 = nn.BlurPool (filt_size=2) self.conv31 = ConvBlock(base_ch*4, base_ch*8) self.conv32 = ConvBlock(base_ch*8, base_ch*8) self.conv33 = ConvBlock(base_ch*8, base_ch*8) - self.bp3 = nn.BlurPool (filt_size=3) + self.bp3 = nn.BlurPool (filt_size=2) self.conv41 = ConvBlock(base_ch*8, base_ch*8) self.conv42 = ConvBlock(base_ch*8, base_ch*8) self.conv43 = ConvBlock(base_ch*8, base_ch*8) - self.bp4 = nn.BlurPool (filt_size=3) + self.bp4 = nn.BlurPool (filt_size=2) + + self.conv51 = ConvBlock(base_ch*8, base_ch*8) + self.conv52 = ConvBlock(base_ch*8, base_ch*8) + self.conv53 = ConvBlock(base_ch*8, base_ch*8) + self.bp5 = nn.BlurPool (filt_size=2) + + self.dense1 = nn.Dense ( 4*4* base_ch*8, 512) + self.dense2 = nn.Dense ( 512, 4*4* base_ch*8) + + self.up5 = UpConvBlock (base_ch*8, base_ch*4) + self.uconv53 = ConvBlock(base_ch*12, base_ch*8) + self.uconv52 = ConvBlock(base_ch*8, base_ch*8) + self.uconv51 = ConvBlock(base_ch*8, base_ch*8) self.up4 = UpConvBlock (base_ch*8, base_ch*4) self.uconv43 = ConvBlock(base_ch*12, base_ch*8) @@ -65,8 +77,7 @@ class XSeg(nn.ModelBase): self.uconv31 = ConvBlock(base_ch*8, base_ch*8) self.up2 = UpConvBlock (base_ch*8, base_ch*4) - self.uconv23 = ConvBlock(base_ch*8, base_ch*4) - self.uconv22 = ConvBlock(base_ch*4, base_ch*4) + self.uconv22 = ConvBlock(base_ch*8, base_ch*4) self.uconv21 = ConvBlock(base_ch*4, base_ch*4) self.up1 = UpConvBlock (base_ch*4, base_ch*2) @@ -78,8 +89,7 @@ class XSeg(nn.ModelBase): self.uconv01 = ConvBlock(base_ch, base_ch) self.out_conv = nn.Conv2D (base_ch, out_ch, kernel_size=3, padding='SAME') - self.conv_center = ConvBlock(base_ch*8, base_ch*8) - + def forward(self, inp): x = inp @@ -92,8 +102,7 @@ class XSeg(nn.ModelBase): x = self.bp1(x) x = self.conv21(x) - x = self.conv22(x) - x = x2 = self.conv23(x) + x = x2 = self.conv22(x) x = self.bp2(x) x = self.conv31(x) @@ -106,8 +115,21 @@ class XSeg(nn.ModelBase): x = x4 = self.conv43(x) x = self.bp4(x) - x = self.conv_center(x) - + x = self.conv51(x) + x = self.conv52(x) + x = x5 = self.conv53(x) + x = self.bp5(x) + + x = nn.flatten(x) + x = self.dense1(x) + x = self.dense2(x) + x = nn.reshape_4D (x, 4, 4, self.base_ch*8 ) + + x = self.up5(x) + x = self.uconv53(tf.concat([x,x5],axis=nn.conv2d_ch_axis)) + x = self.uconv52(x) + x = self.uconv51(x) + x = self.up4(x) x = self.uconv43(tf.concat([x,x4],axis=nn.conv2d_ch_axis)) x = self.uconv42(x) @@ -119,8 +141,7 @@ class XSeg(nn.ModelBase): x = self.uconv31(x) x = self.up2(x) - x = self.uconv23(tf.concat([x,x2],axis=nn.conv2d_ch_axis)) - x = self.uconv22(x) + x = self.uconv22(tf.concat([x,x2],axis=nn.conv2d_ch_axis)) x = self.uconv21(x) x = self.up1(x) From 
deedd3dd12ab702315b3b2901f639690baecaf87 Mon Sep 17 00:00:00 2001
From: iperov
Date: Wed, 12 May 2021 13:29:01 +0400
Subject: [PATCH 35/47] upd windows magnet link

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index b413ff3..3805772 100644
--- a/README.md
+++ b/README.md
@@ -194,7 +194,7 @@ Unfortunately, there is no "make everything ok" button in DeepFaceLab. You shoul
-Windows (magnet link)
+Windows (magnet link)
 Last release. Use torrent client to download.

From 41b517517e7a41092c820ab134f9a7df0491bd08 Mon Sep 17 00:00:00 2001
From: iperov
Date: Tue, 25 May 2021 14:17:34 +0400
Subject: [PATCH 36/47] XSegEditor: the delete button now moves the face to the
 _trash directory, and the button itself has been moved to the right border of
 the window

---
 XSegEditor/QStringDB.py  |  6 +++---
 XSegEditor/XSegEditor.py | 25 +++++++++++++++++------
 2 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/XSegEditor/QStringDB.py b/XSegEditor/QStringDB.py
index 475b97d..b9100d2 100644
--- a/XSegEditor/QStringDB.py
+++ b/XSegEditor/QStringDB.py
@@ -85,9 +85,9 @@ class QStringDB():
                                           'zh' : '保存并转到下一张图片\n按住SHIFT : 加快\n按住CTRL : 跳过未标记的\n',
                                          }[lang]

-        QStringDB.btn_delete_image_tip = { 'en' : 'Delete and Next image\n',
-                                           'ru' : 'Удалить и следующее изображение\n',
-                                           'zh' : '清除并转到下一张图片',
+        QStringDB.btn_delete_image_tip = { 'en' : 'Move to _trash and Next image\n',
+                                           'ru' : 'Переместить в _trash и следующее изображение\n',
+                                           'zh' : '移至_trash,转到下一张图片 ',
                                          }[lang]

        QStringDB.loading_tip = {'en' : 'Loading',

diff --git a/XSegEditor/XSegEditor.py b/XSegEditor/XSegEditor.py
index d934850..affc9f6 100644
--- a/XSegEditor/XSegEditor.py
+++ b/XSegEditor/XSegEditor.py
@@ -1164,6 +1164,7 @@ class MainWindow(QXMainWindow):
         super().__init__()
         self.input_dirpath = input_dirpath
+        self.trash_dirpath = input_dirpath.parent / (input_dirpath.name + '_trash')
         self.cfg_root_path = cfg_root_path

         self.cfg_path = cfg_root_path / 'MainWindow_cfg.dat'
@@ -1342,11 +1343,14 @@ class MainWindow(QXMainWindow):
         self.update_cached_images()
         self.update_preview_bar()

-    def delete_current_image(self):
+    def trash_current_image(self):
         self.process_next_image()

         img_path = self.image_paths_done.pop(-1)
-        Path(img_path).unlink()
+        img_path = Path(img_path)
+        self.trash_dirpath.mkdir(parents=True, exist_ok=True)
+        img_path.rename( self.trash_dirpath / img_path.name )
+
         self.update_cached_images()
         self.update_preview_bar()

@@ -1364,7 +1368,7 @@ class MainWindow(QXMainWindow):
         btn_next_image = QXIconButton(QIconDB.right, QStringDB.btn_next_image_tip, shortcut='D', click_func=self.process_next_image)
         btn_next_image.setIconSize(QUIConfig.preview_bar_icon_q_size)

-        btn_delete_image = QXIconButton(QIconDB.trashcan, QStringDB.btn_delete_image_tip, shortcut='X', click_func=self.delete_current_image)
+        btn_delete_image = QXIconButton(QIconDB.trashcan, QStringDB.btn_delete_image_tip, shortcut='X', click_func=self.trash_current_image)
         btn_delete_image.setIconSize(QUIConfig.preview_bar_icon_q_size)

         pad_image = QWidget()
@@ -1376,15 +1380,24 @@
         preview_image_bar_frame_l.addWidget ( btn_prev_image, alignment=Qt.AlignCenter)
         preview_image_bar_frame_l.addWidget ( image_bar)
         preview_image_bar_frame_l.addWidget ( btn_next_image, alignment=Qt.AlignCenter)
-        preview_image_bar_frame_l.addWidget ( btn_delete_image, alignment=Qt.AlignCenter)
+        #preview_image_bar_frame_l.addWidget ( btn_delete_image, alignment=Qt.AlignCenter)

         preview_image_bar_frame = QFrame()
         preview_image_bar_frame.setSizePolicy (
QSizePolicy.Fixed, QSizePolicy.Fixed ) preview_image_bar_frame.setLayout(preview_image_bar_frame_l) - preview_image_bar_l = QHBoxLayout() - preview_image_bar_l.addWidget (preview_image_bar_frame) + preview_image_bar_frame2_l = QHBoxLayout() + preview_image_bar_frame2_l.setContentsMargins(0,0,0,0) + preview_image_bar_frame2_l.addWidget ( btn_delete_image, alignment=Qt.AlignCenter) + preview_image_bar_frame2 = QFrame() + preview_image_bar_frame2.setSizePolicy ( QSizePolicy.Fixed, QSizePolicy.Fixed ) + preview_image_bar_frame2.setLayout(preview_image_bar_frame2_l) + + preview_image_bar_l = QHBoxLayout() + preview_image_bar_l.addWidget (preview_image_bar_frame, alignment=Qt.AlignCenter) + preview_image_bar_l.addWidget (preview_image_bar_frame2) + preview_image_bar = QFrame() preview_image_bar.setFrameShape(QFrame.StyledPanel) preview_image_bar.setSizePolicy ( QSizePolicy.Expanding, QSizePolicy.Fixed ) From 766750941a1e0f8363637fdbd23a9c267de2cbfe Mon Sep 17 00:00:00 2001 From: iperov Date: Tue, 25 May 2021 14:21:38 +0400 Subject: [PATCH 37/47] Faceset packer now asks whether to delete the original files --- samplelib/PackedFaceset.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/samplelib/PackedFaceset.py b/samplelib/PackedFaceset.py index 2ea01d9..e7ae1d4 100644 --- a/samplelib/PackedFaceset.py +++ b/samplelib/PackedFaceset.py @@ -84,17 +84,18 @@ class PackedFaceset(): of.write ( struct.pack("Q", offset) ) of.seek(0,2) of.close() + + if io.input_bool(f"Delete original files?", True): + for filename in io.progress_bar_generator(image_paths, "Deleting files"): + Path(filename).unlink() - for filename in io.progress_bar_generator(image_paths, "Deleting files"): - Path(filename).unlink() - - if as_person_faceset: - for dir_name in io.progress_bar_generator(dir_names, "Deleting dirs"): - dir_path = samples_path / dir_name - try: - shutil.rmtree(dir_path) - except: - io.log_info (f"unable to remove: {dir_path} ") + if as_person_faceset: + for dir_name in io.progress_bar_generator(dir_names, "Deleting dirs"): + dir_path = samples_path / dir_name + try: + shutil.rmtree(dir_path) + except: + io.log_info (f"unable to remove: {dir_path} ") @staticmethod def unpack(samples_path): From 757283d10e40a873ecafc2cba64d325ca2014d4c Mon Sep 17 00:00:00 2001 From: iperov Date: Tue, 25 May 2021 14:23:25 +0400 Subject: [PATCH 38/47] Trainer now saves every 25 min instead of 15 --- mainscripts/Trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mainscripts/Trainer.py b/mainscripts/Trainer.py index 4afc218..66afd71 100644 --- a/mainscripts/Trainer.py +++ b/mainscripts/Trainer.py @@ -33,7 +33,7 @@ def trainerThread (s2c, c2s, e, try: start_time = time.time() - save_interval_min = 15 + save_interval_min = 25 if not training_data_src_path.exists(): training_data_src_path.mkdir(exist_ok=True, parents=True) From e6e2ee74664def0ecd3de546c95690f78aadce5c Mon Sep 17 00:00:00 2001 From: iperov Date: Tue, 25 May 2021 14:26:48 +0400 Subject: [PATCH 39/47] pixel_norm op --- core/leras/ops/__init__.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/core/leras/ops/__init__.py b/core/leras/ops/__init__.py index 500a22a..09e8e7a 100644 --- a/core/leras/ops/__init__.py +++ b/core/leras/ops/__init__.py @@ -204,7 +204,7 @@ def random_binomial(shape, p=0.0, dtype=None, seed=None): seed = np.random.randint(10e6) return array_ops.where( random_ops.random_uniform(shape, dtype=tf.float16, seed=seed) < p, - array_ops.ones(shape, dtype=dtype), 
array_ops.zeros(shape, dtype=dtype)) + array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype)) nn.random_binomial = random_binomial def gaussian_blur(input, radius=2.0): @@ -391,6 +391,11 @@ def total_variation_mse(images): return tot_var nn.total_variation_mse = total_variation_mse + +def pixel_norm(x, axes): + return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=axes, keepdims=True) + 1e-06) +nn.pixel_norm = pixel_norm + """ def tf_suppress_lower_mean(t, eps=0.00001): if t.shape.ndims != 1: From 6f86c68e65bdca8a531d473ed95846f86b8c4001 Mon Sep 17 00:00:00 2001 From: iperov Date: Sat, 29 May 2021 18:57:35 +0400 Subject: [PATCH 40/47] add AMP model --- models/Model_AMP/Model.py | 768 +++++++++++++++++++++++++++++++++++ models/Model_AMP/__init__.py | 1 + 2 files changed, 769 insertions(+) create mode 100644 models/Model_AMP/Model.py create mode 100644 models/Model_AMP/__init__.py diff --git a/models/Model_AMP/Model.py b/models/Model_AMP/Model.py new file mode 100644 index 0000000..95f08bb --- /dev/null +++ b/models/Model_AMP/Model.py @@ -0,0 +1,768 @@ +import multiprocessing +import operator +from functools import partial + +import numpy as np + +from core import mathlib +from core.interact import interact as io +from core.leras import nn +from facelib import FaceType +from models import ModelBase +from samplelib import * +from core.cv2ex import * + +class AMPModel(ModelBase): + + #override + def on_initialize_options(self): + device_config = nn.getCurrentDeviceConfig() + + lowest_vram = 2 + if len(device_config.devices) != 0: + lowest_vram = device_config.devices.get_worst_device().total_mem_gb + + if lowest_vram >= 4: + suggest_batch_size = 8 + else: + suggest_batch_size = 4 + + yn_str = {True:'y',False:'n'} + min_res = 64 + max_res = 640 + + default_resolution = self.options['resolution'] = self.load_or_def_option('resolution', 224) + default_face_type = self.options['face_type'] = self.load_or_def_option('face_type', 'wf') + default_models_opt_on_gpu = self.options['models_opt_on_gpu'] = self.load_or_def_option('models_opt_on_gpu', True) + + default_ae_dims = self.options['ae_dims'] = self.load_or_def_option('ae_dims', 256) + default_e_dims = self.options['e_dims'] = self.load_or_def_option('e_dims', 64) + default_d_dims = self.options['d_dims'] = self.options.get('d_dims', None) + default_d_mask_dims = self.options['d_mask_dims'] = self.options.get('d_mask_dims', None) + default_masked_training = self.options['masked_training'] = self.load_or_def_option('masked_training', True) + default_eyes_mouth_prio = self.options['eyes_mouth_prio'] = self.load_or_def_option('eyes_mouth_prio', True) + default_uniform_yaw = self.options['uniform_yaw'] = self.load_or_def_option('uniform_yaw', False) + + lr_dropout = self.load_or_def_option('lr_dropout', 'n') + lr_dropout = {True:'y', False:'n'}.get(lr_dropout, lr_dropout) #backward comp + default_lr_dropout = self.options['lr_dropout'] = lr_dropout + + default_random_warp = self.options['random_warp'] = self.load_or_def_option('random_warp', True) + default_ct_mode = self.options['ct_mode'] = self.load_or_def_option('ct_mode', 'none') + default_clipgrad = self.options['clipgrad'] = self.load_or_def_option('clipgrad', False) + + ask_override = self.ask_override() + if self.is_first_run() or ask_override: + self.ask_autobackup_hour() + self.ask_write_preview_history() + self.ask_target_iter() + self.ask_random_src_flip() + self.ask_random_dst_flip() + self.ask_batch_size(suggest_batch_size) + + if self.is_first_run(): + resolution = 
io.input_int("Resolution", default_resolution, add_info="64-640", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 32 .") + resolution = np.clip ( (resolution // 32) * 32, min_res, max_res) + self.options['resolution'] = resolution + self.options['face_type'] = io.input_str ("Face type", default_face_type, ['wf','head'], help_message="whole face / head").lower() + + + default_d_dims = self.options['d_dims'] = self.load_or_def_option('d_dims', 64) + + default_d_mask_dims = default_d_dims // 3 + default_d_mask_dims += default_d_mask_dims % 2 + default_d_mask_dims = self.options['d_mask_dims'] = self.load_or_def_option('d_mask_dims', default_d_mask_dims) + + if self.is_first_run(): + self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dimensions", default_ae_dims, add_info="32-1024", help_message="All face information will packed to AE dims. If amount of AE dims are not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 ) + + e_dims = np.clip ( io.input_int("Encoder dimensions", default_e_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 ) + self.options['e_dims'] = e_dims + e_dims % 2 + + d_dims = np.clip ( io.input_int("Decoder dimensions", default_d_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 ) + self.options['d_dims'] = d_dims + d_dims % 2 + + d_mask_dims = np.clip ( io.input_int("Decoder mask dimensions", default_d_mask_dims, add_info="16-256", help_message="Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality." ), 16, 256 ) + self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2 + + if self.is_first_run() or ask_override: + if self.options['face_type'] == 'wf' or self.options['face_type'] == 'head': + self.options['masked_training'] = io.input_bool ("Masked training", default_masked_training, help_message="This option is available only for 'whole_face' or 'head' type. Masked training clips training area to full_face mask or XSeg mask, thus network will train the faces properly.") + + self.options['eyes_mouth_prio'] = io.input_bool ("Eyes and mouth priority", default_eyes_mouth_prio, help_message='Helps to fix eye problems during training like "alien eyes" and wrong eyes direction. 
Also makes the detail of the teeth higher.') + self.options['uniform_yaw'] = io.input_bool ("Uniform yaw distribution of samples", default_uniform_yaw, help_message='Helps to fix blurry side faces due to small amount of them in the faceset.') + + default_gan_power = self.options['gan_power'] = self.load_or_def_option('gan_power', 0.0) + default_gan_patch_size = self.options['gan_patch_size'] = self.load_or_def_option('gan_patch_size', self.options['resolution'] // 8) + default_gan_dims = self.options['gan_dims'] = self.load_or_def_option('gan_dims', 16) + + if self.is_first_run() or ask_override: + self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default model and optimizer weights are placed on GPU to accelerate the process. You can place they on CPU to free up extra VRAM, thus set bigger dimensions.") + + self.options['lr_dropout'] = io.input_str (f"Use learning rate dropout", default_lr_dropout, ['n','y','cpu'], help_message="When the face is trained enough, you can enable this option to get extra sharpness and reduce subpixel shake for less amount of iterations. Enabled it before `disable random warp` and before GAN. \nn - disabled.\ny - enabled\ncpu - enabled on CPU. This allows not to use extra VRAM, sacrificing 20% time of iteration.") + + self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake for less amount of iterations.") + + self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 1.0", help_message="Forces the neural network to learn small details of the face. Enable it only when the face is trained enough with lr_dropout(on) and random_warp(off), and don't disable. The higher the value, the higher the chances of artifacts. Typical fine value is 0.1"), 0.0, 1.0 ) + + if self.options['gan_power'] != 0.0: + gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher patch size, the higher the quality, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is resolution / 8." ), 3, 640 ) + self.options['gan_patch_size'] = gan_patch_size + + gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-64", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 64 ) + self.options['gan_dims'] = gan_dims + + self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change color distribution of src samples close to dst samples. 
Try all modes to find the best.") + self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.") + + self.gan_model_changed = (default_gan_patch_size != self.options['gan_patch_size']) or (default_gan_dims != self.options['gan_dims']) + + #override + def on_initialize(self): + device_config = nn.getCurrentDeviceConfig() + devices = device_config.devices + self.model_data_format = "NCHW"# if len(devices) != 0 and not self.is_debug() else "NHWC" + nn.initialize(data_format=self.model_data_format) + tf = nn.tf + + self.resolution = resolution = self.options['resolution'] + + lowest_dense_res = self.lowest_dense_res = resolution // 32 + + class Downscale(nn.ModelBase): + def __init__(self, in_ch, out_ch, kernel_size=5, *kwargs ): + self.in_ch = in_ch + self.out_ch = out_ch + self.kernel_size = kernel_size + super().__init__(*kwargs) + + def on_build(self, *args, **kwargs ): + self.conv1 = nn.Conv2D( self.in_ch, self.out_ch, kernel_size=self.kernel_size, strides=2, padding='SAME') + + def forward(self, x): + x = self.conv1(x) + x = tf.nn.leaky_relu(x, 0.1) + return x + + def get_out_ch(self): + return self.out_ch + + class Upscale(nn.ModelBase): + def on_build(self, in_ch, out_ch, kernel_size=3 ): + self.conv1 = nn.Conv2D( in_ch, out_ch*4, kernel_size=kernel_size, padding='SAME') + + def forward(self, x): + x = self.conv1(x) + x = tf.nn.leaky_relu(x, 0.1) + x = nn.depth_to_space(x, 2) + return x + + class ResidualBlock(nn.ModelBase): + def on_build(self, ch, kernel_size=3 ): + self.conv1 = nn.Conv2D( ch, ch, kernel_size=kernel_size, padding='SAME') + self.conv2 = nn.Conv2D( ch, ch, kernel_size=kernel_size, padding='SAME') + + def forward(self, inp): + x = self.conv1(inp) + x = tf.nn.leaky_relu(x, 0.2) + x = self.conv2(x) + x = tf.nn.leaky_relu(inp+x, 0.2) + return x + + class Encoder(nn.ModelBase): + def on_build(self, in_ch, e_ch, ae_ch): + self.down1 = Downscale(in_ch, e_ch, kernel_size=5) + self.res1 = ResidualBlock(e_ch) + self.down2 = Downscale(e_ch, e_ch*2, kernel_size=5) + self.down3 = Downscale(e_ch*2, e_ch*4, kernel_size=5) + self.down4 = Downscale(e_ch*4, e_ch*8, kernel_size=5) + self.down5 = Downscale(e_ch*8, e_ch*8, kernel_size=5) + self.res5 = ResidualBlock(e_ch*8) + self.dense1 = nn.Dense( lowest_dense_res*lowest_dense_res*e_ch*8, ae_ch ) + + def forward(self, inp): + x = inp + x = self.down1(x) + x = self.res1(x) + x = self.down2(x) + x = self.down3(x) + x = self.down4(x) + x = self.down5(x) + x = self.res5(x) + x = nn.flatten(x) + x = nn.pixel_norm(x, axes=-1) + x = self.dense1(x) + return x + + + class Inter(nn.ModelBase): + def __init__(self, ae_ch, ae_out_ch, **kwargs): + self.ae_ch, self.ae_out_ch = ae_ch, ae_out_ch + super().__init__(**kwargs) + + def on_build(self): + ae_ch, ae_out_ch = self.ae_ch, self.ae_out_ch + self.dense2 = nn.Dense( ae_ch, lowest_dense_res * lowest_dense_res * ae_out_ch ) + + def forward(self, inp): + x = inp + x = self.dense2(x) + x = nn.reshape_4D (x, lowest_dense_res, lowest_dense_res, self.ae_out_ch) + return x + + def get_out_ch(self): + return self.ae_out_ch + + class Decoder(nn.ModelBase): + def on_build(self, in_ch, d_ch, d_mask_ch ): + self.upscale0 = Upscale(in_ch, d_ch*8, kernel_size=3) + self.upscale1 = Upscale(d_ch*8, d_ch*8, kernel_size=3) + self.upscale2 = Upscale(d_ch*8, d_ch*4, kernel_size=3) + self.upscale3 = Upscale(d_ch*4, d_ch*2, kernel_size=3) + + self.res0 = ResidualBlock(d_ch*8, kernel_size=3) + 
self.res1 = ResidualBlock(d_ch*8, kernel_size=3) + self.res2 = ResidualBlock(d_ch*4, kernel_size=3) + self.res3 = ResidualBlock(d_ch*2, kernel_size=3) + + self.upscalem0 = Upscale(in_ch, d_mask_ch*8, kernel_size=3) + self.upscalem1 = Upscale(d_mask_ch*8, d_mask_ch*8, kernel_size=3) + self.upscalem2 = Upscale(d_mask_ch*8, d_mask_ch*4, kernel_size=3) + self.upscalem3 = Upscale(d_mask_ch*4, d_mask_ch*2, kernel_size=3) + self.upscalem4 = Upscale(d_mask_ch*2, d_mask_ch*1, kernel_size=3) + self.out_convm = nn.Conv2D( d_mask_ch*1, 1, kernel_size=1, padding='SAME') + + self.out_conv = nn.Conv2D( d_ch*2, 3, kernel_size=1, padding='SAME') + self.out_conv1 = nn.Conv2D( d_ch*2, 3, kernel_size=3, padding='SAME') + self.out_conv2 = nn.Conv2D( d_ch*2, 3, kernel_size=3, padding='SAME') + self.out_conv3 = nn.Conv2D( d_ch*2, 3, kernel_size=3, padding='SAME') + + def forward(self, inp): + z = inp + + x = self.upscale0(z) + x = self.res0(x) + x = self.upscale1(x) + x = self.res1(x) + x = self.upscale2(x) + x = self.res2(x) + x = self.upscale3(x) + x = self.res3(x) + + x = tf.nn.sigmoid( nn.depth_to_space(tf.concat( (self.out_conv(x), + self.out_conv1(x), + self.out_conv2(x), + self.out_conv3(x)), nn.conv2d_ch_axis), 2) ) + + m = self.upscalem0(z) + m = self.upscalem1(m) + m = self.upscalem2(m) + m = self.upscalem3(m) + m = self.upscalem4(m) + m = tf.nn.sigmoid(self.out_convm(m)) + return x, m + + + + self.face_type = {'wf' : FaceType.WHOLE_FACE, + 'head' : FaceType.HEAD}[ self.options['face_type'] ] + + if 'eyes_prio' in self.options: + self.options.pop('eyes_prio') + + eyes_mouth_prio = self.options['eyes_mouth_prio'] + + ae_dims = self.ae_dims = self.options['ae_dims'] + e_dims = self.options['e_dims'] + d_dims = self.options['d_dims'] + d_mask_dims = self.options['d_mask_dims'] + + self.gan_power = gan_power = self.options['gan_power'] + random_warp = self.options['random_warp'] + random_src_flip = self.random_src_flip + random_dst_flip = self.random_dst_flip + + masked_training = self.options['masked_training'] + ct_mode = self.options['ct_mode'] + if ct_mode == 'none': + ct_mode = None + + + models_opt_on_gpu = False if len(devices) == 0 else self.options['models_opt_on_gpu'] + models_opt_device = nn.tf_default_device_name if models_opt_on_gpu and self.is_training else '/CPU:0' + optimizer_vars_on_cpu = models_opt_device=='/CPU:0' + + input_ch=3 + bgr_shape = self.bgr_shape = nn.get4Dshape(resolution,resolution,input_ch) + mask_shape = nn.get4Dshape(resolution,resolution,1) + self.model_filename_list = [] + + with tf.device ('/CPU:0'): + #Place holders on CPU + self.warped_src = tf.placeholder (nn.floatx, bgr_shape, name='warped_src') + self.warped_dst = tf.placeholder (nn.floatx, bgr_shape, name='warped_dst') + + self.target_src = tf.placeholder (nn.floatx, bgr_shape, name='target_src') + self.target_dst = tf.placeholder (nn.floatx, bgr_shape, name='target_dst') + + self.target_srcm = tf.placeholder (nn.floatx, mask_shape, name='target_srcm') + self.target_srcm_em = tf.placeholder (nn.floatx, mask_shape, name='target_srcm_em') + self.target_dstm = tf.placeholder (nn.floatx, mask_shape, name='target_dstm') + self.target_dstm_em = tf.placeholder (nn.floatx, mask_shape, name='target_dstm_em') + + self.morph_value_t = tf.placeholder (nn.floatx, (1,), name='morph_value_t') + + # Initializing model classes + + with tf.device (models_opt_device): + self.encoder = Encoder(in_ch=input_ch, e_ch=e_dims, ae_ch=ae_dims, name='encoder') + self.inter_src = Inter(ae_ch=ae_dims, ae_out_ch=ae_dims, name='inter_src') + 
self.inter_dst = Inter(ae_ch=ae_dims, ae_out_ch=ae_dims, name='inter_dst') + self.decoder = Decoder(in_ch=ae_dims, d_ch=d_dims, d_mask_ch=d_mask_dims, name='decoder') + + self.model_filename_list += [ [self.encoder, 'encoder.npy'], + [self.inter_src, 'inter_src.npy'], + [self.inter_dst , 'inter_dst.npy'], + [self.decoder , 'decoder.npy'] ] + + if self.is_training: + if gan_power != 0: + self.GAN = nn.UNetPatchDiscriminator(patch_size=self.options['gan_patch_size'], in_ch=input_ch, base_ch=self.options['gan_dims'], name="GAN") + self.model_filename_list += [ [self.GAN, 'GAN.npy'] ] + + # Initialize optimizers + lr=5e-5 + lr_dropout = 0.3 if self.options['lr_dropout'] in ['y','cpu'] else 1.0 + clipnorm = 1.0 if self.options['clipgrad'] else 0.0 + + self.src_dst_trainable_weights = self.encoder.get_weights() + self.inter_src.get_weights() + self.inter_dst.get_weights() + self.decoder.get_weights() + + self.src_dst_opt = nn.AdaBelief(lr=lr, lr_dropout=lr_dropout, clipnorm=clipnorm, name='src_dst_opt') + self.src_dst_opt.initialize_variables (self.src_dst_trainable_weights, vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu') + self.model_filename_list += [ (self.src_dst_opt, 'src_dst_opt.npy') ] + + if gan_power != 0: + self.GAN_opt = nn.AdaBelief(lr=lr, lr_dropout=lr_dropout, clipnorm=clipnorm, name='GAN_opt') + self.GAN_opt.initialize_variables ( self.GAN.get_weights(), vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu')#+self.D_src_x2.get_weights() + self.model_filename_list += [ (self.GAN_opt, 'GAN_opt.npy') ] + + if self.is_training: + # Adjust batch size for multiple GPU + gpu_count = max(1, len(devices) ) + bs_per_gpu = max(1, self.get_batch_size() // gpu_count) + self.set_batch_size( gpu_count*bs_per_gpu) + + # Compute losses per GPU + gpu_pred_src_src_list = [] + gpu_pred_dst_dst_list = [] + gpu_pred_src_dst_list = [] + gpu_pred_src_srcm_list = [] + gpu_pred_dst_dstm_list = [] + gpu_pred_src_dstm_list = [] + + gpu_src_losses = [] + gpu_dst_losses = [] + gpu_G_loss_gvs = [] + gpu_GAN_loss_gvs = [] + gpu_D_code_loss_gvs = [] + gpu_D_src_dst_loss_gvs = [] + + for gpu_id in range(gpu_count): + with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ): + with tf.device(f'/CPU:0'): + # slice on CPU, otherwise all batch data will be transfered to GPU first + batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu ) + gpu_warped_src = self.warped_src [batch_slice,:,:,:] + gpu_warped_dst = self.warped_dst [batch_slice,:,:,:] + gpu_target_src = self.target_src [batch_slice,:,:,:] + gpu_target_dst = self.target_dst [batch_slice,:,:,:] + gpu_target_srcm = self.target_srcm[batch_slice,:,:,:] + gpu_target_srcm_em = self.target_srcm_em[batch_slice,:,:,:] + gpu_target_dstm = self.target_dstm[batch_slice,:,:,:] + gpu_target_dstm_em = self.target_dstm_em[batch_slice,:,:,:] + + # process model tensors + gpu_src_code = self.encoder (gpu_warped_src) + gpu_dst_code = self.encoder (gpu_warped_dst) + + gpu_src_inter_src_code = self.inter_src (gpu_src_code) + gpu_src_inter_dst_code = self.inter_dst (gpu_src_code) + gpu_dst_inter_src_code = self.inter_src (gpu_dst_code) + gpu_dst_inter_dst_code = self.inter_dst (gpu_dst_code) + + inter_rnd_binomial = nn.random_binomial( [bs_per_gpu, gpu_src_inter_src_code.shape.as_list()[1], 1,1] , p=0.33) + gpu_src_code = gpu_src_inter_src_code * inter_rnd_binomial + gpu_src_inter_dst_code * (1-inter_rnd_binomial) + gpu_dst_code = gpu_dst_inter_dst_code + + 
ae_dims_slice = tf.cast(ae_dims*self.morph_value_t[0], tf.int32) + gpu_src_dst_code = tf.concat( ( tf.slice(gpu_dst_inter_src_code, [0,0,0,0], [-1, ae_dims_slice , lowest_dense_res, lowest_dense_res]), + tf.slice(gpu_dst_inter_dst_code, [0,ae_dims_slice,0,0], [-1,ae_dims-ae_dims_slice, lowest_dense_res,lowest_dense_res]) ), 1 ) + + gpu_pred_src_src, gpu_pred_src_srcm = self.decoder(gpu_src_code) + gpu_pred_dst_dst, gpu_pred_dst_dstm = self.decoder(gpu_dst_code) + gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder(gpu_src_dst_code) + + gpu_pred_src_src_list.append(gpu_pred_src_src) + gpu_pred_dst_dst_list.append(gpu_pred_dst_dst) + gpu_pred_src_dst_list.append(gpu_pred_src_dst) + + gpu_pred_src_srcm_list.append(gpu_pred_src_srcm) + gpu_pred_dst_dstm_list.append(gpu_pred_dst_dstm) + gpu_pred_src_dstm_list.append(gpu_pred_src_dstm) + + gpu_target_srcm_blur = nn.gaussian_blur(gpu_target_srcm, max(1, resolution // 32) ) + gpu_target_srcm_blur = tf.clip_by_value(gpu_target_srcm_blur, 0, 0.5) * 2 + + gpu_target_dstm_blur = nn.gaussian_blur(gpu_target_dstm, max(1, resolution // 32) ) + gpu_target_dstm_blur = tf.clip_by_value(gpu_target_dstm_blur, 0, 0.5) * 2 + + gpu_target_dst_anti_masked = gpu_target_dst*(1.0-gpu_target_dstm_blur) + gpu_target_src_anti_masked = gpu_target_src*(1.0-gpu_target_srcm_blur) + gpu_target_src_masked_opt = gpu_target_src*gpu_target_srcm_blur if masked_training else gpu_target_src + gpu_target_dst_masked_opt = gpu_target_dst*gpu_target_dstm_blur if masked_training else gpu_target_dst + + gpu_pred_src_src_masked_opt = gpu_pred_src_src*gpu_target_srcm_blur if masked_training else gpu_pred_src_src + gpu_pred_src_src_anti_masked = gpu_pred_src_src*(1.0-gpu_target_srcm_blur) + gpu_pred_dst_dst_masked_opt = gpu_pred_dst_dst*gpu_target_dstm_blur if masked_training else gpu_pred_dst_dst + gpu_pred_dst_dst_anti_masked = gpu_pred_dst_dst*(1.0-gpu_target_dstm_blur) + + if resolution < 256: + gpu_src_loss = tf.reduce_mean ( 10*nn.dssim(gpu_target_src_masked_opt, gpu_pred_src_src_masked_opt, max_val=1.0, filter_size=int(resolution/11.6)), axis=[1]) + else: + gpu_src_loss = tf.reduce_mean ( 5*nn.dssim(gpu_target_src_masked_opt, gpu_pred_src_src_masked_opt, max_val=1.0, filter_size=int(resolution/11.6)), axis=[1]) + gpu_src_loss += tf.reduce_mean ( 5*nn.dssim(gpu_target_src_masked_opt, gpu_pred_src_src_masked_opt, max_val=1.0, filter_size=int(resolution/23.2)), axis=[1]) + gpu_src_loss += tf.reduce_mean ( 10*tf.square ( gpu_target_src_masked_opt - gpu_pred_src_src_masked_opt ), axis=[1,2,3]) + + if eyes_mouth_prio: + gpu_src_loss += tf.reduce_mean ( 300*tf.abs ( gpu_target_src*gpu_target_srcm_em - gpu_pred_src_src*gpu_target_srcm_em ), axis=[1,2,3]) + + gpu_src_loss += tf.reduce_mean ( 10*tf.square( gpu_target_srcm - gpu_pred_src_srcm ),axis=[1,2,3] ) + + if resolution < 256: + gpu_dst_loss = tf.reduce_mean ( 10*nn.dssim(gpu_target_dst_masked_opt, gpu_pred_dst_dst_masked_opt, max_val=1.0, filter_size=int(resolution/11.6) ), axis=[1]) + else: + gpu_dst_loss = tf.reduce_mean ( 5*nn.dssim(gpu_target_dst_masked_opt, gpu_pred_dst_dst_masked_opt, max_val=1.0, filter_size=int(resolution/11.6) ), axis=[1]) + gpu_dst_loss += tf.reduce_mean ( 5*nn.dssim(gpu_target_dst_masked_opt, gpu_pred_dst_dst_masked_opt, max_val=1.0, filter_size=int(resolution/23.2) ), axis=[1]) + gpu_dst_loss += tf.reduce_mean ( 10*tf.square( gpu_target_dst_masked_opt- gpu_pred_dst_dst_masked_opt ), axis=[1,2,3]) + + if eyes_mouth_prio: + gpu_dst_loss += tf.reduce_mean ( 300*tf.abs ( gpu_target_dst*gpu_target_dstm_em - 
gpu_pred_dst_dst*gpu_target_dstm_em ), axis=[1,2,3]) + + gpu_dst_loss += tf.reduce_mean ( 10*tf.square( gpu_target_dstm - gpu_pred_dst_dstm ),axis=[1,2,3] ) + + + gpu_dst_loss += 0.1*tf.reduce_mean(tf.square(gpu_pred_dst_dst_anti_masked-gpu_target_dst_anti_masked),axis=[1,2,3] ) + + gpu_src_losses += [gpu_src_loss] + gpu_dst_losses += [gpu_dst_loss] + + gpu_G_loss = gpu_src_loss + gpu_dst_loss + + def DLossOnes(logits): + return tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(logits), logits=logits), axis=[1,2,3]) + + def DLossZeros(logits): + return tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(logits), logits=logits), axis=[1,2,3]) + + + if gan_power != 0: + gpu_pred_src_src_d, gpu_pred_src_src_d2 = self.GAN(gpu_pred_src_src_masked_opt) + gpu_pred_dst_dst_d, gpu_pred_dst_dst_d2 = self.GAN(gpu_pred_dst_dst_masked_opt) + gpu_target_src_d, gpu_target_src_d2 = self.GAN(gpu_target_src_masked_opt) + gpu_target_dst_d, gpu_target_dst_d2 = self.GAN(gpu_target_dst_masked_opt) + + gpu_D_src_dst_loss = (DLossOnes (gpu_target_src_d) + DLossOnes (gpu_target_src_d2) + \ + DLossZeros(gpu_pred_src_src_d) + DLossZeros(gpu_pred_src_src_d2) + \ + DLossOnes (gpu_target_dst_d) + DLossOnes (gpu_target_dst_d2) + \ + DLossZeros(gpu_pred_dst_dst_d) + DLossZeros(gpu_pred_dst_dst_d2) + ) * ( 1.0 / 8) + + gpu_D_src_dst_loss_gvs += [ nn.gradients (gpu_D_src_dst_loss, self.GAN.get_weights() ) ] + + gpu_G_loss += (DLossOnes(gpu_pred_src_src_d) + DLossOnes(gpu_pred_src_src_d2) + \ + DLossOnes(gpu_pred_dst_dst_d) + DLossOnes(gpu_pred_dst_dst_d2) + ) * gan_power + + if masked_training: + # Minimal src-src-bg rec with total_variation_mse to suppress random bright dots from gan + gpu_G_loss += 0.000001*nn.total_variation_mse(gpu_pred_src_src) + gpu_G_loss += 0.02*tf.reduce_mean(tf.square(gpu_pred_src_src_anti_masked-gpu_target_src_anti_masked),axis=[1,2,3] ) + + gpu_G_loss_gvs += [ nn.gradients ( gpu_G_loss, self.src_dst_trainable_weights ) ] + + + # Average losses and gradients, and create optimizer update ops + with tf.device(f'/CPU:0'): + pred_src_src = nn.concat(gpu_pred_src_src_list, 0) + pred_dst_dst = nn.concat(gpu_pred_dst_dst_list, 0) + pred_src_dst = nn.concat(gpu_pred_src_dst_list, 0) + pred_src_srcm = nn.concat(gpu_pred_src_srcm_list, 0) + pred_dst_dstm = nn.concat(gpu_pred_dst_dstm_list, 0) + pred_src_dstm = nn.concat(gpu_pred_src_dstm_list, 0) + + with tf.device (models_opt_device): + src_loss = tf.concat(gpu_src_losses, 0) + dst_loss = tf.concat(gpu_dst_losses, 0) + src_dst_loss_gv_op = self.src_dst_opt.get_update_op (nn.average_gv_list (gpu_G_loss_gvs)) + + if gan_power != 0: + src_D_src_dst_loss_gv_op = self.GAN_opt.get_update_op (nn.average_gv_list(gpu_D_src_dst_loss_gvs) ) + #GAN_loss_gv_op = self.src_dst_opt.get_update_op (nn.average_gv_list(gpu_GAN_loss_gvs) ) + + + # Initializing training and view functions + def src_dst_train(warped_src, target_src, target_srcm, target_srcm_em, \ + warped_dst, target_dst, target_dstm, target_dstm_em, ): + s, d, _ = nn.tf_sess.run ( [ src_loss, dst_loss, src_dst_loss_gv_op], + feed_dict={self.warped_src :warped_src, + self.target_src :target_src, + self.target_srcm:target_srcm, + self.target_srcm_em:target_srcm_em, + self.warped_dst :warped_dst, + self.target_dst :target_dst, + self.target_dstm:target_dstm, + self.target_dstm_em:target_dstm_em, + }) + return s, d + self.src_dst_train = src_dst_train + + if gan_power != 0: + def D_src_dst_train(warped_src, target_src, target_srcm, target_srcm_em, \ + warped_dst, 
target_dst, target_dstm, target_dstm_em, ): + nn.tf_sess.run ([src_D_src_dst_loss_gv_op], feed_dict={self.warped_src :warped_src, + self.target_src :target_src, + self.target_srcm:target_srcm, + self.target_srcm_em:target_srcm_em, + self.warped_dst :warped_dst, + self.target_dst :target_dst, + self.target_dstm:target_dstm, + self.target_dstm_em:target_dstm_em}) + self.D_src_dst_train = D_src_dst_train + + + def AE_view(warped_src, warped_dst, morph_value): + return nn.tf_sess.run ( [pred_src_src, pred_dst_dst, pred_dst_dstm, pred_src_dst, pred_src_dstm], + feed_dict={self.warped_src:warped_src, self.warped_dst:warped_dst, self.morph_value_t:[morph_value] }) + + self.AE_view = AE_view + else: + #Initializing merge function + with tf.device( nn.tf_default_device_name if len(devices) != 0 else f'/CPU:0'): + gpu_dst_code = self.encoder (self.warped_dst) + gpu_dst_inter_src_code = self.inter_src ( gpu_dst_code) + gpu_dst_inter_dst_code = self.inter_dst ( gpu_dst_code) + + ae_dims_slice = tf.cast(ae_dims*self.morph_value_t[0], tf.int32) + gpu_src_dst_code = tf.concat( ( tf.slice(gpu_dst_inter_src_code, [0,0,0,0], [-1, ae_dims_slice , lowest_dense_res, lowest_dense_res]), + tf.slice(gpu_dst_inter_dst_code, [0,ae_dims_slice,0,0], [-1,ae_dims-ae_dims_slice, lowest_dense_res,lowest_dense_res]) ), 1 ) + + gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder(gpu_dst_inter_src_code) + _, gpu_pred_dst_dstm = self.decoder(gpu_dst_inter_dst_code) + + def AE_merge(warped_dst, morph_value): + return nn.tf_sess.run ( [gpu_pred_src_dst, gpu_pred_dst_dstm, gpu_pred_src_dstm], feed_dict={self.warped_dst:warped_dst, self.morph_value_t:[morph_value] }) + + self.AE_merge = AE_merge + + # Loading/initializing all models/optimizers weights + for model, filename in io.progress_bar_generator(self.model_filename_list, "Initializing models"): + do_init = self.is_first_run() + if self.is_training and gan_power != 0 and model == self.GAN: + if self.gan_model_changed: + do_init = True + if not do_init: + do_init = not model.load_weights( self.get_strpath_storage_for_file(filename) ) + if do_init: + model.init_weights() + + + ############### + + # initializing sample generators + if self.is_training: + training_data_src_path = self.training_data_src_path + training_data_dst_path = self.training_data_dst_path + + random_ct_samples_path=training_data_dst_path if ct_mode is not None else None + + cpu_count = min(multiprocessing.cpu_count(), 8) + src_generators_count = cpu_count // 2 + dst_generators_count = cpu_count // 2 + if ct_mode is not None: + src_generators_count = int(src_generators_count * 1.5) + + self.set_training_data_generators ([ + SampleGeneratorFace(training_data_src_path, random_ct_samples_path=random_ct_samples_path, debug=self.is_debug(), batch_size=self.get_batch_size(), + sample_process_options=SampleProcessor.Options(random_flip=random_src_flip), + output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':random_warp, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, + {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, + {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : 
SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, + {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.EYES_MOUTH, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, + ], + uniform_yaw_distribution=self.options['uniform_yaw'], + generators_count=src_generators_count ), + + SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(), + sample_process_options=SampleProcessor.Options(random_flip=random_dst_flip), + output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':random_warp, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, + {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, + {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, + {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.EYES_MOUTH, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}, + ], + uniform_yaw_distribution=self.options['uniform_yaw'], + generators_count=dst_generators_count ) + ]) + + self.last_src_samples_loss = [] + self.last_dst_samples_loss = [] + + + def dump_ckpt(self): + tf = nn.tf + + + with tf.device ('/CPU:0'): + warped_dst = tf.placeholder (nn.floatx, (None, self.resolution, self.resolution, 3), name='in_face') + warped_dst = tf.transpose(warped_dst, (0,3,1,2)) + morph_value = tf.placeholder (nn.floatx, (1,), name='morph_value') + + gpu_dst_code = self.encoder (warped_dst) + gpu_dst_inter_src_code = self.inter_src ( gpu_dst_code) + gpu_dst_inter_dst_code = self.inter_dst ( gpu_dst_code) + + ae_dims_slice = tf.cast(self.ae_dims*morph_value[0], tf.int32) + gpu_src_dst_code = tf.concat( (tf.slice(gpu_dst_inter_src_code, [0,0,0,0], [-1, ae_dims_slice , self.lowest_dense_res, self.lowest_dense_res]), + tf.slice(gpu_dst_inter_dst_code, [0,ae_dims_slice,0,0], [-1,self.ae_dims-ae_dims_slice, self.lowest_dense_res,self.lowest_dense_res]) ), 1 ) + + gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder(gpu_src_dst_code) + _, gpu_pred_dst_dstm = self.decoder(gpu_dst_inter_dst_code) + + gpu_pred_src_dst = tf.transpose(gpu_pred_src_dst, (0,2,3,1)) + gpu_pred_dst_dstm = tf.transpose(gpu_pred_dst_dstm, (0,2,3,1)) + gpu_pred_src_dstm = tf.transpose(gpu_pred_src_dstm, (0,2,3,1)) + + + saver = tf.train.Saver() + tf.identity(gpu_pred_dst_dstm, name='out_face_mask') + tf.identity(gpu_pred_src_dst, name='out_celeb_face') + tf.identity(gpu_pred_src_dstm, name='out_celeb_face_mask') + + saver.save(nn.tf_sess, self.get_strpath_storage_for_file('.ckpt') ) + + + #override + def get_model_filename_list(self): + return self.model_filename_list + + #override + def onSave(self): + for model, filename in io.progress_bar_generator(self.get_model_filename_list(), "Saving", 
leave=False): + model.save_weights ( self.get_strpath_storage_for_file(filename) ) + + #override + def should_save_preview_history(self): + return (not io.is_colab() and self.iter % ( 10*(max(1,self.resolution // 64)) ) == 0) or \ + (io.is_colab() and self.iter % 100 == 0) + + #override + def onTrainOneIter(self): + bs = self.get_batch_size() + + ( (warped_src, target_src, target_srcm, target_srcm_em), \ + (warped_dst, target_dst, target_dstm, target_dstm_em) ) = self.generate_next_samples() + + src_loss, dst_loss = self.src_dst_train (warped_src, target_src, target_srcm, target_srcm_em, warped_dst, target_dst, target_dstm, target_dstm_em) + + for i in range(bs): + self.last_src_samples_loss.append ( (target_src[i], target_srcm[i], target_srcm_em[i], src_loss[i] ) ) + self.last_dst_samples_loss.append ( (target_dst[i], target_dstm[i], target_dstm_em[i], dst_loss[i] ) ) + + if len(self.last_src_samples_loss) >= bs*16: + src_samples_loss = sorted(self.last_src_samples_loss, key=operator.itemgetter(3), reverse=True) + dst_samples_loss = sorted(self.last_dst_samples_loss, key=operator.itemgetter(3), reverse=True) + + target_src = np.stack( [ x[0] for x in src_samples_loss[:bs] ] ) + target_srcm = np.stack( [ x[1] for x in src_samples_loss[:bs] ] ) + target_srcm_em = np.stack( [ x[2] for x in src_samples_loss[:bs] ] ) + + target_dst = np.stack( [ x[0] for x in dst_samples_loss[:bs] ] ) + target_dstm = np.stack( [ x[1] for x in dst_samples_loss[:bs] ] ) + target_dstm_em = np.stack( [ x[2] for x in dst_samples_loss[:bs] ] ) + + src_loss, dst_loss = self.src_dst_train (target_src, target_src, target_srcm, target_srcm_em, target_dst, target_dst, target_dstm, target_dstm_em) + self.last_src_samples_loss = [] + self.last_dst_samples_loss = [] + + if self.gan_power != 0: + self.D_src_dst_train (warped_src, target_src, target_srcm, target_srcm_em, warped_dst, target_dst, target_dstm, target_dstm_em) + + return ( ('src_loss', np.mean(src_loss) ), ('dst_loss', np.mean(dst_loss) ), ) + + #override + def onGetPreview(self, samples): + ( (warped_src, target_src, target_srcm, target_srcm_em), + (warped_dst, target_dst, target_dstm, target_dstm_em) ) = samples + + S, D, SS, DD, DDM_000, _, _ = [ np.clip( nn.to_data_format(x,"NHWC", self.model_data_format), 0.0, 1.0) for x in ([target_src,target_dst] + self.AE_view (target_src, target_dst, 0.0) ) ] + + _, _, DDM_025, SD_025, SDM_025 = [ np.clip( nn.to_data_format(x,"NHWC", self.model_data_format), 0.0, 1.0) for x in self.AE_view (target_src, target_dst, 0.25) ] + _, _, DDM_050, SD_050, SDM_050 = [ np.clip( nn.to_data_format(x,"NHWC", self.model_data_format), 0.0, 1.0) for x in self.AE_view (target_src, target_dst, 0.50) ] + _, _, DDM_065, SD_065, SDM_065 = [ np.clip( nn.to_data_format(x,"NHWC", self.model_data_format), 0.0, 1.0) for x in self.AE_view (target_src, target_dst, 0.65) ] + _, _, DDM_075, SD_075, SDM_075 = [ np.clip( nn.to_data_format(x,"NHWC", self.model_data_format), 0.0, 1.0) for x in self.AE_view (target_src, target_dst, 0.75) ] + _, _, DDM_100, SD_100, SDM_100 = [ np.clip( nn.to_data_format(x,"NHWC", self.model_data_format), 0.0, 1.0) for x in self.AE_view (target_src, target_dst, 1.00) ] + + (DDM_000, + DDM_025, SDM_025, + DDM_050, SDM_050, + DDM_065, SDM_065, + DDM_075, SDM_075, + DDM_100, SDM_100) = [ np.repeat (x, (3,), -1) for x in (DDM_000, + DDM_025, SDM_025, + DDM_050, SDM_050, + DDM_065, SDM_065, + DDM_075, SDM_075, + DDM_100, SDM_100) ] + + target_srcm, target_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format) for x in 
([target_srcm, target_dstm] )] + + n_samples = min(4, self.get_batch_size(), 800 // self.resolution ) + + result = [] + + i = np.random.randint(n_samples) + + st = [ np.concatenate ((S[i], D[i], DD[i]*DDM_000[i]), axis=1) ] + st += [ np.concatenate ((SS[i], DD[i], SD_065[i] ), axis=1) ] + + result += [ ('AMP morph 0.65', np.concatenate (st, axis=0 )), ] + + st = [ np.concatenate ((DD[i], SD_025[i], SD_050[i]), axis=1) ] + st += [ np.concatenate ((SD_065[i], SD_075[i], SD_100[i]), axis=1) ] + result += [ ('AMP morph list', np.concatenate (st, axis=0 )), ] + + + st = [ np.concatenate ((DD[i], SD_025[i]*DDM_025[i]*SDM_025[i], SD_050[i]*DDM_050[i]*SDM_050[i]), axis=1) ] + st += [ np.concatenate ((SD_065[i]*DDM_065[i]*SDM_065[i], SD_075[i]*DDM_075[i]*SDM_075[i], SD_100[i]*DDM_100[i]*SDM_100[i]), axis=1) ] + result += [ ('AMP morph list masked', np.concatenate (st, axis=0 )), ] + + return result + + def predictor_func (self, face, morph_value): + face = nn.to_data_format(face[None,...], self.model_data_format, "NHWC") + + bgr, mask_dst_dstm, mask_src_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format).astype(np.float32) for x in self.AE_merge (face, morph_value) ] + + return bgr[0], mask_src_dstm[0][...,0], mask_dst_dstm[0][...,0] + + #override + def get_MergerConfig(self): + morph_factor = np.clip ( io.input_number ("Morph factor", 0.65, add_info="0.0 .. 1.0"), 0.0, 1.0 ) + + def predictor_morph(face): + return self.predictor_func(face, morph_factor) + + + import merger + return predictor_morph, (self.options['resolution'], self.options['resolution'], 3), merger.MergerConfigMasked(face_type=self.face_type, default_mode = 'overlay') + +Model = AMPModel diff --git a/models/Model_AMP/__init__.py b/models/Model_AMP/__init__.py new file mode 100644 index 0000000..0188f11 --- /dev/null +++ b/models/Model_AMP/__init__.py @@ -0,0 +1 @@ +from .Model import Model From 11a799323830513faffa509cf9c2a4af0eddef56 Mon Sep 17 00:00:00 2001 From: iperov Date: Sat, 29 May 2021 22:23:07 +0400 Subject: [PATCH 41/47] fix --- models/Model_AMP/Model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/models/Model_AMP/Model.py b/models/Model_AMP/Model.py index 95f08bb..6b575f3 100644 --- a/models/Model_AMP/Model.py +++ b/models/Model_AMP/Model.py @@ -732,9 +732,9 @@ class AMPModel(ModelBase): i = np.random.randint(n_samples) st = [ np.concatenate ((S[i], D[i], DD[i]*DDM_000[i]), axis=1) ] - st += [ np.concatenate ((SS[i], DD[i], SD_065[i] ), axis=1) ] + st += [ np.concatenate ((SS[i], DD[i], SD_075[i] ), axis=1) ] - result += [ ('AMP morph 0.65', np.concatenate (st, axis=0 )), ] + result += [ ('AMP morph 0.75', np.concatenate (st, axis=0 )), ] st = [ np.concatenate ((DD[i], SD_025[i], SD_050[i]), axis=1) ] st += [ np.concatenate ((SD_065[i], SD_075[i], SD_100[i]), axis=1) ] @@ -756,7 +756,7 @@ class AMPModel(ModelBase): #override def get_MergerConfig(self): - morph_factor = np.clip ( io.input_number ("Morph factor", 0.65, add_info="0.0 .. 1.0"), 0.0, 1.0 ) + morph_factor = np.clip ( io.input_number ("Morph factor", 0.75, add_info="0.0 .. 
1.0"), 0.0, 1.0 ) def predictor_morph(face): return self.predictor_func(face, morph_factor) From e52b53f87c38b0dad3fca8dd9a76c15fcda63e46 Mon Sep 17 00:00:00 2001 From: iperov Date: Sun, 30 May 2021 09:24:23 +0400 Subject: [PATCH 42/47] AMP fix --- core/leras/ops/__init__.py | 9 +++------ models/Model_AMP/Model.py | 5 +---- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/core/leras/ops/__init__.py b/core/leras/ops/__init__.py index 09e8e7a..0cf23a8 100644 --- a/core/leras/ops/__init__.py +++ b/core/leras/ops/__init__.py @@ -333,7 +333,9 @@ def depth_to_space(x, size): x = tf.reshape(x, (-1, oh, ow, oc, )) return x else: - return tf.depth_to_space(x, size, data_format=nn.data_format) + cfg = nn.getCurrentDeviceConfig() + if not cfg.cpu_only: + return tf.depth_to_space(x, size, data_format=nn.data_format) b,c,h,w = x.shape.as_list() oh, ow = h * size, w * size oc = c // (size * size) @@ -344,11 +346,6 @@ def depth_to_space(x, size): return x nn.depth_to_space = depth_to_space -def pixel_norm(x, power = 1.0): - return x * power * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=nn.conv2d_spatial_axes, keepdims=True) + 1e-06) -nn.pixel_norm = pixel_norm - - def rgb_to_lab(srgb): srgb_pixels = tf.reshape(srgb, [-1, 3]) linear_mask = tf.cast(srgb_pixels <= 0.04045, dtype=tf.float32) diff --git a/models/Model_AMP/Model.py b/models/Model_AMP/Model.py index 6b575f3..ab5b8e1 100644 --- a/models/Model_AMP/Model.py +++ b/models/Model_AMP/Model.py @@ -121,7 +121,7 @@ class AMPModel(ModelBase): def on_initialize(self): device_config = nn.getCurrentDeviceConfig() devices = device_config.devices - self.model_data_format = "NCHW"# if len(devices) != 0 and not self.is_debug() else "NHWC" + self.model_data_format = "NCHW" nn.initialize(data_format=self.model_data_format) tf = nn.tf @@ -262,8 +262,6 @@ class AMPModel(ModelBase): m = tf.nn.sigmoid(self.out_convm(m)) return x, m - - self.face_type = {'wf' : FaceType.WHOLE_FACE, 'head' : FaceType.HEAD}[ self.options['face_type'] ] @@ -287,7 +285,6 @@ class AMPModel(ModelBase): if ct_mode == 'none': ct_mode = None - models_opt_on_gpu = False if len(devices) == 0 else self.options['models_opt_on_gpu'] models_opt_device = nn.tf_default_device_name if models_opt_on_gpu and self.is_training else '/CPU:0' optimizer_vars_on_cpu = models_opt_device=='/CPU:0' From 315f241c5129112ec7e4cc2dff8cf7d0910a7002 Mon Sep 17 00:00:00 2001 From: iperov Date: Sun, 30 May 2021 14:28:23 +0400 Subject: [PATCH 43/47] upd magnet link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3805772..575c866 100644 --- a/README.md +++ b/README.md @@ -194,7 +194,7 @@ Unfortunately, there is no "make everything ok" button in DeepFaceLab. You shoul -Windows (magnet link) +Windows (magnet link) Last release. Use torrent client to download. 
From 1981ed0ca8d8e374264d6649a9c5546d7bb88a83 Mon Sep 17 00:00:00 2001 From: iperov Date: Tue, 1 Jun 2021 00:21:27 +0400 Subject: [PATCH 44/47] _ --- mainscripts/Trainer.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mainscripts/Trainer.py b/mainscripts/Trainer.py index 66afd71..780cc48 100644 --- a/mainscripts/Trainer.py +++ b/mainscripts/Trainer.py @@ -43,7 +43,10 @@ def trainerThread (s2c, c2s, e, if not saved_models_path.exists(): saved_models_path.mkdir(exist_ok=True, parents=True) - + + if dump_ckpt: + cpu_only=True + model = models.import_model(model_class_name)( is_training=not dump_ckpt, saved_models_path=saved_models_path, From 34b62862e039c659cd4b5afc5b3af391f409f715 Mon Sep 17 00:00:00 2001 From: iperov Date: Tue, 1 Jun 2021 09:36:53 +0400 Subject: [PATCH 45/47] fix help msg --- models/Model_SAEHD/Model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py index eb89172..b9b1c42 100644 --- a/models/Model_SAEHD/Model.py +++ b/models/Model_SAEHD/Model.py @@ -170,7 +170,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ... self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change color distribution of src samples close to dst samples. Try all modes to find the best.") self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.") - self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with large amount of various faces. After that, model can be used to train the fakes more quickly. Forces random_warp=Y, random_flips=Y, gan_power=0.0, lr_dropout=N, styles=0.0, uniform_yaw=Y") + self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with large amount of various faces. After that, model can be used to train the fakes more quickly. 
Forces random_warp=N, random_flips=Y, gan_power=0.0, lr_dropout=N, styles=0.0, uniform_yaw=Y")

         if self.options['pretrain'] and self.get_pretraining_data_path() is None:
             raise Exception("pretraining_data_path is not defined")

From 5fac5ee4f32332860ba498ebbba4b3094dff1db4 Mon Sep 17 00:00:00 2001
From: iperov
Date: Tue, 1 Jun 2021 09:37:55 +0400
Subject: [PATCH 46/47] AMP model: added ‘morph_factor’ option. [0.1 .. 0.5]
 The smaller the value, the more src-like facial expressions will appear. The
 larger the value, the less space there is to train a large dst faceset in the
 neural network. Typical fine value is 0.33

AMP model: added ‘pretrain’ mode as in SAEHD

Windows build: The default pretrain dataset has been updated with the Generic
XSeg mask applied

---
 models/Model_AMP/Model.py | 167 ++++++++++++++++++++++++--------------
 1 file changed, 106 insertions(+), 61 deletions(-)

diff --git a/models/Model_AMP/Model.py b/models/Model_AMP/Model.py
index ab5b8e1..ad66365 100644
--- a/models/Model_AMP/Model.py
+++ b/models/Model_AMP/Model.py
@@ -39,6 +39,7 @@ class AMPModel(ModelBase):
         default_e_dims = self.options['e_dims'] = self.load_or_def_option('e_dims', 64)
         default_d_dims = self.options['d_dims'] = self.options.get('d_dims', None)
         default_d_mask_dims = self.options['d_mask_dims'] = self.options.get('d_mask_dims', None)
+        default_morph_factor = self.options['morph_factor'] = self.options.get('morph_factor', 0.33)
         default_masked_training = self.options['masked_training'] = self.load_or_def_option('masked_training', True)
         default_eyes_mouth_prio = self.options['eyes_mouth_prio'] = self.load_or_def_option('eyes_mouth_prio', True)
         default_uniform_yaw = self.options['uniform_yaw'] = self.load_or_def_option('uniform_yaw', False)
@@ -50,6 +51,8 @@ class AMPModel(ModelBase):
         default_random_warp = self.options['random_warp'] = self.load_or_def_option('random_warp', True)
         default_ct_mode = self.options['ct_mode'] = self.load_or_def_option('ct_mode', 'none')
         default_clipgrad = self.options['clipgrad'] = self.load_or_def_option('clipgrad', False)
+        default_pretrain = self.options['pretrain'] = self.load_or_def_option('pretrain', False)
+
         ask_override = self.ask_override()

         if self.is_first_run() or ask_override:
@@ -84,6 +87,10 @@ class AMPModel(ModelBase):
             d_mask_dims = np.clip ( io.input_int("Decoder mask dimensions", default_d_mask_dims, add_info="16-256", help_message="Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality." ), 16, 256 )
             self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2

+            morph_factor = np.clip ( io.input_number ("Morph factor.", default_morph_factor, add_info="0.1 .. 0.5", help_message="The smaller the value, the more src-like facial expressions will appear. The larger the value, the less space there is to train a large dst faceset in the neural network.
Typical fine value is 0.33"), 0.1, 0.5 ) + self.options['morph_factor'] = morph_factor + if self.is_first_run() or ask_override: if self.options['face_type'] == 'wf' or self.options['face_type'] == 'head': @@ -114,8 +121,11 @@ class AMPModel(ModelBase): self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change color distribution of src samples close to dst samples. Try all modes to find the best.") self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.") - + + self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with large amount of various faces. After that, model can be used to train the fakes more quickly. Forces random_warp=N, random_flips=Y, gan_power=0.0, lr_dropout=N, uniform_yaw=Y") + self.gan_model_changed = (default_gan_patch_size != self.options['gan_patch_size']) or (default_gan_dims != self.options['gan_dims']) + self.pretrain_just_disabled = (default_pretrain == True and self.options['pretrain'] == False) #override def on_initialize(self): @@ -274,12 +284,23 @@ class AMPModel(ModelBase): e_dims = self.options['e_dims'] d_dims = self.options['d_dims'] d_mask_dims = self.options['d_mask_dims'] - - self.gan_power = gan_power = self.options['gan_power'] - random_warp = self.options['random_warp'] - random_src_flip = self.random_src_flip - random_dst_flip = self.random_dst_flip - + morph_factor = self.options['morph_factor'] + + pretrain = self.pretrain = self.options['pretrain'] + if self.pretrain_just_disabled: + self.set_iter(0) + + self.gan_power = gan_power = 0.0 if self.pretrain else self.options['gan_power'] + random_warp = False if self.pretrain else self.options['random_warp'] + random_src_flip = self.random_src_flip if not self.pretrain else True + random_dst_flip = self.random_dst_flip if not self.pretrain else True + + if self.pretrain: + self.options_show_override['gan_power'] = 0.0 + self.options_show_override['random_warp'] = False + self.options_show_override['lr_dropout'] = 'n' + self.options_show_override['uniform_yaw'] = True + masked_training = self.options['masked_training'] ct_mode = self.options['ct_mode'] if ct_mode == 'none': @@ -329,13 +350,18 @@ class AMPModel(ModelBase): # Initialize optimizers lr=5e-5 - lr_dropout = 0.3 if self.options['lr_dropout'] in ['y','cpu'] else 1.0 + lr_dropout = 0.3 if self.options['lr_dropout'] in ['y','cpu'] and not self.pretrain else 1.0 + clipnorm = 1.0 if self.options['clipgrad'] else 0.0 - self.src_dst_trainable_weights = self.encoder.get_weights() + self.inter_src.get_weights() + self.inter_dst.get_weights() + self.decoder.get_weights() + self.all_weights = self.encoder.get_weights() + self.inter_src.get_weights() + self.inter_dst.get_weights() + self.decoder.get_weights() + if pretrain: + self.trainable_weights = self.encoder.get_weights() + self.inter_dst.get_weights() + self.decoder.get_weights() + else: + self.trainable_weights = self.encoder.get_weights() + self.inter_src.get_weights() + self.inter_dst.get_weights() + self.decoder.get_weights() self.src_dst_opt = nn.AdaBelief(lr=lr, lr_dropout=lr_dropout, clipnorm=clipnorm, name='src_dst_opt') - self.src_dst_opt.initialize_variables (self.src_dst_trainable_weights, vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu') + 
@@ -114,8 +121,11 @@ class AMPModel(ModelBase):
 
         self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change color distribution of src samples close to dst samples. Try all modes to find the best.")
         self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.")
-
+
+        self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with a large amount of various faces. After that, the model can be used to train fakes more quickly. Forces random_warp=N, random_flips=Y, gan_power=0.0, lr_dropout=N, uniform_yaw=Y")
+
         self.gan_model_changed = (default_gan_patch_size != self.options['gan_patch_size']) or (default_gan_dims != self.options['gan_dims'])
+        self.pretrain_just_disabled = (default_pretrain == True and self.options['pretrain'] == False)
 
     #override
     def on_initialize(self):
@@ -274,12 +284,23 @@ class AMPModel(ModelBase):
         e_dims = self.options['e_dims']
         d_dims = self.options['d_dims']
         d_mask_dims = self.options['d_mask_dims']
-
-        self.gan_power = gan_power = self.options['gan_power']
-        random_warp = self.options['random_warp']
-        random_src_flip = self.random_src_flip
-        random_dst_flip = self.random_dst_flip
-
+        morph_factor = self.options['morph_factor']
+
+        pretrain = self.pretrain = self.options['pretrain']
+        if self.pretrain_just_disabled:
+            self.set_iter(0)
+
+        self.gan_power = gan_power = 0.0 if self.pretrain else self.options['gan_power']
+        random_warp = False if self.pretrain else self.options['random_warp']
+        random_src_flip = self.random_src_flip if not self.pretrain else True
+        random_dst_flip = self.random_dst_flip if not self.pretrain else True
+
+        if self.pretrain:
+            self.options_show_override['gan_power'] = 0.0
+            self.options_show_override['random_warp'] = False
+            self.options_show_override['lr_dropout'] = 'n'
+            self.options_show_override['uniform_yaw'] = True
+
         masked_training = self.options['masked_training']
         ct_mode = self.options['ct_mode']
         if ct_mode == 'none':
@@ -329,13 +350,18 @@ class AMPModel(ModelBase):
 
         # Initialize optimizers
         lr=5e-5
-        lr_dropout = 0.3 if self.options['lr_dropout'] in ['y','cpu'] else 1.0
+        lr_dropout = 0.3 if self.options['lr_dropout'] in ['y','cpu'] and not self.pretrain else 1.0
+
         clipnorm = 1.0 if self.options['clipgrad'] else 0.0
 
-        self.src_dst_trainable_weights = self.encoder.get_weights() + self.inter_src.get_weights() + self.inter_dst.get_weights() + self.decoder.get_weights()
+        self.all_weights = self.encoder.get_weights() + self.inter_src.get_weights() + self.inter_dst.get_weights() + self.decoder.get_weights()
+        if pretrain:
+            self.trainable_weights = self.encoder.get_weights() + self.inter_dst.get_weights() + self.decoder.get_weights()
+        else:
+            self.trainable_weights = self.encoder.get_weights() + self.inter_src.get_weights() + self.inter_dst.get_weights() + self.decoder.get_weights()
 
         self.src_dst_opt = nn.AdaBelief(lr=lr, lr_dropout=lr_dropout, clipnorm=clipnorm, name='src_dst_opt')
-        self.src_dst_opt.initialize_variables (self.src_dst_trainable_weights, vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu')
+        self.src_dst_opt.initialize_variables (self.all_weights, vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu')
         self.model_filename_list += [ (self.src_dst_opt, 'src_dst_opt.npy') ]
 
         if gan_power != 0:
@@ -381,19 +407,25 @@ class AMPModel(ModelBase):
                 # process model tensors
                 gpu_src_code = self.encoder (gpu_warped_src)
                 gpu_dst_code = self.encoder (gpu_warped_dst)
+
+                if pretrain:
+                    gpu_src_inter_src_code = self.inter_src (gpu_src_code)
+                    gpu_dst_inter_dst_code = self.inter_dst (gpu_dst_code)
+                    gpu_src_code = gpu_src_inter_src_code * nn.random_binomial( [bs_per_gpu, gpu_src_inter_src_code.shape.as_list()[1], 1,1] , p=morph_factor)
+                    gpu_dst_code = gpu_src_dst_code = gpu_dst_inter_dst_code * nn.random_binomial( [bs_per_gpu, gpu_dst_inter_dst_code.shape.as_list()[1], 1,1] , p=0.25)
+                else:
+                    gpu_src_inter_src_code = self.inter_src (gpu_src_code)
+                    gpu_src_inter_dst_code = self.inter_dst (gpu_src_code)
+                    gpu_dst_inter_src_code = self.inter_src (gpu_dst_code)
+                    gpu_dst_inter_dst_code = self.inter_dst (gpu_dst_code)
-                gpu_src_inter_src_code = self.inter_src (gpu_src_code)
-                gpu_src_inter_dst_code = self.inter_dst (gpu_src_code)
-                gpu_dst_inter_src_code = self.inter_src (gpu_dst_code)
-                gpu_dst_inter_dst_code = self.inter_dst (gpu_dst_code)
 
+                    inter_rnd_binomial = nn.random_binomial( [bs_per_gpu, gpu_src_inter_src_code.shape.as_list()[1], 1,1] , p=morph_factor)
+                    gpu_src_code = gpu_src_inter_src_code * inter_rnd_binomial + gpu_src_inter_dst_code * (1-inter_rnd_binomial)
+                    gpu_dst_code = gpu_dst_inter_dst_code
-                inter_rnd_binomial = nn.random_binomial( [bs_per_gpu, gpu_src_inter_src_code.shape.as_list()[1], 1,1] , p=0.33)
-                gpu_src_code = gpu_src_inter_src_code * inter_rnd_binomial + gpu_src_inter_dst_code * (1-inter_rnd_binomial)
-                gpu_dst_code = gpu_dst_inter_dst_code
-
-                ae_dims_slice = tf.cast(ae_dims*self.morph_value_t[0], tf.int32)
-                gpu_src_dst_code = tf.concat( ( tf.slice(gpu_dst_inter_src_code, [0,0,0,0], [-1, ae_dims_slice , lowest_dense_res, lowest_dense_res]),
-                                                tf.slice(gpu_dst_inter_dst_code, [0,ae_dims_slice,0,0], [-1,ae_dims-ae_dims_slice, lowest_dense_res,lowest_dense_res]) ), 1 )
+
+                    ae_dims_slice = tf.cast(ae_dims*self.morph_value_t[0], tf.int32)
+                    gpu_src_dst_code = tf.concat( (tf.slice(gpu_dst_inter_src_code, [0,0,0,0], [-1, ae_dims_slice , lowest_dense_res, lowest_dense_res]),
+                                                   tf.slice(gpu_dst_inter_dst_code, [0,ae_dims_slice,0,0], [-1,ae_dims-ae_dims_slice, lowest_dense_res,lowest_dense_res]) ), 1 )
 
                 gpu_pred_src_src, gpu_pred_src_srcm = self.decoder(gpu_src_code)
                 gpu_pred_dst_dst, gpu_pred_dst_dstm = self.decoder(gpu_dst_code)
@@ -422,38 +454,40 @@ class AMPModel(ModelBase):
                 gpu_pred_src_src_anti_masked = gpu_pred_src_src*(1.0-gpu_target_srcm_blur)
                 gpu_pred_dst_dst_masked_opt = gpu_pred_dst_dst*gpu_target_dstm_blur if masked_training else gpu_pred_dst_dst
                 gpu_pred_dst_dst_anti_masked = gpu_pred_dst_dst*(1.0-gpu_target_dstm_blur)
-
-                if resolution < 256:
-                    gpu_src_loss = tf.reduce_mean ( 10*nn.dssim(gpu_target_src_masked_opt, gpu_pred_src_src_masked_opt, max_val=1.0, filter_size=int(resolution/11.6)), axis=[1])
-                else:
-                    gpu_src_loss = tf.reduce_mean ( 5*nn.dssim(gpu_target_src_masked_opt, gpu_pred_src_src_masked_opt, max_val=1.0, filter_size=int(resolution/11.6)), axis=[1])
-                    gpu_src_loss += tf.reduce_mean ( 5*nn.dssim(gpu_target_src_masked_opt, gpu_pred_src_src_masked_opt, max_val=1.0, filter_size=int(resolution/23.2)), axis=[1])
-                gpu_src_loss += tf.reduce_mean ( 10*tf.square ( gpu_target_src_masked_opt - gpu_pred_src_src_masked_opt ), axis=[1,2,3])
-
-                if eyes_mouth_prio:
-                    gpu_src_loss += tf.reduce_mean ( 300*tf.abs ( gpu_target_src*gpu_target_srcm_em - gpu_pred_src_src*gpu_target_srcm_em ), axis=[1,2,3])
-
-                gpu_src_loss += tf.reduce_mean ( 10*tf.square( gpu_target_srcm - gpu_pred_src_srcm ),axis=[1,2,3] )
-
+
                 if resolution < 256:
                     gpu_dst_loss = tf.reduce_mean ( 10*nn.dssim(gpu_target_dst_masked_opt, gpu_pred_dst_dst_masked_opt, max_val=1.0, filter_size=int(resolution/11.6) ), axis=[1])
                 else:
                     gpu_dst_loss = tf.reduce_mean ( 5*nn.dssim(gpu_target_dst_masked_opt, gpu_pred_dst_dst_masked_opt, max_val=1.0, filter_size=int(resolution/11.6) ), axis=[1])
                     gpu_dst_loss += tf.reduce_mean ( 5*nn.dssim(gpu_target_dst_masked_opt, gpu_pred_dst_dst_masked_opt, max_val=1.0, filter_size=int(resolution/23.2) ), axis=[1])
                 gpu_dst_loss += tf.reduce_mean ( 10*tf.square( gpu_target_dst_masked_opt- gpu_pred_dst_dst_masked_opt ), axis=[1,2,3])
-
                 if eyes_mouth_prio:
                     gpu_dst_loss += tf.reduce_mean ( 300*tf.abs ( gpu_target_dst*gpu_target_dstm_em - gpu_pred_dst_dst*gpu_target_dstm_em ), axis=[1,2,3])
-
                 gpu_dst_loss += tf.reduce_mean ( 10*tf.square( gpu_target_dstm - gpu_pred_dst_dstm ),axis=[1,2,3] )
-
                 gpu_dst_loss += 0.1*tf.reduce_mean(tf.square(gpu_pred_dst_dst_anti_masked-gpu_target_dst_anti_masked),axis=[1,2,3] )
-
-                gpu_src_losses += [gpu_src_loss]
                 gpu_dst_losses += [gpu_dst_loss]
 
-                gpu_G_loss = gpu_src_loss + gpu_dst_loss
+                if not pretrain:
+                    if resolution < 256:
+                        gpu_src_loss = tf.reduce_mean ( 10*nn.dssim(gpu_target_src_masked_opt, gpu_pred_src_src_masked_opt, max_val=1.0, filter_size=int(resolution/11.6)), axis=[1])
+                    else:
+                        gpu_src_loss = tf.reduce_mean ( 5*nn.dssim(gpu_target_src_masked_opt, gpu_pred_src_src_masked_opt, max_val=1.0, filter_size=int(resolution/11.6)), axis=[1])
+                        gpu_src_loss += tf.reduce_mean ( 5*nn.dssim(gpu_target_src_masked_opt, gpu_pred_src_src_masked_opt, max_val=1.0, filter_size=int(resolution/23.2)), axis=[1])
+                    gpu_src_loss += tf.reduce_mean ( 10*tf.square ( gpu_target_src_masked_opt - gpu_pred_src_src_masked_opt ), axis=[1,2,3])
+
+                    if eyes_mouth_prio:
+                        gpu_src_loss += tf.reduce_mean ( 300*tf.abs ( gpu_target_src*gpu_target_srcm_em - gpu_pred_src_src*gpu_target_srcm_em ), axis=[1,2,3])
+
+                    gpu_src_loss += tf.reduce_mean ( 10*tf.square( gpu_target_srcm - gpu_pred_src_srcm ),axis=[1,2,3] )
+                else:
+                    gpu_src_loss = gpu_dst_loss
+
+                gpu_src_losses += [gpu_src_loss]
+
+                if pretrain:
+                    gpu_G_loss = gpu_dst_loss
+                else:
+                    gpu_G_loss = gpu_src_loss + gpu_dst_loss
 
                 def DLossOnes(logits):
                     return tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(logits), logits=logits), axis=[1,2,3])
@@ -461,7 +495,6 @@ class AMPModel(ModelBase):
                 def DLossZeros(logits):
                     return tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(logits), logits=logits), axis=[1,2,3])
-
                 if gan_power != 0:
                     gpu_pred_src_src_d, gpu_pred_src_src_d2 = self.GAN(gpu_pred_src_src_masked_opt)
                     gpu_pred_dst_dst_d, gpu_pred_dst_dst_d2 = self.GAN(gpu_pred_dst_dst_masked_opt)
@@ -485,7 +518,7 @@ class AMPModel(ModelBase):
                         gpu_G_loss += 0.000001*nn.total_variation_mse(gpu_pred_src_src)
                     gpu_G_loss += 0.02*tf.reduce_mean(tf.square(gpu_pred_src_src_anti_masked-gpu_target_src_anti_masked),axis=[1,2,3] )
 
-                gpu_G_loss_gvs += [ nn.gradients ( gpu_G_loss, self.src_dst_trainable_weights ) ]
+                gpu_G_loss_gvs += [ nn.gradients ( gpu_G_loss, self.trainable_weights ) ]
 
         # Average losses and gradients, and create optimizer update ops
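The reconstruction terms above keep the same recipe for src and dst: one DSSIM scale below 256 px, two scales at or above it, plus a weighted per-pixel L2 term; in pretrain mode the src branch is skipped entirely and gpu_G_loss reduces to gpu_dst_loss. A compact sketch of that composition, where dssim_fn is an assumed stand-in for the project's nn.dssim(a, b, max_val=1.0, filter_size=...):

import tensorflow as tf

def reconstruction_loss(target, pred, resolution, dssim_fn):
    # Per-sample loss in the style of the patch; target/pred are NCHW batches.
    if resolution < 256:
        # Single DSSIM scale below 256 px, weighted 10x.
        loss = tf.reduce_mean(10 * dssim_fn(target, pred, int(resolution / 11.6)), axis=[1])
    else:
        # Two DSSIM scales at high resolution, 5x each.
        loss = tf.reduce_mean(5 * dssim_fn(target, pred, int(resolution / 11.6)), axis=[1])
        loss += tf.reduce_mean(5 * dssim_fn(target, pred, int(resolution / 23.2)), axis=[1])
    # Plain per-pixel L2 term, weighted 10x.
    loss += tf.reduce_mean(10 * tf.square(target - pred), axis=[1, 2, 3])
    return loss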
@@ -563,10 +596,16 @@ class AMPModel(ModelBase):
 
         # Loading/initializing all models/optimizers weights
         for model, filename in io.progress_bar_generator(self.model_filename_list, "Initializing models"):
-            do_init = self.is_first_run()
-            if self.is_training and gan_power != 0 and model == self.GAN:
-                if self.gan_model_changed:
+            if self.pretrain_just_disabled:
+                do_init = False
+                if model == self.inter_src or model == self.inter_dst:
                     do_init = True
+            else:
+                do_init = self.is_first_run()
+                if self.is_training and gan_power != 0 and model == self.GAN:
+                    if self.gan_model_changed:
+                        do_init = True
+
             if not do_init:
                 do_init = not model.load_weights( self.get_strpath_storage_for_file(filename) )
             if do_init:
                 model.init_weights()
@@ -577,10 +616,11 @@ class AMPModel(ModelBase):
 
         # initializing sample generators
         if self.is_training:
-            training_data_src_path = self.training_data_src_path
-            training_data_dst_path = self.training_data_dst_path
+            training_data_src_path = self.training_data_src_path if not self.pretrain else self.get_pretraining_data_path()
+            training_data_dst_path = self.training_data_dst_path if not self.pretrain else self.get_pretraining_data_path()
+
+            random_ct_samples_path=training_data_dst_path if ct_mode is not None and not self.pretrain else None
 
-            random_ct_samples_path=training_data_dst_path if ct_mode is not None else None
             cpu_count = min(multiprocessing.cpu_count(), 8)
             src_generators_count = cpu_count // 2
@@ -596,7 +636,7 @@ class AMPModel(ModelBase):
                               {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                               {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.EYES_MOUTH, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                             ],
-                        uniform_yaw_distribution=self.options['uniform_yaw'],
+                        uniform_yaw_distribution=self.options['uniform_yaw'] or self.pretrain,
                         generators_count=src_generators_count ),
 
                    SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
@@ -606,19 +646,19 @@ class AMPModel(ModelBase):
                               {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                               {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.EYES_MOUTH, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                             ],
-                        uniform_yaw_distribution=self.options['uniform_yaw'],
+                        uniform_yaw_distribution=self.options['uniform_yaw'] or self.pretrain,
                         generators_count=dst_generators_count )
                   ])
 
             self.last_src_samples_loss = []
             self.last_dst_samples_loss = []
-
+            if self.pretrain_just_disabled:
+                self.update_sample_for_preview(force_new=True)
+
     def dump_ckpt(self):
         tf = nn.tf
-
-
-        with tf.device ('/CPU:0'):
+        with tf.device (nn.tf_default_device_name):
             warped_dst = tf.placeholder (nn.floatx, (None, self.resolution, self.resolution, 3), name='in_face')
             warped_dst = tf.transpose(warped_dst, (0,3,1,2))
             morph_value = tf.placeholder (nn.floatx, (1,), name='morph_value')
@@ -638,14 +678,19 @@ class AMPModel(ModelBase):
         gpu_pred_dst_dstm = tf.transpose(gpu_pred_dst_dstm, (0,2,3,1))
         gpu_pred_src_dstm = tf.transpose(gpu_pred_src_dstm, (0,2,3,1))
-
-        saver = tf.train.Saver()
         tf.identity(gpu_pred_dst_dstm, name='out_face_mask')
         tf.identity(gpu_pred_src_dst, name='out_celeb_face')
         tf.identity(gpu_pred_src_dstm, name='out_celeb_face_mask')
-
-        saver.save(nn.tf_sess, self.get_strpath_storage_for_file('.ckpt') )
-
+
+        output_graph_def = tf.graph_util.convert_variables_to_constants(
+            nn.tf_sess,
+            tf.get_default_graph().as_graph_def(),
+            ['out_face_mask','out_celeb_face','out_celeb_face_mask']
+        )
+
+        pb_filepath = self.get_strpath_storage_for_file('.pb')
+        with tf.gfile.GFile(pb_filepath, "wb") as f:
+            f.write(output_graph_def.SerializeToString())
 
     #override
     def get_model_filename_list(self):
@@ -746,7 +791,7 @@ class AMPModel(ModelBase):
 
     def predictor_func (self, face, morph_value):
         face = nn.to_data_format(face[None,...], self.model_data_format, "NHWC")
-
+
         bgr, mask_dst_dstm, mask_src_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format).astype(np.float32) for x in self.AE_merge (face, morph_value) ]
 
         return bgr[0], mask_src_dstm[0][...,0], mask_dst_dstm[0][...,0]

From a6438ca494101281379a24595da5d3c629afaf0b Mon Sep 17 00:00:00 2001
From: iperov
Date: Tue, 1 Jun 2021 13:30:06 +0400
Subject: [PATCH 47/47] upd magnet link

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 575c866..b1b23d5 100644
--- a/README.md
+++ b/README.md
@@ -194,7 +194,7 @@ Unfortunately, there is no "make everything ok" button in DeepFaceLab. You shoul
 
-Windows (magnet link)
+Windows (magnet link)
 
 Last release. Use torrent client to download.
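The dump_ckpt change in PATCH 46 replaces the TF checkpoint with a frozen inference graph written to a .pb file. A minimal sketch of loading that graph for inference — assuming TF 1.x behavior (via tf.compat.v1) and the tensor names the patch defines ('in_face', 'morph_value', 'out_face_mask', 'out_celeb_face', 'out_celeb_face_mask'); the file name and input resolution below are hypothetical:

import numpy as np
import tensorflow.compat.v1 as tf  # the project targets TF1-style graphs
tf.disable_v2_behavior()

def load_frozen_graph(pb_filepath):
    # Read the GraphDef serialized by dump_ckpt and import it into a fresh graph.
    with tf.gfile.GFile(pb_filepath, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name="")
    return graph

graph = load_frozen_graph("AMP_model.pb")  # hypothetical file name
with tf.Session(graph=graph) as sess:
    face = np.zeros((1, 224, 224, 3), np.float32)  # hypothetical resolution, NHWC in [0..1]
    celeb_face = sess.run(
        graph.get_tensor_by_name("out_celeb_face:0"),
        feed_dict={graph.get_tensor_by_name("in_face:0"): face,
                   graph.get_tensor_by_name("morph_value:0"): [0.5]})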