diff --git a/models/ModelBase.py b/models/ModelBase.py index 00bf8ca..99bba94 100644 --- a/models/ModelBase.py +++ b/models/ModelBase.py @@ -18,7 +18,7 @@ You can implement your own model. Check examples. class ModelBase(object): #DONT OVERRIDE - def __init__(self, model_path, training_data_src_path=None, training_data_dst_path=None, debug = False, **in_options): + def __init__(self, model_path, training_data_src_path=None, training_data_dst_path=None, debug = False, force_best_gpu_idx=-1, **in_options): print ("Loading model...") self.model_path = model_path self.model_data_path = Path( self.get_strpath_storage_for_file('data.dat') ) @@ -53,6 +53,15 @@ class ModelBase(object): if self.epoch == 0: print ("\nModel first run. Enter model options as default for each run.") + + if (self.epoch == 0 or ask_override) and (force_best_gpu_idx == -1): + idxs_names_list = nnlib.device.getAllDevicesIdxsWithNamesList() + if len(idxs_names_list) > 1: + print ("You have multi GPUs in a system: ") + for idx, name in idxs_names_list: + print ("[%d] : %s" % (idx, name) ) + + force_best_gpu_idx = input_int("Which GPU idx to choose? ( skip: system choice ) : ", -1) if self.epoch == 0 or ask_override: self.options['write_preview_history'] = input_bool("Write preview history? 
(y/n ?:help skip:n) : ", False, help_message="Preview history will be writed to _history folder.") @@ -109,10 +118,13 @@ class ModelBase(object): self.onInitializeOptions(self.epoch == 0, ask_override) - nnlib.import_all ( nnlib.DeviceConfig(allow_growth=False, **in_options) ) + nnlib.import_all ( nnlib.DeviceConfig(allow_growth=False, force_best_gpu_idx=force_best_gpu_idx, **in_options) ) self.device_config = nnlib.active_DeviceConfig - self.created_vram_gb = self.options['created_vram_gb'] if 'created_vram_gb' in self.options.keys() else self.device_config.gpu_total_vram_gb + if self.epoch == 0: + self.created_vram_gb = self.options['created_vram_gb'] = self.device_config.gpu_total_vram_gb + else: + self.created_vram_gb = self.options['created_vram_gb'] = self.options.get('created_vram_gb',self.device_config.gpu_total_vram_gb) self.onInitialize(**in_options) diff --git a/models/Model_SAE/Model.py b/models/Model_SAE/Model.py index f94a9f1..e46c27b 100644 --- a/models/Model_SAE/Model.py +++ b/models/Model_SAE/Model.py @@ -24,7 +24,7 @@ class SAEModel(ModelBase): def onInitializeOptions(self, is_first_run, ask_override): default_resolution = 128 default_archi = 'liae' default_style_power = 100 default_face_type = 'f' if is_first_run: @@ -37,12 +36,14 @@ class SAEModel(ModelBase): self.options['lighter_encoder'] = self.options.get('lighter_encoder', False) if is_first_run or ask_override: - self.options['face_style_power'] = np.clip ( input_int("Face style power (0..100 ?:help skip:100) : ", default_style_power, help_message="How fast NN will learn dst face style during generalization of src and dst faces."), 0, 100 ) + default_style_power = 100 if is_first_run else self.options['face_style_power'] + self.options['face_style_power'] = np.clip ( input_int("Face style power (0..100 ?:help skip:%d) : " % (default_style_power), default_style_power, help_message="How fast NN will learn dst face style during generalization of src and dst faces."), 0, 100 ) else: 
self.options['face_style_power'] = self.options.get('face_style_power', default_style_power) if is_first_run or ask_override: - self.options['bg_style_power'] = np.clip ( input_int("Background style power (0..100 ?:help skip:100) : ", default_style_power, help_message="How fast NN will learn dst background style during generalization of src and dst faces."), 0, 100 ) + default_style_power = 100 if is_first_run else self.options['bg_style_power'] + self.options['bg_style_power'] = np.clip ( input_int("Background style power (0..100 ?:help skip:%d) : " % (default_style_power), default_style_power, help_message="How fast NN will learn dst background style during generalization of src and dst faces."), 0, 100 ) else: self.options['bg_style_power'] = self.options.get('bg_style_power', default_style_power) diff --git a/nnlib/devicelib.py b/nnlib/devicelib.py index c0248dd..cd3c549 100644 --- a/nnlib/devicelib.py +++ b/nnlib/devicelib.py @@ -109,6 +109,17 @@ class devicelib: pass return result + @staticmethod + def getAllDevicesIdxsWithNamesList (): + result = [] + try: + nvmlInit() + result = [ (i, nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(i)).decode() ) for i in range(0, nvmlDeviceGetCount() ) ] + nvmlShutdown() + except: + pass + return result + @staticmethod def getDeviceVRAMFree (idx): result = 0 diff --git a/nnlib/nnlib.py b/nnlib/nnlib.py index ec11380..79d9d29 100644 --- a/nnlib/nnlib.py +++ b/nnlib/nnlib.py @@ -168,15 +168,14 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator for idx in device_config.gpu_idxs: visible_device_list += str(idx) + ',' config.gpu_options.visible_device_list=visible_device_list[:-1] - config.gpu_options.force_gpu_compatible = True + config.gpu_options.force_gpu_compatible = True config.gpu_options.allow_growth = device_config.allow_growth nnlib.tf_sess = tf.Session(config=config) if suppressor is not None: suppressor.__exit__() - nnlib.__initialize_tf_functions() nnlib.code_import_tf = compile (nnlib.code_import_tf_string,'','exec') 
@@ -395,6 +394,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator nnlib.keras.backend.set_floatx('float16') nnlib.keras.backend.set_session(nnlib.tf_sess) + nnlib.keras.backend.set_image_data_format('channels_last') if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1': suppressor.__exit__()