If you have multiple GPUs in a system, you can choose which GPU to use on model start

This commit is contained in:
iperov 2019-01-10 19:22:54 +04:00
parent 48d0123f0b
commit bf5282f7ec
4 changed files with 32 additions and 8 deletions

View file

@ -18,7 +18,7 @@ You can implement your own model. Check examples.
class ModelBase(object):
#DONT OVERRIDE
def __init__(self, model_path, training_data_src_path=None, training_data_dst_path=None, debug = False, **in_options):
def __init__(self, model_path, training_data_src_path=None, training_data_dst_path=None, debug = False, force_best_gpu_idx=-1, **in_options):
print ("Loading model...")
self.model_path = model_path
self.model_data_path = Path( self.get_strpath_storage_for_file('data.dat') )
@ -54,6 +54,15 @@ class ModelBase(object):
if self.epoch == 0:
print ("\nModel first run. Enter model options as default for each run.")
if (self.epoch == 0 or ask_override) and (force_best_gpu_idx == -1):
idxs_names_list = nnlib.device.getAllDevicesIdxsWithNamesList()
if len(idxs_names_list) > 1:
print ("You have multi GPUs in a system: ")
for idx, name in idxs_names_list:
print ("[%d] : %s" % (idx, name) )
force_best_gpu_idx = input_int("Which GPU idx to choose? ( skip: system choice ) : ", -1)
if self.epoch == 0 or ask_override:
self.options['write_preview_history'] = input_bool("Write preview history? (y/n ?:help skip:n) : ", False, help_message="Preview history will be writed to <ModelName>_history folder.")
else:
@ -109,10 +118,13 @@ class ModelBase(object):
self.onInitializeOptions(self.epoch == 0, ask_override)
nnlib.import_all ( nnlib.DeviceConfig(allow_growth=False, **in_options) )
nnlib.import_all ( nnlib.DeviceConfig(allow_growth=False, force_best_gpu_idx=force_best_gpu_idx, **in_options) )
self.device_config = nnlib.active_DeviceConfig
self.created_vram_gb = self.options['created_vram_gb'] if 'created_vram_gb' in self.options.keys() else self.device_config.gpu_total_vram_gb
if self.epoch == 0:
self.created_vram_gb = self.options['created_vram_gb'] = self.device_config.gpu_total_vram_gb
else:
self.created_vram_gb = self.options['created_vram_gb'] = self.options.get('created_vram_gb',self.device_config.gpu_total_vram_gb)
self.onInitialize(**in_options)

View file

@ -24,7 +24,6 @@ class SAEModel(ModelBase):
def onInitializeOptions(self, is_first_run, ask_override):
default_resolution = 128
default_archi = 'liae'
default_style_power = 100
default_face_type = 'f'
if is_first_run:
@ -37,12 +36,14 @@ class SAEModel(ModelBase):
self.options['lighter_encoder'] = self.options.get('lighter_encoder', False)
if is_first_run or ask_override:
self.options['face_style_power'] = np.clip ( input_int("Face style power (0..100 ?:help skip:100) : ", default_style_power, help_message="How fast NN will learn dst face style during generalization of src and dst faces."), 0, 100 )
default_style_power = 100 if is_first_run else self.options['face_style_power']
self.options['face_style_power'] = np.clip ( input_int("Face style power (0..100 ?:help skip:%d) : " % (default_style_power), default_style_power, help_message="How fast NN will learn dst face style during generalization of src and dst faces."), 0, 100 )
else:
self.options['face_style_power'] = self.options.get('face_style_power', default_style_power)
if is_first_run or ask_override:
self.options['bg_style_power'] = np.clip ( input_int("Background style power (0..100 ?:help skip:100) : ", default_style_power, help_message="How fast NN will learn dst background style during generalization of src and dst faces."), 0, 100 )
default_style_power = 100 if is_first_run else self.options['bg_style_power']
self.options['bg_style_power'] = np.clip ( input_int("Background style power (0..100 ?:help skip:%d) : " % (default_style_power), default_style_power, help_message="How fast NN will learn dst background style during generalization of src and dst faces."), 0, 100 )
else:
self.options['bg_style_power'] = self.options.get('bg_style_power', default_style_power)

View file

@ -109,6 +109,17 @@ class devicelib:
pass
return result
@staticmethod
def getAllDevicesIdxsWithNamesList ():
    """Return a list of (device_index, device_name) tuples for every
    NVML-visible GPU, e.g. [(0, 'GeForce GTX 1080'), (1, ...)].

    Best-effort: returns an empty list when NVML is unavailable
    (no NVIDIA driver / pynvml failure) instead of raising.
    """
    result = []
    try:
        nvmlInit()
        try:
            # nvmlDeviceGetName returns bytes; decode to str for display.
            result = [ (i, nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(i)).decode())
                       for i in range(nvmlDeviceGetCount()) ]
        finally:
            # Always release NVML once it was initialized, even if
            # device enumeration raised — the original leaked the
            # NVML session in that case.
            nvmlShutdown()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any NVML error falls back to the empty list.
        pass
    return result
@staticmethod
def getDeviceVRAMFree (idx):
result = 0

View file

@ -168,8 +168,8 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
for idx in device_config.gpu_idxs:
visible_device_list += str(idx) + ','
config.gpu_options.visible_device_list=visible_device_list[:-1]
config.gpu_options.force_gpu_compatible = True
config.gpu_options.force_gpu_compatible = True
config.gpu_options.allow_growth = device_config.allow_growth
nnlib.tf_sess = tf.Session(config=config)
@ -177,7 +177,6 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
if suppressor is not None:
suppressor.__exit__()
nnlib.__initialize_tf_functions()
nnlib.code_import_tf = compile (nnlib.code_import_tf_string,'','exec')
return nnlib.code_import_tf
@ -395,6 +394,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
nnlib.keras.backend.set_floatx('float16')
nnlib.keras.backend.set_session(nnlib.tf_sess)
nnlib.keras.backend.set_image_data_format('channels_last')
if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
suppressor.__exit__()