Added reading of training options from a YAML file

Cioscos 2021-11-29 12:02:20 +01:00
commit 44dcdbea7e
4 changed files with 152 additions and 92 deletions

View file

@@ -131,6 +131,7 @@ if __name__ == "__main__":
'start_tensorboard' : arguments.start_tensorboard,
'dump_ckpt' : arguments.dump_ckpt,
'flask_preview' : arguments.flask_preview,
'config_training_file' : arguments.config_training_file
}
from mainscripts import Trainer
Trainer.main(**kwargs)
@@ -150,6 +151,7 @@ if __name__ == "__main__":
p.add_argument('--silent-start', action="store_true", dest="silent_start", default=False, help="Silent start. Automatically chooses Best GPU and last used model.")
p.add_argument('--tensorboard-logdir', action=fixPathAction, dest="tensorboard_dir", help="Directory of the tensorboard output files")
p.add_argument('--start-tensorboard', action="store_true", dest="start_tensorboard", default=False, help="Automatically start the tensorboard server preconfigured to the tensorboard-logdir")
p.add_argument('--config-training-file', action=fixPathAction, dest="config_training_file", help="Path to a custom YAML training configuration file")
p.add_argument('--dump-ckpt', action="store_true", dest="dump_ckpt", default=False, help="Dump the model to ckpt format.")
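With these changes the train entry point accepts the new flag. As a rough usage sketch (the other flags and paths shown are the usual DeepFaceLab train arguments and illustrative assumptions, not part of this diff):

python main.py train --training-data-src-dir workspace/data_src/aligned --training-data-dst-dir workspace/data_dst/aligned --model-dir workspace/model --model SAEHD --config-training-file workspace/SAEHD_config.yaml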

View file

@@ -67,6 +67,7 @@ def trainerThread (s2c, c2s, e,
debug=False,
tensorboard_dir=None,
start_tensorboard=False,
config_training_file=None,
dump_ckpt=False,
**kwargs):
while True:
@@ -97,6 +98,7 @@ def trainerThread (s2c, c2s, e,
force_gpu_idxs=force_gpu_idxs,
cpu_only=cpu_only,
silent_start=silent_start,
config_training_file=config_training_file,
debug=debug)
is_reached_goal = model.is_reached_iter_goal()

View file

@@ -10,6 +10,7 @@ import tempfile
import time
import datetime
from pathlib import Path
import yaml
import cv2
import numpy as np
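Note that yaml here is PyYAML. None of the four changed files is a requirements file, so if PyYAML is not already present in the environment it presumably has to be installed separately (for example with pip install pyyaml).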
@@ -35,6 +36,7 @@ class ModelBase(object):
cpu_only=False,
debug=False,
force_model_class_name=None,
config_training_file=None,
silent_start=False,
**kwargs):
self.is_training = is_training
@@ -140,13 +142,34 @@ class ModelBase(object):
self.sample_for_preview = None
self.choosed_gpu_indexes = None
# Optional: load training options from a YAML configuration file
model_data = {}
self.config_file_exists = False
self.read_from_conf = False
#check if config_training_file mode is enabled
if config_training_file is not None:
self.config_file_path = Path(config_training_file)
if self.config_file_path.exists():
self.read_from_conf = io.input_bool(
f'Do you want to read the training options from the {self.config_file_path.stem} configuration file?',
False,
'Read the options from the configuration file instead of being asked for each option one by one'
)
if self.read_from_conf:
self.options = self.read_from_config_file()
self.config_file_exists = True
else:
io.log_info(f"Configuration file doesn't exist. A standard configuration file will be created.")
self.model_data_path = Path( self.get_strpath_storage_for_file('data.dat') )
if self.model_data_path.exists():
io.log_info (f"Loading {self.model_name} model...")
model_data = pickle.loads ( self.model_data_path.read_bytes() )
self.iter = model_data.get('iter',0)
if self.iter != 0:
if not self.config_file_exists:
self.options = model_data['options']
self.loss_history = model_data.get('loss_history', [])
self.sample_for_preview = model_data.get('sample_for_preview', None)
@@ -183,6 +206,9 @@ class ModelBase(object):
if self.is_first_run():
# save as default options only for first run model initialize
self.default_options_path.write_bytes( pickle.dumps (self.options) )
# save config file
self.save_config_file()
self.session_name = self.options.get('session_name', "")
self.autobackup_hour = self.options.get('autobackup_hour', 0)
self.maximum_n_backups = self.options.get('maximum_n_backups', 24)
@@ -426,6 +452,29 @@ class ModelBase(object):
self.autobackup_start_time += self.autobackup_hour*3600
self.create_backup()
def read_from_config_file(self):
    with open(self.config_file_path, 'r') as file:
        data = yaml.safe_load(file) or {}
    for key, value in data.items():
        # bool is a subclass of int in Python, so test it first to keep flag options as booleans
        if isinstance(value, bool):
            continue
        elif isinstance(value, int):
            data[key] = np.int32(value)
        elif isinstance(value, float):
            data[key] = np.float64(value)
    return data
def save_config_file(self):
    # nothing to write when training was started without --config-training-file
    if getattr(self, 'config_file_path', None) is None:
        return
    # convert numpy scalars back to native Python types so yaml can serialize them
    saving_dict = {}
    for key, value in self.options.items():
        if isinstance(value, (np.int32, np.float64)):
            saving_dict[key] = value.item()
        else:
            saving_dict[key] = value
    with open(self.config_file_path, 'w') as file:
        yaml.dump(saving_dict, file)
def create_backup(self):
io.log_info ("Creating backup...", end='\r')
@@ -558,6 +607,8 @@ class ModelBase(object):
def get_strpath_storage_for_file(self, filename):
return str( self.saved_models_path / ( self.get_model_name() + '_' + filename) )
def get_summary_path(self):
return self.get_strpath_storage_for_file('summary.txt')
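The file consumed by read_from_config_file and produced by save_config_file is a flat YAML mapping of option names to values. A minimal illustrative example, using only option keys that appear elsewhere in this commit (the values are placeholders, not the code's defaults):

resolution: 192
face_type: wf
models_opt_on_gpu: true
adabelief: true
use_fp16: false
ae_dims: 256
e_dims: 64
d_mask_dims: 22
masked_training: true
gan_noise: 0.0
session_name: ''
autobackup_hour: 0
maximum_n_backups: 24

yaml.safe_load returns plain Python ints, floats and bools, which is presumably why read_from_config_file casts the numbers to np.int32 / np.float64: it keeps them close to the numpy scalar types that calls such as np.clip produce in the interactive path.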

View file

@@ -28,7 +28,7 @@ class SAEHDModel(ModelBase):
min_res = 64
max_res = 640
#default_usefp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False)
default_usefp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False)
default_resolution = self.options['resolution'] = self.load_or_def_option('resolution', 128)
default_face_type = self.options['face_type'] = self.load_or_def_option('face_type', 'f')
default_models_opt_on_gpu = self.options['models_opt_on_gpu'] = self.load_or_def_option('models_opt_on_gpu', True)
@@ -71,6 +71,7 @@ class SAEHDModel(ModelBase):
ask_override = self.ask_override()
if self.is_first_run() or ask_override:
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
self.ask_session_name()
self.ask_autobackup_hour()
self.ask_maximum_n_backups()
@@ -80,9 +81,10 @@ class SAEHDModel(ModelBase):
self.ask_random_src_flip()
self.ask_random_dst_flip()
self.ask_batch_size(suggest_batch_size)
#self.options['use_fp16'] = io.input_bool ("Use fp16", default_usefp16, help_message='Increases training/inference speed, reduces model size. Model may crash. Enable it after 1-5k iters.')
self.options['use_fp16'] = io.input_bool ("Use fp16", default_usefp16, help_message='Increases training/inference speed, reduces model size. Model may crash. Enable it after 1-5k iters.')
if self.is_first_run():
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
resolution = io.input_int("Resolution", default_resolution, add_info="64-640", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16 and 32 for -d archi.")
resolution = np.clip ( (resolution // 16) * 16, min_res, max_res)
self.options['resolution'] = resolution
@@ -90,13 +92,13 @@ class SAEHDModel(ModelBase):
while True:
archi = io.input_str ("AE architecture", default_archi, help_message=\
"""
'df' keeps more identity-preserved face.
'liae' can fix overly different face shapes.
'-u' increased likeness of the face.
'-d' (experimental) doubling the resolution using the same computation cost.
Examples: df, liae, df-d, df-ud, liae-ud, ...
""").lower()
"""
'df' keeps more identity-preserved face.
'liae' can fix overly different face shapes.
'-u' increased likeness of the face.
'-d' (experimental) doubling the resolution using the same computation cost.
Examples: df, liae, df-d, df-ud, liae-ud, ...
""").lower()
archi_split = archi.split('-')
@@ -129,6 +131,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
default_d_mask_dims = self.options['d_mask_dims'] = self.load_or_def_option('d_mask_dims', default_d_mask_dims)
if self.is_first_run():
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dimensions", default_ae_dims, add_info="32-1024", help_message="All face information will be packed into the AE dims. If the amount of AE dims is not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )
e_dims = np.clip ( io.input_int("Encoder dimensions", default_e_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
@@ -141,6 +144,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2
if self.is_first_run() or ask_override:
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
if self.options['face_type'] == 'wf' or self.options['face_type'] == 'head' or self.options['face_type'] == 'custom':
self.options['masked_training'] = io.input_bool ("Masked training", default_masked_training, help_message="This option is available only for 'whole_face' or 'head' type. Masked training clips the training area to the full_face mask or XSeg mask, so the network will train the faces properly.")
@@ -157,6 +161,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
default_gan_noise = self.options['gan_noise'] = self.load_or_def_option('gan_noise', 0.0)
if self.is_first_run() or ask_override:
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default the model and optimizer weights are placed on the GPU to accelerate the process. You can place them on the CPU to free up extra VRAM and thus set bigger dimensions.")
self.options['adabelief'] = io.input_bool ("Use AdaBelief optimizer?", default_adabelief, help_message="Use AdaBelief optimizer. It requires more VRAM, but the accuracy and the generalization of the model is higher.")
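A side note on the guard that this commit places in front of each prompt: the expression (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf is logically equivalent to not (self.read_from_conf and self.config_file_exists), i.e. the interactive question is only skipped when the options were actually loaded from the configuration file. A possible behaviour-preserving helper, as a sketch only (the method name is hypothetical):

def options_should_be_asked(self):
    # ask interactively unless the options were successfully read from the yaml config file
    return not (self.read_from_conf and self.config_file_exists)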