diff --git a/models/ModelBase.py b/models/ModelBase.py
index 692d5a8..9c11c6f 100644
--- a/models/ModelBase.py
+++ b/models/ModelBase.py
@@ -34,6 +34,11 @@ class ModelBase(object):
                 device_args['force_gpu_idx'] = io.input_int("Which GPU idx to choose? ( skip: best GPU ) : ", -1, [ x[0] for x in idxs_names_list] )
         self.device_args = device_args
 
+        nnlib.import_all ( nnlib.DeviceConfig(allow_growth=False, **self.device_args) )
+        self.device_config = nnlib.active_DeviceConfig
+        self.keras = nnlib.keras
+        self.K = nnlib.keras.backend
+
         io.log_info ("Loading model...")
 
         self.model_path = model_path
@@ -121,14 +126,9 @@ class ModelBase(object):
             self.src_scale_mod = self.options['src_scale_mod']
             if self.src_scale_mod == 0:
                 self.options.pop('src_scale_mod')
-
+
+
         self.onInitializeOptions(self.iter == 0, ask_override)
-
-        nnlib.import_all ( nnlib.DeviceConfig(allow_growth=False, **self.device_args) )
-        self.device_config = nnlib.active_DeviceConfig
-        self.keras = nnlib.keras
-        self.K = nnlib.keras.backend
-
         self.onInitialize()
 
         self.options['batch_size'] = self.batch_size
diff --git a/models/Model_SAE/Model.py b/models/Model_SAE/Model.py
index d2151b4..6c5f4cb 100644
--- a/models/Model_SAE/Model.py
+++ b/models/Model_SAE/Model.py
@@ -42,11 +42,12 @@ class SAEModel(ModelBase):
             self.options['face_type'] = self.options.get('face_type', default_face_type)
             self.options['learn_mask'] = self.options.get('learn_mask', True)
 
-        if is_first_run or ask_override:
-            def_simple_optimizer = self.options.get('simple_optimizer', False)
-            self.options['simple_optimizer'] = io.input_bool ("Use simple optimizer? (y/n, ?:help skip:%s) : " % ( yn_str[def_simple_optimizer] ), def_simple_optimizer, help_message="Simple optimizer allows you to train bigger network or more batch size, sacrificing training accuracy.")
+
+        if (is_first_run or ask_override) and 'tensorflow' in self.device_config.backend:
+            def_optimizer_mode = self.options.get('optimizer_mode', 1)
+            self.options['optimizer_mode'] = io.input_int ("Optimizer mode? ( 1,2,3 ?:help skip:%d) : " % (def_optimizer_mode), def_optimizer_mode, help_message="1 - no changes. 2 - allows you to train x2 bigger network consuming RAM. 3 - allows you to train x3 bigger network consuming huge amount of RAM and slower, depends on CPU power.")
         else:
-            self.options['simple_optimizer'] = self.options.get('simple_optimizer', False)
+            self.options['optimizer_mode'] = self.options.get('optimizer_mode', 1)
 
         if is_first_run:
             self.options['archi'] = io.input_str ("AE architecture (df, liae, vg ?:help skip:%s) : " % (default_archi) , default_archi, ['df','liae','vg'], help_message="'df' keeps faces more natural. 'liae' can fix overly different face shapes. 'vg' - currently testing.").lower()
@@ -269,14 +270,10 @@ class SAEModel(ModelBase):
         psd_target_dst_masked_ar = [ pred_src_dst_sigm_ar[i]*target_dstm_sigm_ar[i] for i in range(len(pred_src_dst_sigm_ar))]
         psd_target_dst_anti_masked_ar = [ pred_src_dst_sigm_ar[i]*target_dstm_anti_sigm_ar[i] for i in range(len(pred_src_dst_sigm_ar))]
 
-        if self.is_training_mode:
-            if self.options['simple_optimizer']:
-                self.src_dst_opt = DFLOptimizer(lr=5e-5)
-                self.src_dst_mask_opt = DFLOptimizer(lr=5e-5)
-            else:
-                self.src_dst_opt = Adam(lr=5e-5, beta_1=0.5, beta_2=0.999)
-                self.src_dst_mask_opt = Adam(lr=5e-5, beta_1=0.5, beta_2=0.999)
-
+        if self.is_training_mode:
+            self.src_dst_opt = AdamCPU(lr=5e-5, beta_1=0.5, beta_2=0.999, tf_cpu_mode=self.options['optimizer_mode']-1)
+            self.src_dst_mask_opt = AdamCPU(lr=5e-5, beta_1=0.5, beta_2=0.999, tf_cpu_mode=self.options['optimizer_mode']-1)
+
             if self.options['archi'] == 'liae':
                 src_dst_loss_train_weights = self.encoder.trainable_weights + self.inter_B.trainable_weights + self.inter_AB.trainable_weights + self.decoder.trainable_weights
                 if self.options['learn_mask']:
diff --git a/nnlib/nnlib.py b/nnlib/nnlib.py
index 618caaf..95f83a5 100644
--- a/nnlib/nnlib.py
+++ b/nnlib/nnlib.py
@@ -72,7 +72,7 @@
 RandomNormal = keras.initializers.RandomNormal
 Model = keras.models.Model
 Adam = keras.optimizers.Adam
-DFLOptimizer = nnlib.DFLOptimizer
+AdamCPU = nnlib.AdamCPU
 
 modelify = nnlib.modelify
 gaussian_blur = nnlib.gaussian_blur
@@ -434,28 +434,93 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                 return dict(list(base_config.items()) + list(config.items()))
         nnlib.Scale = Scale
 
-        class DFLOptimizer(keras.optimizers.Optimizer):
-            def __init__(self, lr=0.001, **kwargs):
-                super(DFLOptimizer, self).__init__(**kwargs)
+        class AdamCPU(keras.optimizers.Optimizer):
+            """Adam optimizer.
+            Default parameters follow those provided in the original paper.
+            # Arguments
+                lr: float >= 0. Learning rate.
+                beta_1: float, 0 < beta < 1. Generally close to 1.
+                beta_2: float, 0 < beta < 1. Generally close to 1.
+                epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
+                decay: float >= 0. Learning rate decay over each update.
+                amsgrad: boolean. Whether to apply the AMSGrad variant of this
+                    algorithm from the paper "On the Convergence of Adam and
+                    Beyond".
+            # References
+                - [Adam - A Method for Stochastic Optimization](
+                  https://arxiv.org/abs/1412.6980v8)
+                - [On the Convergence of Adam and Beyond](
+                  https://openreview.net/forum?id=ryQu7f-RZ)
+            """
+
+            def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
+                         epsilon=None, decay=0., amsgrad=False, tf_cpu_mode=0, **kwargs):
+                super(AdamCPU, self).__init__(**kwargs)
                 with K.name_scope(self.__class__.__name__):
                     self.iterations = K.variable(0, dtype='int64', name='iterations')
                     self.lr = K.variable(lr, name='lr')
-                    self.beta_1 = K.variable(0.9, name='beta_1')
-                    self.beta_2 = K.variable(0.998, name='beta_2')
+                    self.beta_1 = K.variable(beta_1, name='beta_1')
+                    self.beta_2 = K.variable(beta_2, name='beta_2')
+                    self.decay = K.variable(decay, name='decay')
+                if epsilon is None:
+                    epsilon = K.epsilon()
+                self.epsilon = epsilon
+                self.initial_decay = decay
+                self.amsgrad = amsgrad
+                self.tf_cpu_mode = tf_cpu_mode
 
             @keras.legacy.interfaces.legacy_get_updates_support
             def get_updates(self, loss, params):
                 grads = self.get_gradients(loss, params)
                 self.updates = [K.update_add(self.iterations, 1)]
-                lr_t = self.lr * ( ( K.cast(self.iterations, K.floatx()) ) % 100 + 1 ) / 100.0
+
+                lr = self.lr
+                if self.initial_decay > 0:
+                    lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
+                                                              K.dtype(self.decay))))
+
+                t = K.cast(self.iterations, K.floatx()) + 1
+                lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
+                             (1. - K.pow(self.beta_1, t)))
 
-                self.weights = []
-                for p, g in zip(params, grads):
+                if self.tf_cpu_mode > 0:
+                    with K.tf.device("/cpu:0"):
+                        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
+                        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
+                        if self.amsgrad:
+                            vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
+                        else:
+                            vhats = [K.zeros(1) for _ in params]
+                else:
+                    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
+                    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
+                    if self.amsgrad:
+                        vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
+                    else:
+                        vhats = [K.zeros(1) for _ in params]
 
-                    m_t = (1. - self.beta_1) * g
-                    v_t = (1. - self.beta_2) * K.square(g)
-                    new_p = p - lr_t * m_t / (K.sqrt(v_t) + K.epsilon() )
+                self.weights = [self.iterations] + ms + vs + vhats
+
+                for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
+                    if self.tf_cpu_mode == 2:
+                        with K.tf.device("/cpu:0"):
+                            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
+                            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
+                    else:
+                        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
+                        v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
+
+                    if self.amsgrad:
+                        vhat_t = K.maximum(vhat, v_t)
+                        p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
+                        self.updates.append(K.update(vhat, vhat_t))
+                    else:
+                        p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
+
+                    self.updates.append(K.update(m, m_t))
+                    self.updates.append(K.update(v, v_t))
+                    new_p = p_t
 
                     # Apply constraints.
                     if getattr(p, 'constraint', None) is not None:
@@ -467,13 +532,14 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
             def get_config(self):
                 config = {'lr': float(K.get_value(self.lr)),
                           'beta_1': float(K.get_value(self.beta_1)),
-                          'beta_2': float(K.get_value(self.beta_2))
-                          }
-                base_config = super(DFLOptimizer, self).get_config()
+                          'beta_2': float(K.get_value(self.beta_2)),
+                          'decay': float(K.get_value(self.decay)),
+                          'epsilon': self.epsilon,
+                          'amsgrad': self.amsgrad}
+                base_config = super(AdamCPU, self).get_config()
                 return dict(list(base_config.items()) + list(config.items()))
-
-        nnlib.DFLOptimizer = DFLOptimizer
-
+
+        nnlib.AdamCPU = AdamCPU
     '''
     not implemented in plaidML
    class ReflectionPadding2D(keras.layers.Layer):