From 8da47fec13f077f4ef7866a970b533c3a1fe1cc6 Mon Sep 17 00:00:00 2001
From: iperov
Date: Wed, 13 Mar 2019 20:53:59 +0400
Subject: [PATCH] fix ModelBase, nnlib

---
 models/ModelBase.py       |   2 +-
 models/Model_SAE/Model.py |   4 +-
 nnlib/nnlib.py            | 103 ++++++++++++++++++++++++++------------
 3 files changed, 74 insertions(+), 35 deletions(-)

diff --git a/models/ModelBase.py b/models/ModelBase.py
index 772775e..06a196b 100644
--- a/models/ModelBase.py
+++ b/models/ModelBase.py
@@ -90,7 +90,7 @@ class ModelBase(object):
 
         if self.iter == 0 or ask_override:
             default_batch_size = 0 if self.iter == 0 else self.options.get('batch_size',0)
-            self.options['batch_size'] = max(0, io.input_int("Batch_size (?:help skip:0/default) : ", default_batch_size, help_message="Larger batch size is always better for NN's generalization, but it can cause Out of Memory error. Tune this value for your videocard manually."))
+            self.options['batch_size'] = max(0, io.input_int("Batch_size (?:help skip:%d) : " % (default_batch_size), default_batch_size, help_message="Larger batch size is always better for NN's generalization, but it can cause Out of Memory error. Tune this value for your videocard manually."))
         else:
             self.options['batch_size'] = self.options.get('batch_size', 0)
 
diff --git a/models/Model_SAE/Model.py b/models/Model_SAE/Model.py
index 4387da2..1f0438b 100644
--- a/models/Model_SAE/Model.py
+++ b/models/Model_SAE/Model.py
@@ -271,8 +271,8 @@ class SAEModel(ModelBase):
             psd_target_dst_anti_masked_ar = [ pred_src_dst_sigm_ar[i]*target_dstm_anti_sigm_ar[i] for i in range(len(pred_src_dst_sigm_ar))]
 
         if self.is_training_mode:
-            self.src_dst_opt = AdamCPU(lr=5e-5, beta_1=0.5, beta_2=0.999, tf_cpu_mode=self.options['optimizer_mode']-1)
-            self.src_dst_mask_opt = AdamCPU(lr=5e-5, beta_1=0.5, beta_2=0.999, tf_cpu_mode=self.options['optimizer_mode']-1)
+            self.src_dst_opt = Adam(lr=5e-5, beta_1=0.5, beta_2=0.999, tf_cpu_mode=self.options['optimizer_mode']-1)
+            self.src_dst_mask_opt = Adam(lr=5e-5, beta_1=0.5, beta_2=0.999, tf_cpu_mode=self.options['optimizer_mode']-1)
 
             if self.options['archi'] == 'liae':
                 src_dst_loss_train_weights = self.encoder.trainable_weights + self.inter_B.trainable_weights + self.inter_AB.trainable_weights + self.decoder.trainable_weights
diff --git a/nnlib/nnlib.py b/nnlib/nnlib.py
index 213428c..315387e 100644
--- a/nnlib/nnlib.py
+++ b/nnlib/nnlib.py
@@ -71,8 +71,8 @@ ZeroPadding2D = keras.layers.ZeroPadding2D
 RandomNormal = keras.initializers.RandomNormal
 Model = keras.models.Model
 
-Adam = keras.optimizers.Adam
-AdamCPU = nnlib.AdamCPU
+#Adam = keras.optimizers.Adam
+Adam = nnlib.Adam
 
 modelify = nnlib.modelify
 gaussian_blur = nnlib.gaussian_blur
@@ -194,7 +194,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
     def __initialize_keras_functions():
         keras = nnlib.keras
         K = keras.backend
-        
+
         def modelify(model_functor):
             def func(tensor):
                 return keras.models.Model (tensor, model_functor(tensor))
@@ -428,51 +428,89 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                 return dict(list(base_config.items()) + list(config.items()))
         nnlib.Scale = Scale
 
-        class AdamCPU(keras.optimizers.Optimizer):
+        class Adam(keras.optimizers.Optimizer):
+            """Adam optimizer.
+
+            Default parameters follow those provided in the original paper.
+
+            # Arguments
+                lr: float >= 0. Learning rate.
+                beta_1: float, 0 < beta < 1. Generally close to 1.
+                beta_2: float, 0 < beta < 1. Generally close to 1.
+                epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
+                decay: float >= 0. Learning rate decay over each update.
+                amsgrad: boolean. Whether to apply the AMSGrad variant of this
+                    algorithm from the paper "On the Convergence of Adam and
+                    Beyond".
+                tf_cpu_mode: only for tensorflow backend
+                    0 - default, no changes.
+                    1 - allows to train x2 bigger network on same VRAM consuming RAM
+                    2 - allows to train x3 bigger network on same VRAM consuming RAM*2 and CPU power.
+
+            # References
+                - [Adam - A Method for Stochastic Optimization]
+                  (https://arxiv.org/abs/1412.6980v8)
+                - [On the Convergence of Adam and Beyond]
+                  (https://openreview.net/forum?id=ryQu7f-RZ)
+            """
+
             def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
-                         tf_cpu_mode=0, **kwargs):
-                super(AdamCPU, self).__init__(**kwargs)
+                         epsilon=None, decay=0., amsgrad=False, tf_cpu_mode=0, **kwargs):
+                super(Adam, self).__init__(**kwargs)
                 with K.name_scope(self.__class__.__name__):
                     self.iterations = K.variable(0, dtype='int64', name='iterations')
                     self.lr = K.variable(lr, name='lr')
                     self.beta_1 = K.variable(beta_1, name='beta_1')
                     self.beta_2 = K.variable(beta_2, name='beta_2')
-
-                self.epsilon = K.epsilon()
+                    self.decay = K.variable(decay, name='decay')
+                if epsilon is None:
+                    epsilon = K.epsilon()
+                self.epsilon = epsilon
+                self.initial_decay = decay
+                self.amsgrad = amsgrad
                 self.tf_cpu_mode = tf_cpu_mode
 
-            @keras.legacy.interfaces.legacy_get_updates_support
             def get_updates(self, loss, params):
                 grads = self.get_gradients(loss, params)
                 self.updates = [K.update_add(self.iterations, 1)]
 
                 lr = self.lr
+                if self.initial_decay > 0:
+                    lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
+                                                              K.dtype(self.decay))))
+
                 t = K.cast(self.iterations, K.floatx()) + 1
                 lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
-                             (1. - K.pow(self.beta_1, t)))
+                     (1. - K.pow(self.beta_1, t)))
 
-                if self.tf_cpu_mode > 0:
-                    with K.tf.device("/cpu:0"):
-                        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
-                        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
+                e = K.tf.device("/cpu:0") if self.tf_cpu_mode > 0 else None
+                if e: e.__enter__()
+                ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
+                vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
+                if self.amsgrad:
+                    vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
                 else:
-                    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
-                    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
+                    vhats = [K.zeros(1) for _ in params]
+                if e: e.__exit__(None, None, None)
+
+                self.weights = [self.iterations] + ms + vs + vhats
 
-                self.weights = [self.iterations] + ms + vs
-
-                for p, g, m, v in zip(params, grads, ms, vs):
-                    if self.tf_cpu_mode == 2:
-                        with K.tf.device("/cpu:0"):
-                            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
-                            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
+                for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
+                    e = K.tf.device("/cpu:0") if self.tf_cpu_mode == 2 else None
+                    if e: e.__enter__()
+                    m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
+                    v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
+
+                    if self.amsgrad:
+                        vhat_t = K.maximum(vhat, v_t)
+                        self.updates.append(K.update(vhat, vhat_t))
+                    if e: e.__exit__(None, None, None)
+
+                    if self.amsgrad:
+                        p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
                     else:
-                        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
-                        v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
+                        p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
 
-                    p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
-
                     self.updates.append(K.update(m, m_t))
                     self.updates.append(K.update(v, v_t))
                     new_p = p_t
@@ -487,12 +525,13 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
             def get_config(self):
                 config = {'lr': float(K.get_value(self.lr)),
                           'beta_1': float(K.get_value(self.beta_1)),
-                          'beta_2': float(K.get_value(self.beta_2))
-                          }
-                base_config = super(AdamCPU, self).get_config()
+                          'beta_2': float(K.get_value(self.beta_2)),
+                          'decay': float(K.get_value(self.decay)),
+                          'epsilon': self.epsilon,
+                          'amsgrad': self.amsgrad}
+                base_config = super(Adam, self).get_config()
                 return dict(list(base_config.items()) + list(config.items()))
-
-        nnlib.AdamCPU = AdamCPU
+        nnlib.Adam = Adam
         '''
         not implemented in plaidML
         class ReflectionPadding2D(keras.layers.Layer):
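
Note on the new tf_cpu_mode option (explanatory, not part of the patch): it works by entering a TensorFlow "/cpu:0" device scope around the creation of Adam's moment accumulators (and, with tf_cpu_mode=2, around the m_t/v_t update math as well), so those tensors live in host RAM instead of VRAM; Model_SAE passes self.options['optimizer_mode']-1 straight through as tf_cpu_mode. Below is a minimal standalone sketch of the same device-pinning idea, assuming a Keras 2.x TensorFlow backend where K.tf is available (as nnlib already assumes); the helper name is illustrative only and not part of the codebase.

# Sketch: create Adam-style moment accumulators on the CPU so they do not use VRAM.
# Mirrors what the patched Adam.get_updates() does by calling
# e = K.tf.device("/cpu:0"); e.__enter__() ... e.__exit__(None, None, None) by hand.
from keras import backend as K

def make_moments_on_cpu(params, tf_cpu_mode=1):
    dev = K.tf.device("/cpu:0") if tf_cpu_mode > 0 else None
    if dev: dev.__enter__()                                             # open the device scope manually
    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]    # 1st-moment accumulators
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]    # 2nd-moment accumulators
    if dev: dev.__exit__(None, None, None)                              # close the scope again
    return ms, vs

The manual __enter__/__exit__ calls replace the old nested "with K.tf.device(...):" blocks so the same accumulator-creation code runs whether or not a device scope is active.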