refactoring

Author: iperov
Date:   2019-03-21 18:58:38 +04:00
Commit: 565af4d1da


@@ -17,24 +17,24 @@ class nnlib(object):
    active_DeviceConfig = DeviceConfig() #default is one best GPU
    dlib = None
    keras = None
    keras_contrib = None
    tf = None
    tf_sess = None
    PML = None
    PMLK = None
    PMLTile = None
    code_import_keras = None
    code_import_keras_contrib = None
    code_import_all = None
    code_import_dlib = None
    ResNet = None
    UNet = None
    UNetTemporalPredictor = None
@@ -44,34 +44,35 @@ class nnlib(object):
"""
keras = nnlib.keras
K = keras.backend
-Input = keras.layers.Input
-Dense = keras.layers.Dense
-Conv2D = keras.layers.Conv2D
-Conv2DTranspose = keras.layers.Conv2DTranspose
-SeparableConv2D = keras.layers.SeparableConv2D
-MaxPooling2D = keras.layers.MaxPooling2D
-UpSampling2D = keras.layers.UpSampling2D
-BatchNormalization = keras.layers.BatchNormalization
-LeakyReLU = keras.layers.LeakyReLU
-ReLU = keras.layers.ReLU
-PReLU = keras.layers.PReLU
-tanh = keras.layers.Activation('tanh')
-sigmoid = keras.layers.Activation('sigmoid')
-Dropout = keras.layers.Dropout
-Softmax = keras.layers.Softmax
-Lambda = keras.layers.Lambda
-Add = keras.layers.Add
-Concatenate = keras.layers.Concatenate
-Flatten = keras.layers.Flatten
-Reshape = keras.layers.Reshape
-ZeroPadding2D = keras.layers.ZeroPadding2D
+KL = keras.layers
+Input = KL.Input
+Dense = KL.Dense
+Conv2D = KL.Conv2D
+Conv2DTranspose = KL.Conv2DTranspose
+SeparableConv2D = KL.SeparableConv2D
+MaxPooling2D = KL.MaxPooling2D
+UpSampling2D = KL.UpSampling2D
+BatchNormalization = KL.BatchNormalization
+LeakyReLU = KL.LeakyReLU
+ReLU = KL.ReLU
+PReLU = KL.PReLU
+tanh = KL.Activation('tanh')
+sigmoid = KL.Activation('sigmoid')
+Dropout = KL.Dropout
+Softmax = KL.Softmax
+Lambda = KL.Lambda
+Add = KL.Add
+Concatenate = KL.Concatenate
+Flatten = KL.Flatten
+Reshape = KL.Reshape
+ZeroPadding2D = KL.ZeroPadding2D
RandomNormal = keras.initializers.RandomNormal
Model = keras.models.Model
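
Context for this hunk: the `*_string` blocks are compiled once and exec'd by every model file, so aliasing `keras.layers` to `KL` both shortens the block and hands the `KL` name to every consumer. A minimal sketch of the import-by-exec pattern, with demo names rather than the real class:

```python
# Minimal sketch of nnlib's import-by-exec pattern (demo names, not the real class).
import keras

class nnlib_demo:
    keras = keras
    code_import_keras_string = (
        "K = nnlib_demo.keras.backend\n"
        "KL = nnlib_demo.keras.layers\n"
        "Conv2D = KL.Conv2D\n"
    )
    code_import_keras = compile(code_import_keras_string, '<nnlib>', 'exec')

# A model file pulls every alias into its namespace with one exec:
exec(nnlib_demo.code_import_keras, globals())
inp = KL.Input((64, 64, 3))  # KL and Conv2D now resolve here
```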
@@ -86,6 +87,8 @@ dssim = nnlib.dssim
PixelShuffler = nnlib.PixelShuffler
SubpixelUpscaler = nnlib.SubpixelUpscaler
Scale = nnlib.Scale
+Capsule = nnlib.Capsule
CAInitializerMP = nnlib.CAInitializerMP

#ReflectionPadding2D = nnlib.ReflectionPadding2D
@@ -100,7 +103,7 @@ InstanceNormalization = keras_contrib.layers.InstanceNormalization
    code_import_dlib_string = \
"""
dlib = nnlib.dlib
"""

    code_import_all_string = \
"""
@@ -110,8 +113,8 @@ UNet = nnlib.UNet
UNetTemporalPredictor = nnlib.UNetTemporalPredictor
NLayerDiscriminator = nnlib.NLayerDiscriminator
"""

    @staticmethod
    def _import_tf(device_config):
        if nnlib.tf is not None:
@@ -121,20 +124,20 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
            suppressor = std_utils.suppress_stdout_stderr().__enter__()
        else:
            suppressor = None

        if 'CUDA_VISIBLE_DEVICES' in os.environ.keys():
            os.environ.pop('CUDA_VISIBLE_DEVICES')

        os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '2'
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #tf log errors only

        import tensorflow as tf
        nnlib.tf = tf

        if device_config.cpu_only:
            config = tf.ConfigProto(device_count={'GPU': 0})
        else:
            config = tf.ConfigProto()

            if device_config.backend != "tensorflow-generic":
                #tensorflow-generic is a system with an NVIDIA card, but w/o NVSMI,
                #so don't hide devices and let tensorflow choose the best card
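
The device handling in this hunk (continuing below) is TF 1.x session plumbing: `CUDA_VISIBLE_DEVICES` is cleared so TensorFlow enumerates every GPU, and the session config then re-restricts visibility explicitly. A hedged standalone sketch of the same mechanics:

```python
# Hedged sketch (TF 1.x API, as in this file): pin a session to GPUs 0 and 2.
import tensorflow as tf

config = tf.ConfigProto()
config.gpu_options.visible_device_list = '0,2'  # comma-joined indices, built like the idx loop in the next hunk
config.gpu_options.allow_growth = True          # grab VRAM on demand rather than all at once
sess = tf.Session(config=config)
```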
@@ -142,15 +145,15 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                for idx in device_config.gpu_idxs:
                    visible_device_list += str(idx) + ','
                config.gpu_options.visible_device_list=visible_device_list[:-1]

        config.gpu_options.force_gpu_compatible = True
        config.gpu_options.allow_growth = device_config.allow_growth

        nnlib.tf_sess = tf.Session(config=config)

        if suppressor is not None:
            suppressor.__exit__()

    @staticmethod
    def import_keras(device_config):
        if nnlib.keras is not None:
@@ -164,48 +167,49 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
        if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
            suppressor = std_utils.suppress_stdout_stderr().__enter__()

        #if "tensorflow" in device_config.backend:
        #    nnlib.keras = nnlib.tf.keras
        #else:
        import keras as keras_
        nnlib.keras = keras_

        if device_config.backend == "plaidML":
            import plaidml
            import plaidml.tile
            nnlib.PML = plaidml
            nnlib.PMLK = plaidml.keras.backend
            nnlib.PMLTile = plaidml.tile

        if device_config.use_fp16:
            nnlib.keras.backend.set_floatx('float16')

        if "tensorflow" in device_config.backend:
            nnlib.keras.backend.set_session(nnlib.tf_sess)

        nnlib.keras.backend.set_image_data_format('channels_last')

        if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1':
            suppressor.__exit__()

        nnlib.code_import_keras = compile (nnlib.code_import_keras_string,'','exec')
        nnlib.__initialize_keras_functions()

        return nnlib.code_import_keras

    @staticmethod
    def __initialize_keras_functions():
        keras = nnlib.keras
        K = keras.backend
+       KL = keras.layers

        def modelify(model_functor):
            def func(tensor):
                return keras.models.Model (tensor, model_functor(tensor))
            return func
        nnlib.modelify = modelify

        def gaussian_blur(radius=2.0):
            def gaussian(x, mu, sigma):
                return np.exp(-(float(x) - float(mu)) ** 2 / (2 * sigma ** 2))
@@ -217,7 +221,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                np_kernel = np.outer(kernel_1d, kernel_1d).astype(dtype=K.floatx())
                kernel = np_kernel / np.sum(np_kernel)
                return kernel

            gauss_kernel = make_kernel(radius)
            gauss_kernel = gauss_kernel[:, :, np.newaxis, np.newaxis]
@@ -231,17 +235,17 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                return K.concatenate (outputs, axis=-1)
            return func
        nnlib.gaussian_blur = gaussian_blur
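
gaussian_blur builds one normalized 2-D Gaussian kernel, convolves every channel with it separately, and re-concatenates along the channel axis, i.e. a depthwise blur. The kernel arithmetic can be checked in plain NumPy; the kernel-size rule below is an assumption, since make_kernel's opening lines fall outside the hunk:

```python
# Hedged NumPy check of the blur kernel: separable, normalized 2-D Gaussian.
import numpy as np

def gaussian(x, mu, sigma):
    return np.exp(-(float(x) - float(mu)) ** 2 / (2 * sigma ** 2))

sigma = 2.0
kernel_size = max(3, int(2 * 2 * sigma + 1))  # assumed size rule
mean = np.floor(0.5 * kernel_size)
kernel_1d = np.array([gaussian(x, mean, sigma) for x in range(kernel_size)])
np_kernel = np.outer(kernel_1d, kernel_1d)    # separable kernel as an outer product
kernel = np_kernel / np.sum(np_kernel)

print(kernel.shape, kernel.sum())             # (9, 9) 1.0 -> blurring preserves brightness
```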
        def style_loss(gaussian_blur_radius=0.0, loss_weight=1.0, wnd_size=0, step_size=1):
            if gaussian_blur_radius > 0.0:
                gblur = gaussian_blur(gaussian_blur_radius)

            def sd(content, style, loss_weight):
                content_nc = K.int_shape(content)[-1]
                style_nc = K.int_shape(style)[-1]
                if content_nc != style_nc:
                    raise Exception("style_loss() content_nc != style_nc")

                axes = [1,2]
                c_mean, c_var = K.mean(content, axis=axes, keepdims=True), K.var(content, axis=axes, keepdims=True)
                s_mean, s_var = K.mean(style, axis=axes, keepdims=True), K.var(style, axis=axes, keepdims=True)
@@ -249,9 +253,9 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                mean_loss = K.sum(K.square(c_mean-s_mean))
                std_loss = K.sum(K.square(c_std-s_std))

                return (mean_loss + std_loss) * ( loss_weight / float(content_nc) )

            def func(target, style):
                if wnd_size == 0:
                    if gaussian_blur_radius > 0.0:
@@ -262,9 +266,9 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                    #currently unused
                    if nnlib.tf is not None:
                        sh = K.int_shape(target)[1]
                        k = (sh-wnd_size) // step_size + 1
                        if gaussian_blur_radius > 0.0:
                            target, style = gblur(target), gblur(style)
                        target = nnlib.tf.image.extract_image_patches(target, [1,k,k,1], [1,1,1,1], [1,step_size,step_size,1], 'VALID')
                        style = nnlib.tf.image.extract_image_patches(style, [1,k,k,1], [1,1,1,1], [1,step_size,step_size,1], 'VALID')
                        return sd( target, style, loss_weight )
@@ -272,8 +276,8 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                        print ("Sorry, plaidML backend does not support style_loss")
                        return 0
            return func
        nnlib.style_loss = style_loss
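
sd() is AdaIN-style statistic matching: the loss is the squared gap between the per-channel spatial means and standard deviations of content and style features. The same arithmetic in a hedged NumPy rendering (the epsilon inside the std is an assumption; the c_std/s_std lines sit outside the shown hunks):

```python
# Hedged NumPy sketch of the sd() statistic-matching loss (tensors are batch, H, W, C).
import numpy as np

def style_stat_loss(content, style, loss_weight=1.0, eps=1e-5):
    axes = (1, 2)  # spatial axes, as in the Keras code above
    c_mean, c_var = content.mean(axis=axes, keepdims=True), content.var(axis=axes, keepdims=True)
    s_mean, s_var = style.mean(axis=axes, keepdims=True), style.var(axis=axes, keepdims=True)
    c_std, s_std = np.sqrt(c_var + eps), np.sqrt(s_var + eps)
    mean_loss = np.sum((c_mean - s_mean) ** 2)
    std_loss = np.sum((c_std - s_std) ** 2)
    return (mean_loss + std_loss) * (loss_weight / float(content.shape[-1]))

print(style_stat_loss(np.random.rand(1, 8, 8, 3), np.random.rand(1, 8, 8, 3)))
```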
        def dssim(kernel_size=11, k1=0.01, k2=0.03, max_value=1.0):
            # port of tf.image.ssim to pure keras in order to work on plaidML backend.
@@ -289,10 +293,10 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                    g = np.reshape (g, (1,-1)) + np.reshape(g, (-1,1) )
                    g = K.constant ( np.reshape (g, (1,-1)) )
                    g = K.softmax(g)
                    g = K.reshape (g, (size, size, 1, 1))
                    g = K.tile (g, (1,1,ch,1))
                    return g

                kernel = _fspecial_gauss(kernel_size,1.5)

                def reducer(x):
@@ -300,13 +304,13 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                c1 = (k1 * max_value) ** 2
                c2 = (k2 * max_value) ** 2

                mean0 = reducer(y_true)
                mean1 = reducer(y_pred)
                num0 = mean0 * mean1 * 2.0
                den0 = K.square(mean0) + K.square(mean1)
                luminance = (num0 + c1) / (den0 + c1)

                num1 = reducer(y_true * y_pred) * 2.0
                den1 = reducer(K.square(y_true) + K.square(y_pred))
                c2 *= 1.0 #compensation factor
@@ -316,10 +320,10 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                return K.mean( (1.0 - ssim_val ) / 2.0 )
            return func
        nnlib.dssim = dssim
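
dssim maps structural similarity into a minimizable distance, DSSIM = (1 - SSIM) / 2, so identical images score 0. A hedged usage sketch, assuming the nnlib aliases (dssim, Input, Conv2D, Model) were already exec'd into scope:

```python
# Hedged usage sketch: dssim() returns an ordinary Keras loss callable.
loss_fn = dssim(kernel_size=11, k1=0.01, k2=0.03, max_value=1.0)

inp = Input((128, 128, 3))
model = Model(inp, Conv2D(3, 3, padding='same')(inp))
model.compile(optimizer='adam', loss=loss_fn)
# Identical y_true and y_pred give SSIM == 1, so (1 - SSIM) / 2 evaluates to 0.
```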
-        class PixelShuffler(keras.layers.Layer):
+        class PixelShuffler(KL.Layer):
            def __init__(self, size=(2, 2), data_format='channels_last', **kwargs):
                super(PixelShuffler, self).__init__(**kwargs)
                self.data_format = data_format
@@ -397,11 +401,11 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                base_config = super(PixelShuffler, self).get_config()
                return dict(list(base_config.items()) + list(config.items()))

        nnlib.PixelShuffler = PixelShuffler
        nnlib.SubpixelUpscaler = PixelShuffler
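
PixelShuffler (aliased as SubpixelUpscaler) trades channels for resolution: (H, W, C*r^2) becomes (H*r, W*r, C). The rearrangement is easy to sanity-check; a hedged NumPy sketch of the channels_last case:

```python
# Hedged NumPy sketch of the channels_last pixel-shuffle rearrangement, r = 2.
import numpy as np

def pixel_shuffle(x, r=2):
    b, h, w, c = x.shape                       # c must be divisible by r*r
    x = x.reshape(b, h, w, r, r, c // (r * r))
    x = x.transpose(0, 1, 3, 2, 4, 5)          # interleave the r factors into H and W
    return x.reshape(b, h * r, w * r, c // (r * r))

print(pixel_shuffle(np.zeros((1, 8, 8, 12))).shape)  # (1, 16, 16, 3)
```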
-        class Scale(keras.layers.Layer):
+        class Scale(KL.Layer):
            """
            GAN Custom Scale Layer
            Code borrows from https://github.com/flyyufelix/cnn_finetune
@@ -496,25 +500,25 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                else:
                    vhats = [K.zeros(1) for _ in params]
                if e: e.__exit__(None, None, None)

                self.weights = [self.iterations] + ms + vs + vhats

                for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
                    e = K.tf.device("/cpu:0") if self.tf_cpu_mode == 2 else None
                    if e: e.__enter__()
                    m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
                    v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)

                    if self.amsgrad:
                        vhat_t = K.maximum(vhat, v_t)
                        self.updates.append(K.update(vhat, vhat_t))
                    if e: e.__exit__(None, None, None)

                    if self.amsgrad:
                        p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
                    else:
                        p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

                    self.updates.append(K.update(m, m_t))
                    self.updates.append(K.update(v, v_t))
                    new_p = p_t
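
The `e = K.tf.device("/cpu:0")` dance above is the point of this Adam variant: with tf_cpu_mode set, the optimizer's moment slots are created (and optionally updated) on the CPU, keeping them out of GPU memory at the cost of host-device transfers. Reduced to its core, the placement trick looks like this (hedged, TF 1.x):

```python
# Hedged sketch of the tf_cpu_mode trick (TF 1.x): build optimizer slots on the CPU.
import tensorflow as tf

with tf.device("/cpu:0"):
    m = tf.Variable(tf.zeros([1024]), name="adam_m")  # first moment lives in host RAM
    v = tf.Variable(tf.zeros([1024]), name="adam_v")  # second moment lives in host RAM
# Gradient math can still run on the GPU; TF inserts the copies automatically.
```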
@@ -537,13 +541,13 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                return dict(list(base_config.items()) + list(config.items()))
        nnlib.Adam = Adam

        def CAInitializerMP( conv_weights_list ):
            result = CAInitializerMPSubprocessor ( [ (i, K.int_shape(conv_weights)) for i, conv_weights in enumerate(conv_weights_list) ], K.floatx(), K.image_data_format() ).run()
            for idx, weights in result:
                K.set_value ( conv_weights_list[idx], weights )
        nnlib.CAInitializerMP = CAInitializerMP
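
CAInitializerMP fans convolution-aware weight generation out to one worker per CPU core, then writes the results back into the live variables with K.set_value. The fan-out/fan-in shape of it, as a hedged stdlib sketch (generate_weights stands in for the real CAGenerateWeights defined elsewhere in the repo):

```python
# Hedged sketch of the same fan-out with multiprocessing.Pool, keyed by layer index.
import multiprocessing as mp
import numpy as np

def generate_weights(job):
    idx, shape = job
    # stand-in for CAGenerateWeights(shape, floatx, data_format)
    return idx, np.random.standard_normal(shape).astype(np.float32)

if __name__ == '__main__':
    jobs = [(0, (3, 3, 3, 64)), (1, (3, 3, 64, 128))]
    with mp.Pool(mp.cpu_count()) as pool:
        for idx, weights in pool.map(generate_weights, jobs):
            print(idx, weights.shape)  # nnlib then does K.set_value(conv_weights_list[idx], weights)
```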
        '''
        not implemented in plaidML
        class ReflectionPadding2D(keras.layers.Layer):
@@ -559,25 +563,25 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
            def call(self, x, mask=None):
                w_pad,h_pad = self.padding
                return tf.pad(x, [[0,0], [h_pad,h_pad], [w_pad,w_pad], [0,0] ], 'REFLECT')
        nnlib.ReflectionPadding2D = ReflectionPadding2D
        '''

    @staticmethod
    def import_keras_contrib(device_config):
        if nnlib.keras_contrib is not None:
            return nnlib.code_import_keras_contrib

        import keras_contrib as keras_contrib_
        nnlib.keras_contrib = keras_contrib_
        nnlib.__initialize_keras_contrib_functions()
        nnlib.code_import_keras_contrib = compile (nnlib.code_import_keras_contrib_string,'','exec')

    @staticmethod
    def __initialize_keras_contrib_functions():
        pass

    @staticmethod
    def import_dlib( device_config = None):
        if nnlib.dlib is not None:
@@ -586,10 +590,10 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
        import dlib as dlib_
        nnlib.dlib = dlib_
        if not device_config.cpu_only and "tensorflow" in device_config.backend and len(device_config.gpu_idxs) > 0:
            nnlib.dlib.cuda.set_device(device_config.gpu_idxs[0])

        nnlib.code_import_dlib = compile (nnlib.code_import_dlib_string,'','exec')

    @staticmethod
    def import_all(device_config = None):
        if nnlib.code_import_all is None:
@@ -597,35 +601,35 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                device_config = nnlib.active_DeviceConfig
            else:
                nnlib.active_DeviceConfig = device_config

            nnlib.import_keras(device_config)
            nnlib.import_keras_contrib(device_config)
            nnlib.code_import_all = compile (nnlib.code_import_keras_string + '\n'
                                            + nnlib.code_import_keras_contrib_string
                                            + nnlib.code_import_all_string,'','exec')
            nnlib.__initialize_all_functions()

        return nnlib.code_import_all

    @staticmethod
    def __initialize_all_functions():
        exec (nnlib.import_keras(nnlib.active_DeviceConfig), locals(), globals())
        exec (nnlib.import_keras_contrib(nnlib.active_DeviceConfig), locals(), globals())

        class DSSIMMSEMaskLoss(object):
            def __init__(self, mask, is_mse=False):
                self.mask = mask
                self.is_mse = is_mse
            def __call__(self,y_true, y_pred):
                total_loss = None
                mask = self.mask
                if self.is_mse:
                    blur_mask = gaussian_blur(max(1, K.int_shape(mask)[1] // 64))(mask)
                    return K.mean ( 50*K.square( y_true*blur_mask - y_pred*blur_mask ) )
                else:
                    return 10*dssim() (y_true*mask, y_pred*mask)
        nnlib.DSSIMMSEMaskLoss = DSSIMMSEMaskLoss
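
DSSIMMSEMaskLoss confines the loss to a masked region: either a blurred-mask-weighted MSE scaled by 50, or 10x DSSIM on the masked images. A hedged usage sketch, again assuming the nnlib aliases are in scope; feeding the mask as a model input is the pattern the trainers use:

```python
# Hedged usage sketch: the mask tensor is captured by the loss object.
inp = Input((128, 128, 3))
mask_t = Input((128, 128, 1))             # region the loss should care about
model = Model([inp, mask_t], Conv2D(3, 3, padding='same')(inp))

model.compile(optimizer='adam',
              loss=DSSIMMSEMaskLoss(mask_t, is_mse=False))  # 10 * dssim on masked images
```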
        '''
        def ResNet(output_nc, use_batch_norm, ngf=64, n_blocks=6, use_dropout=False):
@@ -639,59 +643,59 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                use_bias = False
                def XNormalization(x):
                    return BatchNormalization (axis=3, gamma_initializer=RandomNormal(1., 0.02))(x)

            def Conv2D (filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=use_bias, kernel_initializer=RandomNormal(0, 0.02), bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None):
                return keras.layers.Conv2D( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint )

            def Conv2DTranspose(filters, kernel_size, strides=(1, 1), padding='valid', output_padding=None, data_format=None, dilation_rate=(1, 1), activation=None, use_bias=use_bias, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None):
                return keras.layers.Conv2DTranspose(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, output_padding=output_padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint)

            def func(input):
                def ResnetBlock(dim):
                    def func(input):
                        x = input

                        x = ReflectionPadding2D((1,1))(x)
                        x = Conv2D(dim, 3, 1, padding='valid')(x)
                        x = XNormalization(x)
                        x = ReLU()(x)

                        if use_dropout:
                            x = Dropout(0.5)(x)

                        x = ReflectionPadding2D((1,1))(x)
                        x = Conv2D(dim, 3, 1, padding='valid')(x)
                        x = XNormalization(x)
                        x = ReLU()(x)

                        return Add()([x,input])
                    return func

                x = input

                x = ReflectionPadding2D((3,3))(x)
                x = Conv2D(ngf, 7, 1, 'valid')(x)

                x = ReLU()(XNormalization(Conv2D(ngf*2, 4, 2, 'same')(x)))
                x = ReLU()(XNormalization(Conv2D(ngf*4, 4, 2, 'same')(x)))

                for i in range(n_blocks):
                    x = ResnetBlock(ngf*4)(x)

                x = ReLU()(XNormalization(PixelShuffler()(Conv2D(ngf*2 *4, 3, 1, 'same')(x))))
                x = ReLU()(XNormalization(PixelShuffler()(Conv2D(ngf *4, 3, 1, 'same')(x))))

                x = ReflectionPadding2D((3,3))(x)
                x = Conv2D(output_nc, 7, 1, 'valid')(x)
                x = tanh(x)

                return x
            return func
        nnlib.ResNet = ResNet

        # Defines the Unet generator.
        # |num_downs|: number of downsamplings in UNet. For example,
        # if |num_downs| == 7, image of size 128x128 will become of size 1x1
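
The |num_downs| comment is plain halving arithmetic, worth one quick check: seven stride-2 encoder stages take 128 down to 1.

```python
# Quick check of the comment above: each UNet downsampling halves the spatial size.
size, num_downs = 128, 7
for _ in range(num_downs):
    size //= 2
print(size)  # 1 -> a 128x128 input reaches 1x1 at the bottleneck
```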
@@ -707,76 +711,76 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                use_bias = False
                def XNormalization(x):
                    return BatchNormalization (axis=3, gamma_initializer=RandomNormal(1., 0.02))(x)

            def Conv2D (filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=use_bias, kernel_initializer=RandomNormal(0, 0.02), bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None):
                return keras.layers.Conv2D( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint )

            def Conv2DTranspose(filters, kernel_size, strides=(1, 1), padding='valid', output_padding=None, data_format=None, dilation_rate=(1, 1), activation=None, use_bias=use_bias, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None):
                return keras.layers.Conv2DTranspose(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, output_padding=output_padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint)

            def UNetSkipConnection(outer_nc, inner_nc, sub_model=None, outermost=False, innermost=False, use_dropout=False):
                def func(inp):
                    x = inp
                    x = Conv2D(inner_nc, 4, 2, 'valid')(ReflectionPadding2D( (1,1) )(x))
                    x = XNormalization(x)
                    x = ReLU()(x)

                    if not innermost:
                        x = sub_model(x)

                    if not outermost:
                        x = Conv2DTranspose(outer_nc, 3, 2, 'same')(x)
                        x = XNormalization(x)
                        x = ReLU()(x)

                        if not innermost:
                            if use_dropout:
                                x = Dropout(0.5)(x)

                            x = Concatenate(axis=3)([inp, x])
                    else:
                        x = Conv2DTranspose(outer_nc, 3, 2, 'same')(x)
                        x = tanh(x)
-                        return x
+                    return x

                return func
            def func(input):
                unet_block = UNetSkipConnection(ngf * 8, ngf * 8, sub_model=None, innermost=True)

                for i in range(num_downs - 5):
                    unet_block = UNetSkipConnection(ngf * 8, ngf * 8, sub_model=unet_block, use_dropout=use_dropout)

                unet_block = UNetSkipConnection(ngf * 4, ngf * 8, sub_model=unet_block)
                unet_block = UNetSkipConnection(ngf * 2, ngf * 4, sub_model=unet_block)
                unet_block = UNetSkipConnection(ngf, ngf * 2, sub_model=unet_block)
                unet_block = UNetSkipConnection(output_nc, ngf, sub_model=unet_block, outermost=True)

                return unet_block(input)
            return func
        nnlib.UNet = UNet

        #predicts based on two past_image_tensors
        def UNetTemporalPredictor(output_nc, use_batch_norm, num_downs, ngf=64, use_dropout=False):
            exec (nnlib.import_all(), locals(), globals())
            def func(inputs):
                past_2_image_tensor, past_1_image_tensor = inputs

                x = Concatenate(axis=3)([ past_2_image_tensor, past_1_image_tensor ])
                x = UNet(3, use_batch_norm, num_downs=num_downs, ngf=ngf, use_dropout=use_dropout) (x)

                return x
            return func
        nnlib.UNetTemporalPredictor = UNetTemporalPredictor

        def NLayerDiscriminator(use_batch_norm, ndf=64, n_layers=3):
            exec (nnlib.import_all(), locals(), globals())

            if not use_batch_norm:
                use_bias = True
                def XNormalization(x):
@@ -791,22 +795,22 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
            def func(input):
                x = input

                x = ZeroPadding2D((1,1))(x)
                x = Conv2D( ndf, 4, 2, 'valid')(x)
                x = LeakyReLU(0.2)(x)

                for i in range(1, n_layers):
                    x = ZeroPadding2D((1,1))(x)
                    x = Conv2D( ndf * min(2 ** i, 8), 4, 2, 'valid')(x)
                    x = XNormalization(x)
                    x = LeakyReLU(0.2)(x)

                x = ZeroPadding2D((1,1))(x)
                x = Conv2D( ndf * min(2 ** n_layers, 8), 4, 1, 'valid')(x)
                x = XNormalization(x)
                x = LeakyReLU(0.2)(x)

                x = ZeroPadding2D((1,1))(x)
                return Conv2D( 1, 4, 1, 'valid')(x)
            return func
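
This is the classic PatchGAN discriminator: a stack of padded 4x4 convolutions, stride 2 for the first n_layers stages and stride 1 for the last two, ending in a grid of per-patch logits rather than one scalar. The output grid size follows from the usual conv formula; a hedged check for the defaults:

```python
# Hedged check of the PatchGAN output grid for a 256x256 input (n_layers=3).
def conv_out(size, stride, kernel=4, pad=1):
    return (size + 2 * pad - kernel) // stride + 1

size = 256
for _ in range(3):      # initial conv + (n_layers - 1) strided convs, all stride 2
    size = conv_out(size, 2)
for _ in range(2):      # two final convs, stride 1
    size = conv_out(size, 1)
print(size)             # 30 -> a 30x30 grid of real/fake patch logits, as in pix2pix
```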
@@ -816,7 +820,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
    def finalize_all():
        if nnlib.keras_contrib is not None:
            nnlib.keras_contrib = None

        if nnlib.keras is not None:
            nnlib.keras.backend.clear_session()
            nnlib.keras = None
@@ -824,11 +828,11 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
        if nnlib.tf is not None:
            nnlib.tf_sess = None
            nnlib.tf = None


class CAInitializerMPSubprocessor(Subprocessor):
    class Cli(Subprocessor.Cli):

        #override
        def on_initialize(self, client_dict):
            self.floatx = client_dict['floatx']
@@ -836,7 +840,7 @@ class CAInitializerMPSubprocessor(Subprocessor):

        #override
        def process_data(self, data):
            idx, shape = data
            weights = CAGenerateWeights (shape, self.floatx, self.data_format)
            return idx, weights
@@ -844,30 +848,30 @@ class CAInitializerMPSubprocessor(Subprocessor):
        def get_data_name (self, data):
            #return string identifier of your data
            return "undefined"

    #override
    def __init__(self, idx_shapes_list, floatx, data_format ):
        self.idx_shapes_list = idx_shapes_list
        self.floatx = floatx
        self.data_format = data_format
        self.result = []
        super().__init__('CAInitializerMP', CAInitializerMPSubprocessor.Cli)

    #override
    def on_clients_initialized(self):
        io.progress_bar ("Processing", len (self.idx_shapes_list))

    #override
    def on_clients_finalized(self):
        io.progress_bar_close()

    #override
    def process_info_generator(self):
        for i in range(multiprocessing.cpu_count()):
            yield 'CPU%d' % (i), {}, {'device_idx': i,
                                      'device_name': 'CPU%d' % (i),
                                      'floatx' : self.floatx,
                                      'data_format' : self.data_format
                                      }
@@ -875,20 +879,19 @@ class CAInitializerMPSubprocessor(Subprocessor):
    #override
    def get_data(self, host_dict):
        if len (self.idx_shapes_list) > 0:
            return self.idx_shapes_list.pop(0)
        return None

    #override
    def on_data_return (self, host_dict, data):
        self.idx_shapes_list.insert(0, data)

    #override
    def on_result (self, host_dict, data, result):
        self.result.append ( result )
        io.progress_bar_inc(1)

    #override
    def get_result(self):
        return self.result