From 72ba6b103cbfc28a30617341480b2cfd73afbea6 Mon Sep 17 00:00:00 2001
From: iperov
Date: Tue, 19 Feb 2019 17:33:12 +0400
Subject: [PATCH] added support of AMD video cards

added Intel's plaidML backend to use the OpenCL engine. Check the new requirements.
smart backend selection in device.py; the env var 'force_plaidML' can be set to force the plaidML backend
all tf functions transferred to pure keras
MTCNN transferred to pure keras, but it runs slowly on plaidML (so it is forced to CPU in this case)
default batch size for all models and VRAM sizes is now 4; feel free to adjust it on your own
SAE: default style options are now ZERO, because there are no values that are best for all scenes; set them on your own.
SAE: brought back the pixel_loss option; feel free to enable it on your own.
SAE: added option multiscale_decoder (default true); you can disable it to get exactly the same behaviour as the H, DF, LIAEF models.
fixed converter output to .png
added a Linux fork reference to doc/doc_build_and_repository_info.md
---
 __dev/port.py | 344 +++++
 __dev/test.py | 1282 +++++++++++++++++
 doc/doc_build_and_repository_info.md | 1 +
 facelib/MTCExtractor.py | 336 ++++-
 facelib/det1.npy | Bin 27368 -> 0 bytes
 facelib/mtcnn.py | 761 ----------
 facelib/{det3.npy => mtcnn_onet.h5} | Bin 1557360 -> 1609720 bytes
 facelib/mtcnn_pnet.h5 | Bin 0 -> 63448 bytes
 facelib/{det2.npy => mtcnn_rnet.h5} | Bin 401681 -> 443728 bytes
 main.py | 2 +
 mainscripts/Converter.py | 3 +-
 mainscripts/Extractor.py | 79 +-
 models/ModelBase.py | 2 +-
 models/Model_DF/Model.py | 4 +-
 models/Model_H128/Model.py | 4 +-
 models/Model_H64/Model.py | 6 +-
 models/Model_LIAEF128/Model.py | 4 +-
 models/Model_SAE/Model.py | 189 +--
 nnlib/device.py | 333 +++++
 nnlib/devicelib.py | 186 ---
 nnlib/nnlib.py | 578 ++++----
 ...> requirements-gpu-opencl-cuda9-cudnn7.txt | 1 +
 samples/SampleProcessor.py | 45 +-
 utils/image_utils.py | 23 +-
 24 files changed, 2694 insertions(+), 1489 deletions(-)
 create mode 100644 __dev/port.py
 create mode 100644 __dev/test.py
 delete mode 100644 facelib/det1.npy
 delete mode 100644 facelib/mtcnn.py
 rename facelib/{det3.npy => mtcnn_onet.h5} (95%)
 create mode 100644 facelib/mtcnn_pnet.h5
 rename facelib/{det2.npy => mtcnn_rnet.h5} (88%)
 create mode 100644 nnlib/device.py
 delete mode 100644 nnlib/devicelib.py
 rename requirements-gpu-cuda9-cudnn7.txt => requirements-gpu-opencl-cuda9-cudnn7.txt (90%)

diff --git a/__dev/port.py b/__dev/port.py
new file mode 100644
index 0000000..8d6bc06
--- /dev/null
+++ b/__dev/port.py
@@ -0,0 +1,344 @@
+#import FaceLandmarksExtractor + + +import numpy as np +import dlib +import torch +import keras +from keras import backend as K +from keras import layers as KL +import math +import os +import time +import code + +class TorchBatchNorm2D(keras.engine.topology.Layer): + def __init__(self, axis=-1, momentum=0.99, epsilon=1e-3, **kwargs): + super(TorchBatchNorm2D, self).__init__(**kwargs) + self.supports_masking = True + self.axis = axis + self.momentum = momentum + self.epsilon = epsilon + + def build(self, input_shape): + dim = input_shape[self.axis] + if dim is None: + raise ValueError('Axis ' + str(self.axis) + ' of ' + 'input tensor should have a defined dimension ' + 'but the layer received an input with shape ' + + str(input_shape) + '.') + shape = (dim,) + self.gamma = self.add_weight(shape=shape, name='gamma', initializer='ones', regularizer=None, constraint=None) + self.beta = self.add_weight(shape=shape, name='beta', initializer='zeros', regularizer=None, constraint=None) + self.moving_mean = self.add_weight(shape=shape, name='moving_mean',
initializer='zeros', trainable=False) + self.moving_variance = self.add_weight(shape=shape, name='moving_variance', initializer='ones', trainable=False) + self.built = True + + def call(self, inputs, training=None): + input_shape = K.int_shape(inputs) + + broadcast_shape = [1] * len(input_shape) + broadcast_shape[self.axis] = input_shape[self.axis] + + broadcast_moving_mean = K.reshape(self.moving_mean, broadcast_shape) + broadcast_moving_variance = K.reshape(self.moving_variance, broadcast_shape) + broadcast_gamma = K.reshape(self.gamma, broadcast_shape) + broadcast_beta = K.reshape(self.beta, broadcast_shape) + invstd = K.ones (shape=broadcast_shape, dtype='float32') / K.sqrt(broadcast_moving_variance + K.constant(self.epsilon, dtype='float32')) + + return (inputs - broadcast_moving_mean) * invstd * broadcast_gamma + broadcast_beta + + def get_config(self): + config = { 'axis': self.axis, 'momentum': self.momentum, 'epsilon': self.epsilon } + base_config = super(TorchBatchNorm2D, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + +def t2kw_conv2d (src): + if src.bias is not None: + return [ np.moveaxis(src.weight.data.cpu().numpy(), [0,1,2,3], [3,2,0,1]), src.bias.data.cpu().numpy() ] + else: + return [ np.moveaxis(src.weight.data.cpu().numpy(), [0,1,2,3], [3,2,0,1])] + + +def t2kw_bn2d(src): + return [ src.weight.data.cpu().numpy(), src.bias.data.cpu().numpy(), src.running_mean.cpu().numpy(), src.running_var.cpu().numpy() ] + + + +import face_alignment +fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D,enable_cuda=False,enable_cudnn=False,use_cnn_face_detector=True).face_alignemnt_net +fa.eval() + + +def KerasConvBlock(in_planes, out_planes, input, srctorch): + out1 = TorchBatchNorm2D(axis=1, momentum=0.1, epsilon=1e-05, weights=t2kw_bn2d(srctorch.bn1) )(input) + out1 = KL.Activation( keras.backend.relu ) (out1) + out1 = KL.ZeroPadding2D(padding=(1, 1), data_format='channels_first')(out1) + out1 = KL.convolutional.Conv2D( int(out_planes/2), kernel_size=3, strides=1, data_format='channels_first', padding='valid', use_bias = False, weights=t2kw_conv2d(srctorch.conv1) ) (out1) + + out2 = TorchBatchNorm2D(axis=1, momentum=0.1, epsilon=1e-05, weights=t2kw_bn2d(srctorch.bn2) )(out1) + out2 = KL.Activation( keras.backend.relu ) (out2) + out2 = KL.ZeroPadding2D(padding=(1, 1), data_format='channels_first')(out2) + out2 = KL.convolutional.Conv2D( int(out_planes/4), kernel_size=3, strides=1, data_format='channels_first', padding='valid', use_bias = False, weights=t2kw_conv2d(srctorch.conv2) ) (out2) + + out3 = TorchBatchNorm2D(axis=1, momentum=0.1, epsilon=1e-05, weights=t2kw_bn2d(srctorch.bn3) )(out2) + out3 = KL.Activation( keras.backend.relu ) (out3) + out3 = KL.ZeroPadding2D(padding=(1, 1), data_format='channels_first')(out3) + out3 = KL.convolutional.Conv2D( int(out_planes/4), kernel_size=3, strides=1, data_format='channels_first', padding='valid', use_bias = False, weights=t2kw_conv2d(srctorch.conv3) ) (out3) + + out3 = KL.Concatenate(axis=1)([out1, out2, out3]) + + if in_planes != out_planes: + downsample = TorchBatchNorm2D(axis=1, momentum=0.1, epsilon=1e-05, weights=t2kw_bn2d(srctorch.downsample[0]) )(input) + downsample = KL.Activation( keras.backend.relu ) (downsample) + downsample = KL.convolutional.Conv2D( out_planes, kernel_size=1, strides=1, data_format='channels_first', padding='valid', use_bias = False, weights=t2kw_conv2d(srctorch.downsample[2]) ) (downsample) + out3 = KL.add ( [out3, downsample] ) + else: + out3 = KL.add ( 
[out3, input] ) + + + return out3 + +def KerasHourGlass (depth, input, srctorch): + + up1 = KerasConvBlock(256, 256, input, srctorch._modules['b1_%d' % (depth)]) + + low1 = KL.AveragePooling2D (pool_size=2, strides=2, data_format='channels_first', padding='valid' )(input) + low1 = KerasConvBlock (256, 256, low1, srctorch._modules['b2_%d' % (depth)]) + + if depth > 1: + low2 = KerasHourGlass (depth-1, low1, srctorch) + else: + low2 = KerasConvBlock(256, 256, low1, srctorch._modules['b2_plus_%d' % (depth)]) + + low3 = KerasConvBlock(256, 256, low2, srctorch._modules['b3_%d' % (depth)]) + + up2 = KL.UpSampling2D(size=2, data_format='channels_first') (low3) + return KL.add ( [up1, up2] ) + +model_path = os.path.join( os.path.dirname(__file__) , "2DFAN-4.h5" ) +if os.path.exists (model_path): + t = time.time() + model = keras.models.load_model (model_path, custom_objects={'TorchBatchNorm2D': TorchBatchNorm2D} ) + print ('load takes = %f' %( time.time() - t ) ) +else: + _input = keras.layers.Input ( shape=(3, 256,256) ) + x = KL.ZeroPadding2D(padding=(3, 3), data_format='channels_first')(_input) + x = KL.convolutional.Conv2D( 64, kernel_size=7, strides=2, data_format='channels_first', padding='valid', weights=t2kw_conv2d(fa.conv1) ) (x) + + x = TorchBatchNorm2D(axis=1, momentum=0.1, epsilon=1e-05, weights=t2kw_bn2d(fa.bn1) )(x) + x = KL.Activation( keras.backend.relu ) (x) + + x = KerasConvBlock (64, 128, x, fa.conv2) + x = KL.AveragePooling2D (pool_size=2, strides=2, data_format='channels_first', padding='valid' ) (x) + x = KerasConvBlock (128, 128, x, fa.conv3) + x = KerasConvBlock (128, 256, x, fa.conv4) + + outputs = [] + previous = x + for i in range(4): + ll = KerasHourGlass (4, previous, fa._modules['m%d' % (i) ]) + ll = KerasConvBlock (256,256, ll, fa._modules['top_m_%d' % (i)]) + + ll = KL.convolutional.Conv2D(256, kernel_size=1, strides=1, data_format='channels_first', padding='valid', weights=t2kw_conv2d( fa._modules['conv_last%d' % (i)] ) ) (ll) + ll = TorchBatchNorm2D(axis=1, momentum=0.1, epsilon=1e-05, weights=t2kw_bn2d( fa._modules['bn_end%d' % (i)] ) )(ll) + ll = KL.Activation( keras.backend.relu ) (ll) + + tmp_out = KL.convolutional.Conv2D(68, kernel_size=1, strides=1, data_format='channels_first', padding='valid', weights=t2kw_conv2d( fa._modules['l%d' % (i)] ) ) (ll) + outputs.append(tmp_out) + if i < 4 - 1: + ll = KL.convolutional.Conv2D(256, kernel_size=1, strides=1, data_format='channels_first', padding='valid', weights=t2kw_conv2d( fa._modules['bl%d' % (i)] ) ) (ll) + previous = KL.add ( [previous, ll, KL.convolutional.Conv2D(256, kernel_size=1, strides=1, data_format='channels_first', padding='valid', weights=t2kw_conv2d( fa._modules['al%d' % (i)] ) ) (tmp_out) ] ) + + model = keras.models.Model (_input, outputs) + model.compile ( loss='mse', optimizer='adam' ) + model.save (model_path) + model.save_weights ( os.path.join( os.path.dirname(__file__) , 'weights.h5') ) + +def transform(point, center, scale, resolution, invert=False): + _pt = torch.ones(3) + _pt[0] = point[0] + _pt[1] = point[1] + + h = 200.0 * scale + t = torch.eye(3) + t[0, 0] = resolution / h + t[1, 1] = resolution / h + t[0, 2] = resolution * (-center[0] / h + 0.5) + t[1, 2] = resolution * (-center[1] / h + 0.5) + + if invert: + t = torch.inverse(t) + + new_point = (torch.matmul(t, _pt))[0:2] + + return new_point.int() + +def get_preds_fromhm(hm, center=None, scale=None): + max, idx = torch.max( hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2) + idx += 1 + preds = idx.view(idx.size(0), 
idx.size(1), 1).repeat(1, 1, 2).float() + preds[..., 0].apply_(lambda x: (x - 1) % hm.size(3) + 1) + preds[..., 1].add_(-1).div_(hm.size(2)).floor_().add_(1) + + for i in range(preds.size(0)): + for j in range(preds.size(1)): + hm_ = hm[i, j, :] + pX, pY = int(preds[i, j, 0]) - 1, int(preds[i, j, 1]) - 1 + if pX > 0 and pX < 63 and pY > 0 and pY < 63: + diff = torch.FloatTensor( + [hm_[pY, pX + 1] - hm_[pY, pX - 1], + hm_[pY + 1, pX] - hm_[pY - 1, pX]]) + preds[i, j].add_(diff.sign_().mul_(.25)) + + preds.add_(-.5) + + preds_orig = torch.zeros(preds.size()) + if center is not None and scale is not None: + for i in range(hm.size(0)): + for j in range(hm.size(1)): + preds_orig[i, j] = transform( + preds[i, j], center, scale, hm.size(2), True) + + return preds, preds_orig + + +def get_preds_fromhm2(a, center=None, scale=None): + b = a.reshape ( (a.shape[0], a.shape[1]*a.shape[2]) ) + c = b.argmax(1).reshape ( (a.shape[0], 1) ).repeat(2, axis=1).astype(np.float) + c[:,0] %= a.shape[2] + c[:,1] = np.apply_along_axis ( lambda x: np.floor(x / a.shape[2]), 0, c[:,1] ) + + for i in range(a.shape[0]): + pX, pY = int(c[i,0]), int(c[i,1]) + if pX > 0 and pX < 63 and pY > 0 and pY < 63: + diff = np.array ( [a[i,pY,pX+1]-a[i,pY,pX-1], a[i,pY+1,pX]-a[i,pY-1,pX]] ) + c[i] += np.sign(diff)*0.25 + + c += 0.5 + result = np.empty ( (a.shape[0],2), dtype=np.int ) + if center is not None and scale is not None: + for i in range(a.shape[0]): + pt = np.array ( [c[i][0], c[i][1], 1.0] ) + h = 200.0 * scale + m = np.eye(3) + m[0,0] = a.shape[2] / h + m[1,1] = a.shape[2] / h + m[0,2] = a.shape[2] * ( -center[0] / h + 0.5 ) + m[1,2] = a.shape[2] * ( -center[1] / h + 0.5 ) + m = np.linalg.inv(m) + result[i] = np.matmul (m, pt)[0:2].astype( np.int ) + return result + + + +rnd_data = np.random.rand (3, 256,256).astype(np.float32) +#rnd_data = np.random.random_integers (2, size=(3, 256,256)).astype(np.float32) +#rnd_data = np.array ( [[[1]*256]*256]*3 , dtype=np.float32 ) +input_data = np.array ([rnd_data]) + +fa_out_tensor = fa( torch.autograd.Variable( torch.from_numpy(input_data), volatile=True) )[-1].data.cpu() +fa_out = fa_out_tensor.numpy() + +t = time.time() +m_out = model.predict ( input_data )[-1] +print ('predict takes = %f' %( time.time() - t ) ) +t = time.time() + +#fa_base_out = fa_base(torch.autograd.Variable( torch.from_numpy(input_data), volatile=True))[0].data.cpu().numpy() + +print ( 'shapes = %s , %s , equal == %s ' % (fa_out.shape, m_out.shape, (fa_out.shape == m_out.shape) ) ) +print ( 'allclose == %s' % ( np.allclose(fa_out, m_out) ) ) +print ( 'total abs diff outputs = %f' % ( np.sum ( np.abs(np.ndarray.flatten(fa_out-m_out))) )) + +### +d = dlib.rectangle(156,364,424,765) + +center = torch.FloatTensor( + [d.right() - (d.right() - d.left()) / 2.0, d.bottom() - + (d.bottom() - d.top()) / 2.0]) +center[1] = center[1] - (d.bottom() - d.top()) * 0.12 +scale = (d.right() - d.left() + d.bottom() - d.top()) / 195.0 +pts, pts_img = get_preds_fromhm (fa_out_tensor, center, scale) +pts_img = pts_img.view(68, 2).numpy() + +### + +m_pts_img = get_preds_fromhm2 (m_out[0], center, scale) + +print ('pts1 == pts2 == %s' % ( np.array_equal(pts_img, m_pts_img) ) ) + +code.interact(local=dict(globals(), **locals())) + +#print ( np.array_equal (fa_out, m_out) ) #>>> False +#code.interact(local=dict(globals(), **locals())) + +#code.interact(local=locals()) + +#code.interact(local=locals()) + +### +#fa.conv1.weight = torch.nn.Parameter( torch.from_numpy ( np.array( [[[[1.0]*7]*7]*3]*64, dtype=np.float32) ) ) 
+#fa.conv1.bias = torch.nn.Parameter( torch.from_numpy ( np.array( [1.0]*64, dtype=np.float32 ) ) ) +#model.layers[2].set_weights( [ np.array( [[[[1.0]*64]*3]*7]*7, dtype=np.float32), np.array( [1.0]*64, dtype=np.float32 ) ] ) + +#b = np.array( [1.0]*64, dtype=np.float32 ) +#b = np.random.rand (64).astype(np.float32) +#w = np.array( [[[[1.0]*7]*7]*3]*64, dtype=np.float32) +#w = np.random.rand (64, 3, 7, 7).astype(np.float32) +#s = w #fa_base.conv1.weight.data.cpu().numpy() #64x3x7x7 +#d = np.moveaxis(s, [0,1,2,3], [3,2,0,1] ) + + +#fa.conv1.weight = torch.nn.Parameter( torch.from_numpy ( w ) ) +#fa.conv1.bias = torch.nn.Parameter( torch.from_numpy ( b ) ) +#model.layers[2].set_weights( [np.transpose(w), b] ) +#model.layers[2].set_weights( [d, b] ) +''' +for i in range(0,64): + for j in range(0,128): + b = np.array_equal (fa_out[i,j], m_out[i,j]) + if b == False: + print ( '%d %d == False' %(i,j) ) #>>> False +''' + + +''' +input = -2.7966828 +gamma = 0.7640695571899414 +beta = 0.22801123559474945 +moving_mean = 0.12693816423416138 +moving_variance = 0.10409101098775864 +epsilon = 0.0 #0.00001 + +print ( gamma * (input - moving_mean) / math.sqrt(moving_variance + epsilon) + beta ) +print ( (input - moving_mean) * (1.0 / math.sqrt(moving_variance) + epsilon)*gamma + beta ) +''' +#code.interact(local=dict(globals(), **locals())) +''' +conv_64_128 = x +conv_64_128 = TorchBatchNorm2D(axis=1, momentum=0.1, epsilon=1e-05, weights=t2kw_bn2d(fa.conv2.bn1) )(conv_64_128) +conv_64_128 = KL.Activation( keras.backend.relu ) (conv_64_128) +conv_64_128 = KL.ZeroPadding2D(padding=(1, 1), data_format='channels_first')(conv_64_128) +conv_64_128 = KL.convolutional.Conv2D( 64, kernel_size=3, strides=1, data_format='channels_first', padding='valid', use_bias = False, weights=t2kw_conv2d(fa.conv2.conv1) ) (conv_64_128) +conv_64_128 = TorchBatchNorm2D(axis=1, momentum=0.1, epsilon=1e-05, weights=t2kw_bn2d(fa.conv2.bn2) )(conv_64_128) +conv_64_128 = KL.Activation( keras.backend.relu ) (conv_64_128) +''' +# +# +#keras result = gamma * (input - moving_mean) / sqrt(moving_variance + epsilon) + beta +# +# (input - mean / scale_factor) / sqrt(var / scale_factor + eps) +# +#input = -3.0322433 +# +#gamma = 0.1859646 +#beta = -0.17041835 +#moving_mean = -3.0345056 +#moving_variance = 8.773307 +#epsilon = 0.00001 +# +#result = - 0.17027631 +# +# fa result = 1.930317 \ No newline at end of file diff --git a/__dev/test.py b/__dev/test.py new file mode 100644 index 0000000..2bd2266 --- /dev/null +++ b/__dev/test.py @@ -0,0 +1,1282 @@ +import os +os.environ['force_plaidML'] = '1' + +import sys +import argparse +from utils import Path_utils +from utils import os_utils +from facelib import LandmarksProcessor +from pathlib import Path +import numpy as np +import cv2 +import time +import multiprocessing +import traceback +from tqdm import tqdm +from utils.DFLPNG import DFLPNG +from utils.DFLJPG import DFLJPG +from utils.cv2_utils import * +from utils import image_utils +import shutil + + + +def umeyama(src, dst, estimate_scale): + """Estimate N-D similarity transformation with or without scaling. + Parameters + ---------- + src : (M, N) array + Source coordinates. + dst : (M, N) array + Destination coordinates. + estimate_scale : bool + Whether to estimate scaling factor. + Returns + ------- + T : (N + 1, N + 1) + The homogeneous similarity transformation matrix. The matrix contains + NaN values only if the problem is not well-conditioned. + References + ---------- + .. 
[1] "Least-squares estimation of transformation parameters between two + point patterns", Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573 + """ + + num = src.shape[0] + dim = src.shape[1] + + # Compute mean of src and dst. + src_mean = src.mean(axis=0) + dst_mean = dst.mean(axis=0) + + # Subtract mean from src and dst. + src_demean = src - src_mean + dst_demean = dst - dst_mean + + # Eq. (38). + A = np.dot(dst_demean.T, src_demean) / num + + # Eq. (39). + d = np.ones((dim,), dtype=np.double) + if np.linalg.det(A) < 0: + d[dim - 1] = -1 + + T = np.eye(dim + 1, dtype=np.double) + + U, S, V = np.linalg.svd(A) + + # Eq. (40) and (43). + rank = np.linalg.matrix_rank(A) + if rank == 0: + return np.nan * T + elif rank == dim - 1: + if np.linalg.det(U) * np.linalg.det(V) > 0: + T[:dim, :dim] = np.dot(U, V) + else: + s = d[dim - 1] + d[dim - 1] = -1 + T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V)) + d[dim - 1] = s + else: + T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V.T)) + + if estimate_scale: + # Eq. (41) and (42). + scale = 1.0 / src_demean.var(axis=0).sum() * np.dot(S, d) + else: + scale = 1.0 + + T[:dim, dim] = dst_mean - scale * np.dot(T[:dim, :dim], src_mean.T) + T[:dim, :dim] *= scale + + return T + +def random_transform(image, rotation_range=10, zoom_range=0.5, shift_range=0.05, random_flip=0): + h, w = image.shape[0:2] + rotation = np.random.uniform(-rotation_range, rotation_range) + scale = np.random.uniform(1 - zoom_range, 1 + zoom_range) + tx = np.random.uniform(-shift_range, shift_range) * w + ty = np.random.uniform(-shift_range, shift_range) * h + mat = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, scale) + mat[:, 2] += (tx, ty) + result = cv2.warpAffine( + image, mat, (w, h), borderMode=cv2.BORDER_REPLICATE) + if np.random.random() < random_flip: + result = result[:, ::-1] + return result + +# get pair of random warped images from aligned face image +def random_warp(image, coverage=160, scale = 5, zoom = 1): + assert image.shape == (256, 256, 3) + range_ = np.linspace(128 - coverage//2, 128 + coverage//2, 5) + mapx = np.broadcast_to(range_, (5, 5)) + mapy = mapx.T + + mapx = mapx + np.random.normal(size=(5,5), scale=scale) + mapy = mapy + np.random.normal(size=(5,5), scale=scale) + + interp_mapx = cv2.resize(mapx, (80*zoom,80*zoom))[8*zoom:72*zoom,8*zoom:72*zoom].astype('float32') + interp_mapy = cv2.resize(mapy, (80*zoom,80*zoom))[8*zoom:72*zoom,8*zoom:72*zoom].astype('float32') + + warped_image = cv2.remap(image, interp_mapx, interp_mapy, cv2.INTER_LINEAR) + + src_points = np.stack([mapx.ravel(), mapy.ravel() ], axis=-1) + dst_points = np.mgrid[0:65*zoom:16*zoom,0:65*zoom:16*zoom].T.reshape(-1,2) + mat = umeyama(src_points, dst_points, True)[0:2] + + target_image = cv2.warpAffine(image, mat, (64*zoom,64*zoom)) + + return warped_image, target_image + +def input_process(stdin_fd, sq, str): + sys.stdin = os.fdopen(stdin_fd) + try: + inp = input (str) + sq.put (True) + except: + sq.put (False) + +def input_in_time (str, max_time_sec): + sq = multiprocessing.Queue() + p = multiprocessing.Process(target=input_process, args=( sys.stdin.fileno(), sq, str)) + p.start() + t = time.time() + inp = False + while True: + if not sq.empty(): + inp = sq.get() + break + if time.time() - t > max_time_sec: + break + p.terminate() + sys.stdin = os.fdopen( sys.stdin.fileno() ) + return inp + + + +def subprocess(sq,cq): + prefetch = 2 + while True: + while prefetch > -1: + cq.put ( np.array([1]) ) #memory leak numpy==1.16.0 , but all fine in 1.15.4 + #cq.put ( [1] ) #no memory leak + prefetch 
-= 1 + + sq.get() #waiting msg from serv to continue posting + prefetch += 1 + + + +def get_image_hull_mask (image_shape, image_landmarks): + if len(image_landmarks) != 68: + raise Exception('get_image_hull_mask works only with 68 landmarks') + + hull_mask = np.zeros(image_shape[0:2]+(1,),dtype=np.float32) + + cv2.fillConvexPoly( hull_mask, cv2.convexHull( np.concatenate ( (image_landmarks[0:17], image_landmarks[48:], [image_landmarks[0]], [image_landmarks[8]], [image_landmarks[16]])) ), (1,) ) + cv2.fillConvexPoly( hull_mask, cv2.convexHull( np.concatenate ( (image_landmarks[27:31], [image_landmarks[33]]) ) ), (1,) ) + cv2.fillConvexPoly( hull_mask, cv2.convexHull( np.concatenate ( (image_landmarks[17:27], [image_landmarks[0]], [image_landmarks[27]], [image_landmarks[16]], [image_landmarks[33]])) ), (1,) ) + + return hull_mask + + +def umeyama(src, dst, estimate_scale): + """Estimate N-D similarity transformation with or without scaling. + Parameters + ---------- + src : (M, N) array + Source coordinates. + dst : (M, N) array + Destination coordinates. + estimate_scale : bool + Whether to estimate scaling factor. + Returns + ------- + T : (N + 1, N + 1) + The homogeneous similarity transformation matrix. The matrix contains + NaN values only if the problem is not well-conditioned. + References + ---------- + .. [1] "Least-squares estimation of transformation parameters between two + point patterns", Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573 + """ + + num = src.shape[0] + dim = src.shape[1] + + # Compute mean of src and dst. + src_mean = src.mean(axis=0) + dst_mean = dst.mean(axis=0) + + # Subtract mean from src and dst. + src_demean = src - src_mean + dst_demean = dst - dst_mean + + # Eq. (38). + A = np.dot(dst_demean.T, src_demean) / num + + # Eq. (39). + d = np.ones((dim,), dtype=np.double) + if np.linalg.det(A) < 0: + d[dim - 1] = -1 + + T = np.eye(dim + 1, dtype=np.double) + + U, S, V = np.linalg.svd(A) + + # Eq. (40) and (43). + rank = np.linalg.matrix_rank(A) + if rank == 0: + return np.nan * T + elif rank == dim - 1: + if np.linalg.det(U) * np.linalg.det(V) > 0: + T[:dim, :dim] = np.dot(U, V) + else: + s = d[dim - 1] + d[dim - 1] = -1 + T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V)) + d[dim - 1] = s + else: + T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V.T)) + + if estimate_scale: + # Eq. (41) and (42). 
+ scale = 1.0 / src_demean.var(axis=0).sum() * np.dot(S, d) + else: + scale = 1.0 + + T[:dim, dim] = dst_mean - scale * np.dot(T[:dim, :dim], src_mean.T) + T[:dim, :dim] *= scale + + return T + +mean_face_x = np.array([ +0.000213256, 0.0752622, 0.18113, 0.29077, 0.393397, 0.586856, 0.689483, 0.799124, +0.904991, 0.98004, 0.490127, 0.490127, 0.490127, 0.490127, 0.36688, 0.426036, +0.490127, 0.554217, 0.613373, 0.121737, 0.187122, 0.265825, 0.334606, 0.260918, +0.182743, 0.645647, 0.714428, 0.793132, 0.858516, 0.79751, 0.719335, 0.254149, +0.340985, 0.428858, 0.490127, 0.551395, 0.639268, 0.726104, 0.642159, 0.556721, +0.490127, 0.423532, 0.338094, 0.290379, 0.428096, 0.490127, 0.552157, 0.689874, +0.553364, 0.490127, 0.42689 ]) + +mean_face_y = np.array([ +0.106454, 0.038915, 0.0187482, 0.0344891, 0.0773906, 0.0773906, 0.0344891, +0.0187482, 0.038915, 0.106454, 0.203352, 0.307009, 0.409805, 0.515625, 0.587326, +0.609345, 0.628106, 0.609345, 0.587326, 0.216423, 0.178758, 0.179852, 0.231733, +0.245099, 0.244077, 0.231733, 0.179852, 0.178758, 0.216423, 0.244077, 0.245099, +0.780233, 0.745405, 0.727388, 0.742578, 0.727388, 0.745405, 0.780233, 0.864805, +0.902192, 0.909281, 0.902192, 0.864805, 0.784792, 0.778746, 0.785343, 0.778746, +0.784792, 0.824182, 0.831803, 0.824182 ]) + +landmarks_2D = np.stack( [ mean_face_x, mean_face_y ], axis=1 ) + +def get_transform_mat (image_landmarks, output_size, scale=1.0): + if not isinstance(image_landmarks, np.ndarray): + image_landmarks = np.array (image_landmarks) + + padding = (output_size / 64) * 12 + + mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2] + mat = mat * (output_size - 2 * padding) + mat[:,2] += padding + mat *= (1 / scale) + mat[:,2] += -output_size*( ( (1 / scale) - 1.0 ) / 2 ) + + return mat + +#alignments = [] +# +#aligned_path_image_paths = Path_utils.get_image_paths("D:\\DeepFaceLab\\workspace issue\\data_dst\\aligned") +#for filepath in tqdm(aligned_path_image_paths, desc="Collecting alignments", ascii=True ): +# filepath = Path(filepath) +# +# if filepath.suffix == '.png': +# dflimg = DFLPNG.load( str(filepath), print_on_no_embedded_data=True ) +# elif filepath.suffix == '.jpg': +# dflimg = DFLJPG.load ( str(filepath), print_on_no_embedded_data=True ) +# else: +# print ("%s is not a dfl image file" % (filepath.name) ) +# +# #source_filename_stem = Path( dflimg.get_source_filename() ).stem +# #if source_filename_stem not in alignments.keys(): +# # alignments[ source_filename_stem ] = [] +# +# #alignments[ source_filename_stem ].append (dflimg.get_source_landmarks()) +# alignments.append (dflimg.get_source_landmarks()) +import mathlib +def main(): + from nnlib import nnlib + exec( nnlib.import_all(), locals(), globals() ) + PMLTile = nnlib.PMLTile + PMLK = nnlib.PMLK + + image = cv2.imread('D:\\DeepFaceLab\\test\\00000.png').astype(np.float32) / 255.0 + image = cv2.resize ( image, (128,128) ) + + image = cv2.cvtColor (image, cv2.COLOR_BGR2GRAY) + image = np.expand_dims (image, -1) + image = np.expand_dims (image, 0) + image_shape = image.shape + + t = K.placeholder ( image_shape ) #K.constant ( np.ones ( (10,) ) ) + import code + code.interact(local=dict(globals(), **locals())) + + ''' + >>> t[:,0:64,64::2,:].source.op.code +function (I[N0, N1, N2, N3]) -> (O) { + +O[i0, i1, i2, i3: (1 + 1 - 1)/1, (64 + 1 - 1)/1, (64 + 2 - 1)/2, (1 + 1 - 1)/1] = + =(I[1*i0+0, 1*i1+0, 2*i2+64, 1*i3+0]); + + + Status GetWindowedOutputSizeVerboseV2(int64 input_size, int64 filter_size, + int64 dilation_rate, int64 stride, + Padding padding_type, 
int64* output_size, + int64* padding_before, + int64* padding_after) { + if (stride <= 0) { + return errors::InvalidArgument("Stride must be > 0, but got ", stride); + } + if (dilation_rate < 1) { + return errors::InvalidArgument("Dilation rate must be >= 1, but got ", + dilation_rate); + } + + // See also the parallel implementation in GetWindowedOutputSizeFromDimsV2. + int64 effective_filter_size = (filter_size - 1) * dilation_rate + 1; + switch (padding_type) { + case Padding::VALID: + *output_size = (input_size - effective_filter_size + stride) / stride; + *padding_before = *padding_after = 0; + break; + case Padding::EXPLICIT: + *output_size = (input_size + *padding_before + *padding_after - + effective_filter_size + stride) / + stride; + break; + case Padding::SAME: + *output_size = (input_size + stride - 1) / stride; + const int64 padding_needed = + std::max(int64{0}, (*output_size - 1) * stride + + effective_filter_size - input_size); + // For odd values of total padding, add more padding at the 'right' + // side of the given dimension. + *padding_before = padding_needed / 2; + *padding_after = padding_needed - *padding_before; + break; + } + if (*output_size < 0) { + return errors::InvalidArgument( + "Computed output size would be negative: ", *output_size, + " [input_size: ", input_size, + ", effective_filter_size: ", effective_filter_size, + ", stride: ", stride, "]"); + } + return Status::OK(); + } + ''' + class ExtractImagePatchesOP(PMLTile.Operation): + def __init__(self, input, ksizes, strides, rates, padding='valid'): + + batch, in_rows, in_cols, depth = input.shape.dims + + ksize_rows = ksizes[1]; + ksize_cols = ksizes[2]; + + stride_rows = strides[1]; + stride_cols = strides[2]; + + rate_rows = rates[1]; + rate_cols = rates[2]; + + ksize_rows_eff = ksize_rows + (ksize_rows - 1) * (rate_rows - 1); + ksize_cols_eff = ksize_cols + (ksize_cols - 1) * (rate_cols - 1); + + #if padding == 'valid': + + out_rows = (in_rows - ksize_rows_eff + stride_rows) / stride_rows; + out_cols = (in_cols - ksize_cols_eff + stride_cols) / stride_cols; + + out_sizes = (batch, out_rows, out_cols, ksize_rows * ksize_cols * depth); + + + + B, H, W, CI = input.shape.dims + + RATE = PMLK.constant ([1,rate,rate,1], dtype=PMLK.floatx() ) + + #print (target_dims) + code = """function (I[B, {H}, {W}, {CI} ], RATES[RB, RH, RW, RC] ) -> (O) { + + O[b, {wnd_size}, {wnd_size}, ] = =(I[b, h, w, ci]); + + }""".format(H=H, W=W, CI=CI, RATES=rates, wnd_size=wnd_size) + + super(ExtractImagePatchesOP, self).__init__(code, [('I', input) ], + [('O', PMLTile.Shape(input.shape.dtype, out_sizes ) )]) + + + + + f = ExtractImagePatchesOP.function(t, [1,65,65,1], [1,1,1,1], [1,1,1,1]) + + x, = K.function ([t],[f]) ([ image ]) + print(x.shape) + + import code + code.interact(local=dict(globals(), **locals())) + + + from nnlib import nnlib + exec( nnlib.import_all(), locals(), globals() ) + + #ch = 3 + #def softmax(x, axis=-1): #from K numpy backend + # y = np.exp(x - np.max(x, axis, keepdims=True)) + # return y / np.sum(y, axis, keepdims=True) + # + #def gauss_kernel(size, sigma): + # coords = np.arange(0,size, dtype=K.floatx() ) + # coords -= (size - 1 ) / 2.0 + # g = coords**2 + # g *= ( -0.5 / (sigma**2) ) + # g = np.reshape (g, (1,-1)) + np.reshape(g, (-1,1) ) + # g = np.reshape (g, (1,-1)) + # g = softmax(g) + # g = np.reshape (g, (size, size, 1, 1)) + # g = np.tile (g, (1,1,ch, size*size*ch)) + # return K.constant(g, dtype=K.floatx() ) + # + ##kernel = gauss_kernel(11,1.5) + #kernel = K.constant( np.ones ( (246,246, 
3, 1) ) , dtype=K.floatx() ) + ##g = np.eye(9).reshape((3, 3, 1, 9)) + ##g = np.tile (g, (1,1,3,1)) + ##kernel = K.constant(g , dtype=K.floatx() ) + # + #def reducer(x): + # shape = K.shape(x) + # x = K.reshape(x, (-1, shape[-3] , shape[-2], shape[-1]) ) + # + # y = K.depthwise_conv2d(x, kernel, strides=(1, 1), padding='valid') + # + # y_shape = K.shape(y) + # return y#K.reshape(y, (shape[0], y_shape[1], y_shape[2], y_shape[3] ) ) + + image = cv2.imread('D:\\DeepFaceLab\\test\\00000.png').astype(np.float32) / 255.0 + image = cv2.resize ( image, (128,128) ) + + image = cv2.cvtColor (image, cv2.COLOR_BGR2GRAY) + image = np.expand_dims (image, -1) + image_shape = image.shape + + image2 = cv2.imread('D:\\DeepFaceLab\\test\\00001.png').astype(np.float32) / 255.0 + #image2 = cv2.cvtColor (image2, cv2.COLOR_BGR2GRAY) + #image2 = np.expand_dims (image2, -1) + image2_shape = image2.shape + + image_tensor = K.placeholder(shape=[ 1, image_shape[0], image_shape[1], image_shape[2] ], dtype="float32" ) + image2_tensor = K.placeholder(shape=[ 1, image_shape[0], image_shape[1], image_shape[2] ], dtype="float32" ) + + #loss = reducer(image_tensor) + #loss = K.reshape (loss, (-1,246,246, 11,11,3) ) + tf = nnlib.tf + + sh = K.int_shape(image_tensor)[1] + wnd_size = 16 + step_size = 8 + k = (sh-wnd_size) // step_size + 1 + + loss = tf.image.extract_image_patches(image_tensor, [1,k,k,1], [1,1,1,1], [1,step_size,step_size,1], 'VALID') + print(loss) + + f = K.function ( [image_tensor], [loss] ) + x = f ( [ np.expand_dims(image,0) ] )[0][0] + + import code + code.interact(local=dict(globals(), **locals())) + + for i in range( x.shape[2] ): + img = x[:,:,i:i+1] + + cv2.imshow('', (img*255).astype(np.uint8) ) + cv2.waitKey(0) + + #for i in range( len(x) ): + # for j in range ( len(x) ): + # img = x[i,j] + # import code + # code.interact(local=dict(globals(), **locals())) + # + # cv2.imshow('', (x[i,j]*255).astype(np.uint8) ) + # cv2.waitKey(0) + + import code + code.interact(local=dict(globals(), **locals())) + + + from nnlib import nnlib + exec( nnlib.import_all(), locals(), globals() ) + + PNet_Input = Input ( (None, None,3) ) + x = PNet_Input + x = Conv2D (10, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv1")(x) + x = PReLU (shared_axes=[1,2], name="PReLU1" )(x) + x = MaxPooling2D( pool_size=(2,2), strides=(2,2), padding='same' ) (x) + x = Conv2D (16, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv2")(x) + x = PReLU (shared_axes=[1,2], name="PReLU2" )(x) + x = Conv2D (32, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv3")(x) + x = PReLU (shared_axes=[1,2], name="PReLU3" )(x) + prob = Conv2D (2, kernel_size=(1,1), strides=(1,1), padding='valid', name="conv41")(x) + prob = Softmax()(prob) + x = Conv2D (4, kernel_size=(1,1), strides=(1,1), padding='valid', name="conv42")(x) + + PNet_model = Model(PNet_Input, [x,prob] ) + PNet_model.load_weights ( (Path(mtcnn.__file__).parent / 'mtcnn_pnet.h5').__str__() ) + + RNet_Input = Input ( (24, 24, 3) ) + x = RNet_Input + x = Conv2D (28, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv1")(x) + x = PReLU (shared_axes=[1,2], name="prelu1" )(x) + x = MaxPooling2D( pool_size=(3,3), strides=(2,2), padding='same' ) (x) + x = Conv2D (48, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv2")(x) + x = PReLU (shared_axes=[1,2], name="prelu2" )(x) + x = MaxPooling2D( pool_size=(3,3), strides=(2,2), padding='valid' ) (x) + x = Conv2D (64, kernel_size=(2,2), strides=(1,1), padding='valid', name="conv3")(x) + x = PReLU 
(shared_axes=[1,2], name="prelu3" )(x) + x = Lambda ( lambda x: K.reshape (x, (-1, np.prod(K.int_shape(x)[1:]),) ), output_shape=(np.prod(K.int_shape(x)[1:]),) ) (x) + x = Dense (128, name='conv4')(x) + x = PReLU (name="prelu4" )(x) + prob = Dense (2, name='conv51')(x) + prob = Softmax()(prob) + x = Dense (4, name='conv52')(x) + RNet_model = Model(RNet_Input, [x,prob] ) + RNet_model.load_weights ( (Path(mtcnn.__file__).parent / 'mtcnn_rnet.h5').__str__() ) + + ONet_Input = Input ( (48, 48, 3) ) + x = ONet_Input + x = Conv2D (32, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv1")(x) + x = PReLU (shared_axes=[1,2], name="prelu1" )(x) + x = MaxPooling2D( pool_size=(3,3), strides=(2,2), padding='same' ) (x) + x = Conv2D (64, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv2")(x) + x = PReLU (shared_axes=[1,2], name="prelu2" )(x) + x = MaxPooling2D( pool_size=(3,3), strides=(2,2), padding='valid' ) (x) + x = Conv2D (64, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv3")(x) + x = PReLU (shared_axes=[1,2], name="prelu3" )(x) + x = MaxPooling2D( pool_size=(2,2), strides=(2,2), padding='same' ) (x) + x = Conv2D (128, kernel_size=(2,2), strides=(1,1), padding='valid', name="conv4")(x) + x = PReLU (shared_axes=[1,2], name="prelu4" )(x) + x = Lambda ( lambda x: K.reshape (x, (-1, np.prod(K.int_shape(x)[1:]),) ), output_shape=(np.prod(K.int_shape(x)[1:]),) ) (x) + x = Dense (256, name='conv5')(x) + x = PReLU (name="prelu5" )(x) + prob = Dense (2, name='conv61')(x) + prob = Softmax()(prob) + x1 = Dense (4, name='conv62')(x) + x2 = Dense (10, name='conv63')(x) + ONet_model = Model(ONet_Input, [x1,x2,prob] ) + ONet_model.load_weights ( (Path(mtcnn.__file__).parent / 'mtcnn_onet.h5').__str__() ) + + pnet_fun = K.function ( PNet_model.inputs, PNet_model.outputs ) + rnet_fun = K.function ( RNet_model.inputs, RNet_model.outputs ) + onet_fun = K.function ( ONet_model.inputs, ONet_model.outputs ) + + pnet_test_data = np.random.uniform ( size=(1, 64,64,3) ) + pnet_result1, pnet_result2 = pnet_fun ([pnet_test_data]) + + rnet_test_data = np.random.uniform ( size=(1,24,24,3) ) + rnet_result1, rnet_result2 = rnet_fun ([rnet_test_data]) + + onet_test_data = np.random.uniform ( size=(1,48,48,3) ) + onet_result1, onet_result2, onet_result3 = onet_fun ([onet_test_data]) + + import code + code.interact(local=dict(globals(), **locals())) + + from nnlib import nnlib + #exec( nnlib.import_all( nnlib.device.Config(cpu_only=True) ), locals(), globals() )# nnlib.device.Config(cpu_only=True) + exec( nnlib.import_all(), locals(), globals() )# nnlib.device.Config(cpu_only=True) + + #det1_Input = Input ( (None, None,3) ) + #x = det1_Input + #x = Conv2D (10, kernel_size=(3,3), strides=(1,1), padding='valid')(x) + # + #import code + #code.interact(local=dict(globals(), **locals())) + + tf = nnlib.tf + tf_session = nnlib.tf_sess + + with tf.variable_scope('pnet2'): + data = tf.placeholder(tf.float32, (None,None,None,3), 'input') + pnet2 = mtcnn.PNet(tf, {'data':data}) + pnet2.load( (Path(mtcnn.__file__).parent / 'det1.npy').__str__(), tf_session) + with tf.variable_scope('rnet2'): + data = tf.placeholder(tf.float32, (None,24,24,3), 'input') + rnet2 = mtcnn.RNet(tf, {'data':data}) + rnet2.load( (Path(mtcnn.__file__).parent / 'det2.npy').__str__(), tf_session) + with tf.variable_scope('onet2'): + data = tf.placeholder(tf.float32, (None,48,48,3), 'input') + onet2 = mtcnn.ONet(tf, {'data':data}) + onet2.load( (Path(mtcnn.__file__).parent / 'det3.npy').__str__(), tf_session) + + + + pnet_fun = 
K.function([pnet2.layers['data']],[pnet2.layers['conv4-2'], pnet2.layers['prob1']]) + rnet_fun = K.function([rnet2.layers['data']],[rnet2.layers['conv5-2'], rnet2.layers['prob1']]) + onet_fun = K.function([onet2.layers['data']],[onet2.layers['conv6-2'], onet2.layers['conv6-3'], onet2.layers['prob1']]) + + det1_dict = np.load((Path(mtcnn.__file__).parent / 'det1.npy').__str__(), encoding='latin1').item() + det2_dict = np.load((Path(mtcnn.__file__).parent / 'det2.npy').__str__(), encoding='latin1').item() + det3_dict = np.load((Path(mtcnn.__file__).parent / 'det3.npy').__str__(), encoding='latin1').item() + + PNet_Input = Input ( (None, None,3) ) + x = PNet_Input + x = Conv2D (10, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv1")(x) + x = PReLU (shared_axes=[1,2], name="PReLU1" )(x) + x = MaxPooling2D( pool_size=(2,2), strides=(2,2), padding='same' ) (x) + x = Conv2D (16, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv2")(x) + x = PReLU (shared_axes=[1,2], name="PReLU2" )(x) + x = Conv2D (32, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv3")(x) + x = PReLU (shared_axes=[1,2], name="PReLU3" )(x) + prob = Conv2D (2, kernel_size=(1,1), strides=(1,1), padding='valid', name="conv41")(x) + prob = Softmax()(prob) + x = Conv2D (4, kernel_size=(1,1), strides=(1,1), padding='valid', name="conv42")(x) + + + PNet_model = Model(PNet_Input, [x,prob] ) + + #PNet_model.load_weights ( (Path(mtcnn.__file__).parent / 'mtcnn_pnet.h5').__str__() ) + PNet_model.get_layer("conv1").set_weights ( [ det1_dict['conv1']['weights'], det1_dict['conv1']['biases'] ] ) + PNet_model.get_layer("PReLU1").set_weights ( [ np.reshape(det1_dict['PReLU1']['alpha'], (1,1,-1)) ] ) + PNet_model.get_layer("conv2").set_weights ( [ det1_dict['conv2']['weights'], det1_dict['conv2']['biases'] ] ) + PNet_model.get_layer("PReLU2").set_weights ( [ np.reshape(det1_dict['PReLU2']['alpha'], (1,1,-1)) ] ) + PNet_model.get_layer("conv3").set_weights ( [ det1_dict['conv3']['weights'], det1_dict['conv3']['biases'] ] ) + PNet_model.get_layer("PReLU3").set_weights ( [ np.reshape(det1_dict['PReLU3']['alpha'], (1,1,-1)) ] ) + PNet_model.get_layer("conv41").set_weights ( [ det1_dict['conv4-1']['weights'], det1_dict['conv4-1']['biases'] ] ) + PNet_model.get_layer("conv42").set_weights ( [ det1_dict['conv4-2']['weights'], det1_dict['conv4-2']['biases'] ] ) + PNet_model.save ( (Path(mtcnn.__file__).parent / 'mtcnn_pnet.h5').__str__() ) + + pnet_test_data = np.random.uniform ( size=(1, 64,64,3) ) + pnet_result1, pnet_result2 = pnet_fun ([pnet_test_data]) + pnet2_result1, pnet2_result2 = K.function ( PNet_model.inputs, PNet_model.outputs ) ([pnet_test_data]) + + pnet_diff1 = np.mean ( np.abs(pnet_result1 - pnet2_result1) ) + pnet_diff2 = np.mean ( np.abs(pnet_result2 - pnet2_result2) ) + print ("pnet_diff1 = %f, pnet_diff2 = %f, " % (pnet_diff1, pnet_diff2) ) + + RNet_Input = Input ( (24, 24, 3) ) + x = RNet_Input + x = Conv2D (28, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv1")(x) + x = PReLU (shared_axes=[1,2], name="prelu1" )(x) + x = MaxPooling2D( pool_size=(3,3), strides=(2,2), padding='same' ) (x) + x = Conv2D (48, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv2")(x) + x = PReLU (shared_axes=[1,2], name="prelu2" )(x) + x = MaxPooling2D( pool_size=(3,3), strides=(2,2), padding='valid' ) (x) + x = Conv2D (64, kernel_size=(2,2), strides=(1,1), padding='valid', name="conv3")(x) + x = PReLU (shared_axes=[1,2], name="prelu3" )(x) + x = Lambda ( lambda x: K.reshape (x, (-1, 
np.prod(K.int_shape(x)[1:]),) ), output_shape=(np.prod(K.int_shape(x)[1:]),) ) (x) + x = Dense (128, name='conv4')(x) + x = PReLU (name="prelu4" )(x) + prob = Dense (2, name='conv51')(x) + prob = Softmax()(prob) + x = Dense (4, name='conv52')(x) + + RNet_model = Model(RNet_Input, [x,prob] ) + + #RNet_model.load_weights ( (Path(mtcnn.__file__).parent / 'mtcnn_rnet.h5').__str__() ) + RNet_model.get_layer("conv1").set_weights ( [ det2_dict['conv1']['weights'], det2_dict['conv1']['biases'] ] ) + RNet_model.get_layer("prelu1").set_weights ( [ np.reshape(det2_dict['prelu1']['alpha'], (1,1,-1)) ] ) + RNet_model.get_layer("conv2").set_weights ( [ det2_dict['conv2']['weights'], det2_dict['conv2']['biases'] ] ) + RNet_model.get_layer("prelu2").set_weights ( [ np.reshape(det2_dict['prelu2']['alpha'], (1,1,-1)) ] ) + RNet_model.get_layer("conv3").set_weights ( [ det2_dict['conv3']['weights'], det2_dict['conv3']['biases'] ] ) + RNet_model.get_layer("prelu3").set_weights ( [ np.reshape(det2_dict['prelu3']['alpha'], (1,1,-1)) ] ) + RNet_model.get_layer("conv4").set_weights ( [ det2_dict['conv4']['weights'], det2_dict['conv4']['biases'] ] ) + RNet_model.get_layer("prelu4").set_weights ( [ det2_dict['prelu4']['alpha'] ] ) + RNet_model.get_layer("conv51").set_weights ( [ det2_dict['conv5-1']['weights'], det2_dict['conv5-1']['biases'] ] ) + RNet_model.get_layer("conv52").set_weights ( [ det2_dict['conv5-2']['weights'], det2_dict['conv5-2']['biases'] ] ) + RNet_model.save ( (Path(mtcnn.__file__).parent / 'mtcnn_rnet.h5').__str__() ) + + #import code + #code.interact(local=dict(globals(), **locals())) + + rnet_test_data = np.random.uniform ( size=(1,24,24,3) ) + rnet_result1, rnet_result2 = rnet_fun ([rnet_test_data]) + rnet2_result1, rnet2_result2 = K.function ( RNet_model.inputs, RNet_model.outputs ) ([rnet_test_data]) + + rnet_diff1 = np.mean ( np.abs(rnet_result1 - rnet2_result1) ) + rnet_diff2 = np.mean ( np.abs(rnet_result2 - rnet2_result2) ) + print ("rnet_diff1 = %f, rnet_diff2 = %f, " % (rnet_diff1, rnet_diff2) ) + + + ################# + ''' + (self.feed('data') #pylint: disable=no-value-for-parameter, no-member + .conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1') + .prelu(name='prelu1') + .max_pool(3, 3, 2, 2, name='pool1') + .conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2') + .prelu(name='prelu2') + .max_pool(3, 3, 2, 2, padding='VALID', name='pool2') + .conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3') + .prelu(name='prelu3') + .max_pool(2, 2, 2, 2, name='pool3') + .conv(2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4') + .prelu(name='prelu4') + .fc(256, relu=False, name='conv5') + .prelu(name='prelu5') + .fc(2, relu=False, name='conv6-1') + .softmax(1, name='prob1')) + + (self.feed('prelu5') #pylint: disable=no-value-for-parameter + .fc(4, relu=False, name='conv6-2')) + + (self.feed('prelu5') #pylint: disable=no-value-for-parameter + .fc(10, relu=False, name='conv6-3')) + ''' + ONet_Input = Input ( (48, 48, 3) ) + x = ONet_Input + x = Conv2D (32, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv1")(x) + x = PReLU (shared_axes=[1,2], name="prelu1" )(x) + x = MaxPooling2D( pool_size=(3,3), strides=(2,2), padding='same' ) (x) + x = Conv2D (64, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv2")(x) + x = PReLU (shared_axes=[1,2], name="prelu2" )(x) + x = MaxPooling2D( pool_size=(3,3), strides=(2,2), padding='valid' ) (x) + x = Conv2D (64, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv3")(x) + x = PReLU 
(shared_axes=[1,2], name="prelu3" )(x) + x = MaxPooling2D( pool_size=(2,2), strides=(2,2), padding='same' ) (x) + x = Conv2D (128, kernel_size=(2,2), strides=(1,1), padding='valid', name="conv4")(x) + x = PReLU (shared_axes=[1,2], name="prelu4" )(x) + x = Lambda ( lambda x: K.reshape (x, (-1, np.prod(K.int_shape(x)[1:]),) ), output_shape=(np.prod(K.int_shape(x)[1:]),) ) (x) + x = Dense (256, name='conv5')(x) + x = PReLU (name="prelu5" )(x) + prob = Dense (2, name='conv61')(x) + prob = Softmax()(prob) + x1 = Dense (4, name='conv62')(x) + x2 = Dense (10, name='conv63')(x) + + ONet_model = Model(ONet_Input, [x1,x2,prob] ) + + #ONet_model.load_weights ( (Path(mtcnn.__file__).parent / 'mtcnn_onet.h5').__str__() ) + ONet_model.get_layer("conv1").set_weights ( [ det3_dict['conv1']['weights'], det3_dict['conv1']['biases'] ] ) + ONet_model.get_layer("prelu1").set_weights ( [ np.reshape(det3_dict['prelu1']['alpha'], (1,1,-1)) ] ) + ONet_model.get_layer("conv2").set_weights ( [ det3_dict['conv2']['weights'], det3_dict['conv2']['biases'] ] ) + ONet_model.get_layer("prelu2").set_weights ( [ np.reshape(det3_dict['prelu2']['alpha'], (1,1,-1)) ] ) + ONet_model.get_layer("conv3").set_weights ( [ det3_dict['conv3']['weights'], det3_dict['conv3']['biases'] ] ) + ONet_model.get_layer("prelu3").set_weights ( [ np.reshape(det3_dict['prelu3']['alpha'], (1,1,-1)) ] ) + ONet_model.get_layer("conv4").set_weights ( [ det3_dict['conv4']['weights'], det3_dict['conv4']['biases'] ] ) + ONet_model.get_layer("prelu4").set_weights ( [ np.reshape(det3_dict['prelu4']['alpha'], (1,1,-1)) ] ) + ONet_model.get_layer("conv5").set_weights ( [ det3_dict['conv5']['weights'], det3_dict['conv5']['biases'] ] ) + ONet_model.get_layer("prelu5").set_weights ( [ det3_dict['prelu5']['alpha'] ] ) + ONet_model.get_layer("conv61").set_weights ( [ det3_dict['conv6-1']['weights'], det3_dict['conv6-1']['biases'] ] ) + ONet_model.get_layer("conv62").set_weights ( [ det3_dict['conv6-2']['weights'], det3_dict['conv6-2']['biases'] ] ) + ONet_model.get_layer("conv63").set_weights ( [ det3_dict['conv6-3']['weights'], det3_dict['conv6-3']['biases'] ] ) + ONet_model.save ( (Path(mtcnn.__file__).parent / 'mtcnn_onet.h5').__str__() ) + + onet_test_data = np.random.uniform ( size=(1,48,48,3) ) + onet_result1, onet_result2, onet_result3 = onet_fun ([onet_test_data]) + onet2_result1, onet2_result2, onet2_result3 = K.function ( ONet_model.inputs, ONet_model.outputs ) ([onet_test_data]) + + onet_diff1 = np.mean ( np.abs(onet_result1 - onet2_result1) ) + onet_diff2 = np.mean ( np.abs(onet_result2 - onet2_result2) ) + onet_diff3 = np.mean ( np.abs(onet_result3 - onet2_result3) ) + print ("onet_diff1 = %f, onet_diff2 = %f, , onet_diff3 = %f " % (onet_diff1, onet_diff2, onet_diff3) ) + + + import code + code.interact(local=dict(globals(), **locals())) + + + + + + import code + code.interact(local=dict(globals(), **locals())) + + + + + + + #class MTCNNSoftmax(keras.Layer): + # + # def __init__(self, axis=-1, **kwargs): + # super(MTCNNSoftmax, self).__init__(**kwargs) + # self.supports_masking = True + # self.axis = axis + # + # def call(self, inputs): + # + # def softmax(self, target, axis, name=None): + # max_axis = self.tf.reduce_max(target, axis, keepdims=True) + # target_exp = self.tf.exp(target-max_axis) + # normalize = self.tf.reduce_sum(target_exp, axis, keepdims=True) + # softmax = self.tf.div(target_exp, normalize, name) + # return softmax + # #return activations.softmax(inputs, axis=self.axis) + # + # def get_config(self): + # config = {'axis': self.axis} 
+ # base_config = super(MTCNNSoftmax, self).get_config() + # return dict(list(base_config.items()) + list(config.items())) + # + # def compute_output_shape(self, input_shape): + # return input_shape + + from nnlib import nnlib + exec( nnlib.import_all(), locals(), globals() ) + + + + + image = cv2.imread('D:\\DeepFaceLab\\test\\00000.png').astype(np.float32) / 255.0 + image = cv2.cvtColor (image, cv2.COLOR_BGR2GRAY) + image = np.expand_dims (image, -1) + image_shape = image.shape + + image2 = cv2.imread('D:\\DeepFaceLab\\test\\00001.png').astype(np.float32) / 255.0 + image2 = cv2.cvtColor (image2, cv2.COLOR_BGR2GRAY) + image2 = np.expand_dims (image2, -1) + image2_shape = image2.shape + + #cv2.imshow('', image) + + + image_tensor = K.placeholder(shape=[ 1, image_shape[0], image_shape[1], image_shape[2] ], dtype="float32" ) + image2_tensor = K.placeholder(shape=[ 1, image_shape[0], image_shape[1], image_shape[2] ], dtype="float32" ) + + blurred_image_tensor = gaussian_blur(16.0)(image_tensor) + x, = nnlib.tf_sess.run ( blurred_image_tensor, feed_dict={image_tensor: np.expand_dims(image,0)} ) + cv2.imshow('', (x*255).astype(np.uint8) ) + cv2.waitKey(0) + + import code + code.interact(local=dict(globals(), **locals())) + + + #os.environ['plaidML'] = '1' + from nnlib import nnlib + + dvc = nnlib.device.Config(force_gpu_idx=1) + exec( nnlib.import_all(dvc), locals(), globals() ) + + tf = nnlib.tf + + image = cv2.imread('D:\\DeepFaceLab\\test\\00000.png').astype(np.float32) / 255.0 + image = cv2.cvtColor (image, cv2.COLOR_BGR2GRAY) + image = np.expand_dims (image, -1) + image_shape = image.shape + + image2 = cv2.imread('D:\\DeepFaceLab\\test\\00001.png').astype(np.float32) / 255.0 + image2 = cv2.cvtColor (image2, cv2.COLOR_BGR2GRAY) + image2 = np.expand_dims (image2, -1) + image2_shape = image2.shape + + image1_tensor = K.placeholder(shape=[ 1, image_shape[0], image_shape[1], image_shape[2] ], dtype="float32" ) + image2_tensor = K.placeholder(shape=[ 1, image_shape[0], image_shape[1], image_shape[2] ], dtype="float32" ) + + + + #import code + #code.interact(local=dict(globals(), **locals())) + def manual_conv(input, filter, strides, padding): + h_f, w_f, c_in, c_out = filter.get_shape().as_list() + input_patches = tf.extract_image_patches(input, ksizes=[1, h_f, w_f, 1 ], strides=strides, rates=[1, 1, 1, 1], padding=padding) + return input_patches + filters_flat = tf.reshape(filter, shape=[h_f*w_f*c_in, c_out]) + return tf.einsum("ijkl,lm->ijkm", input_patches, filters_flat) + + def extract_image_patches(x, ksizes, ssizes, padding='SAME', + data_format='channels_last'): + """Extract the patches from an image. 
+ # Arguments + x: The input image + ksizes: 2-d tuple with the kernel size + ssizes: 2-d tuple with the strides size + padding: 'same' or 'valid' + data_format: 'channels_last' or 'channels_first' + # Returns + The (k_w,k_h) patches extracted + TF ==> (batch_size,w,h,k_w,k_h,c) + TH ==> (batch_size,w,h,c,k_w,k_h) + """ + kernel = [1, ksizes[0], ksizes[1], 1] + strides = [1, ssizes[0], ssizes[1], 1] + if data_format == 'channels_first': + x = K.permute_dimensions(x, (0, 2, 3, 1)) + bs_i, w_i, h_i, ch_i = K.int_shape(x) + patches = tf.extract_image_patches(x, kernel, strides, [1, 1, 1, 1], + padding) + # Reshaping to fit Theano + bs, w, h, ch = K.int_shape(patches) + reshaped = tf.reshape(patches, [-1, w, h, tf.floordiv(ch, ch_i), ch_i]) + final_shape = [-1, w, h, ch_i, ksizes[0], ksizes[1]] + patches = tf.reshape(tf.transpose(reshaped, [0, 1, 2, 4, 3]), final_shape) + if data_format == 'channels_last': + patches = K.permute_dimensions(patches, [0, 1, 2, 4, 5, 3]) + return patches + + m = 32 + c_in = 3 + c_out = 16 + + filter_sizes = [5, 11] + strides = [1] + #paddings = ["VALID", "SAME"] + + for fs in filter_sizes: + h = w = 128 + h_f = w_f = fs + str = 2 + #print "Testing for", imsize, fs, stri, pad + + #tf.reset_default_graph() + X = tf.constant(1.0+np.random.rand(m, h, w, c_in), tf.float32) + W = tf.constant(np.ones([h_f, w_f, c_in, h_f*w_f*c_in]), tf.float32) + + + Z = tf.nn.conv2d(X, W, strides=[1, str, str, 1], padding="VALID") + Z_manual = manual_conv(X, W, strides=[1, str, str, 1], padding="VALID") + Z_2 = extract_image_patches (X, (fs,fs), (str,str), padding="VALID") + import code + code.interact(local=dict(globals(), **locals())) + # + sess = tf.Session() + sess.run(tf.global_variables_initializer()) + Z_, Z_manual_ = sess.run([Z, Z_manual]) + #self.assertEqual(Z_.shape, Z_manual_.shape) + #self.assertTrue(np.allclose(Z_, Z_manual_, rtol=1e-05)) + sess.close() + + + import code + code.interact(local=dict(globals(), **locals())) + + + + + + #k_loss_t = keras_style_loss()(image1_tensor, image2_tensor) + #k_loss_run = K.function( [image1_tensor, image2_tensor],[k_loss_t]) + #import code + #code.interact(local=dict(globals(), **locals())) + #image = np.expand_dims(image,0) + #image2 = np.expand_dims(image2,0) + #k_loss = k_loss_run([image, image2]) + #t_loss = t_loss_run([image, image2]) + + + + + #x, = tf_sess_run ([np.expand_dims(image,0)]) + #x = x[0] + ##import code + ##code.interact(local=dict(globals(), **locals())) + + + + image = cv2.imread('D:\\DeepFaceLab\\test\\00000.png').astype(np.float32) / 255.0 + image = cv2.cvtColor (image, cv2.COLOR_BGR2GRAY) + image = np.expand_dims (image, -1) + image_shape = image.shape + + image2 = cv2.imread('D:\\DeepFaceLab\\test\\00001.png').astype(np.float32) / 255.0 + image2 = cv2.cvtColor (image2, cv2.COLOR_BGR2GRAY) + image2 = np.expand_dims (image2, -1) + image2_shape = image2.shape + + image_tensor = tf.placeholder(tf.float32, shape=[1, image_shape[0], image_shape[1], image_shape[2] ]) + image2_tensor = tf.placeholder(tf.float32, shape=[1, image2_shape[0], image2_shape[1], image2_shape[2] ]) + + blurred_image_tensor = sl(image_tensor, image2_tensor) + x = tf_sess.run ( blurred_image_tensor, feed_dict={image_tensor: np.expand_dims(image,0), image2_tensor: np.expand_dims(image2,0) } ) + + cv2.imshow('', x[0]) + cv2.waitKey(0) + import code + code.interact(local=dict(globals(), **locals())) + + while True: + image = cv2.imread('D:\\DeepFaceLab\\workspace\\data_src\\aligned\\00000.png').astype(np.float32) / 255.0 + image = cv2.resize(image, 
(256,256)) + image = random_transform( image ) + warped_img, target_img = random_warp( image ) + + #cv2.imshow('', image) + #cv2.waitKey(0) + + cv2.imshow('', warped_img) + cv2.waitKey(0) + cv2.imshow('', target_img) + cv2.waitKey(0) + + import code + code.interact(local=dict(globals(), **locals())) + + import code + code.interact(local=dict(globals(), **locals())) + + return + + + def keras_gaussian_blur(radius=2.0): + def gaussian(x, mu, sigma): + return np.exp(-(float(x) - float(mu)) ** 2 / (2 * sigma ** 2)) + + def make_kernel(sigma): + kernel_size = max(3, int(2 * 2 * sigma + 1)) + mean = np.floor(0.5 * kernel_size) + kernel_1d = np.array([gaussian(x, mean, sigma) for x in range(kernel_size)]) + np_kernel = np.outer(kernel_1d, kernel_1d).astype(dtype=K.floatx()) + kernel = np_kernel / np.sum(np_kernel) + return kernel + + gauss_kernel = make_kernel(radius) + gauss_kernel = gauss_kernel[:, :,np.newaxis, np.newaxis] + + #import code + #code.interact(local=dict(globals(), **locals())) + def func(input): + inputs = [ input[:,:,:,i:i+1] for i in range( K.int_shape( input )[-1] ) ] + + outputs = [] + for i in range(len(inputs)): + outputs += [ K.conv2d( inputs[i] , K.constant(gauss_kernel) , strides=(1,1), padding="same") ] + + return K.concatenate (outputs, axis=-1) + return func + + def keras_style_loss(gaussian_blur_radius=0.0, loss_weight=1.0, epsilon=1e-5): + if gaussian_blur_radius > 0.0: + gblur = keras_gaussian_blur(gaussian_blur_radius) + + def sd(content, style): + content_nc = K.int_shape(content)[-1] + style_nc = K.int_shape(style)[-1] + if content_nc != style_nc: + raise Exception("keras_style_loss() content_nc != style_nc") + + axes = [1,2] + c_mean, c_var = K.mean(content, axis=axes, keepdims=True), K.var(content, axis=axes, keepdims=True) + s_mean, s_var = K.mean(style, axis=axes, keepdims=True), K.var(style, axis=axes, keepdims=True) + c_std, s_std = K.sqrt(c_var + epsilon), K.sqrt(s_var + epsilon) + + mean_loss = K.sum(K.square(c_mean-s_mean)) + std_loss = K.sum(K.square(c_std-s_std)) + + return (mean_loss + std_loss) * loss_weight + + def func(target, style): + if gaussian_blur_radius > 0.0: + return sd( gblur(target), gblur(style)) + else: + return sd( target, style ) + return func + + data = tf.placeholder(tf.float32, (None,None,None,3), 'input') + pnet2 = mtcnn.PNet(tf, {'data':data}) + filename = str(Path(mtcnn.__file__).parent/'det1.npy') + pnet2.load(filename, tf_sess) + + pnet_fun = K.function([pnet2.layers['data']],[pnet2.layers['conv4-2'], pnet2.layers['prob1']]) + + import code + code.interact(local=dict(globals(), **locals())) + + return + + + while True: + img_bgr = np.random.rand ( 268, 640, 3 ) + img_size = img_bgr.shape[1], img_bgr.shape[0] + + mat = np.array( [[ 1.99319629e+00, -1.81504324e-01, -3.62479778e+02], + [ 1.81504324e-01, 1.99319629e+00, -8.05396709e+01]] ) + + tmp_0 = np.random.rand ( 128,128 ) - 0.1 + tmp = np.expand_dims (tmp_0, axis=-1) + + mask = np.ones ( tmp.shape, dtype=np.float32) + mask_border_size = int ( mask.shape[1] * 0.0625 ) + mask[:,0:mask_border_size,:] = 0 + mask[:,-mask_border_size:,:] = 0 + + x = cv2.warpAffine( mask, mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT ) + + if len ( np.argwhere( np.isnan(x) ) ) == 0: + print ("fine") + else: + print ("wtf") + + import code + code.interact(local=dict(globals(), **locals())) + + return + + aligned_path_image_paths = Path_utils.get_image_paths("E:\\FakeFaceVideoSources\\Datasets\\CelebA aligned") + + a = [] 
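+    # note: r_vec / t_vec below appear to be leftovers from the commented-out cv2.solvePnP
+    # experiment further down; the active loop only collects yaw values via
+    # LandmarksProcessor.estimate_pitch_yaw().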
+ r_vec = np.array([[0.01891013], [0.08560084], [-3.14392813]]) + t_vec = np.array([[-14.97821226], [-10.62040383], [-2053.03596872]]) + + yaws = [] + pitchs = [] + for filepath in tqdm(aligned_path_image_paths, desc="test", ascii=True ): + filepath = Path(filepath) + + if filepath.suffix == '.png': + dflimg = DFLPNG.load( str(filepath), print_on_no_embedded_data=True ) + elif filepath.suffix == '.jpg': + dflimg = DFLJPG.load ( str(filepath), print_on_no_embedded_data=True ) + else: + print ("%s is not a dfl image file" % (filepath.name) ) + + #source_filename_stem = Path( dflimg.get_source_filename() ).stem + #if source_filename_stem not in alignments.keys(): + # alignments[ source_filename_stem ] = [] + + + #focal_length = dflimg.shape[1] + #camera_center = (dflimg.shape[1] / 2, dflimg.shape[0] / 2) + #camera_matrix = np.array( + # [[focal_length, 0, camera_center[0]], + # [0, focal_length, camera_center[1]], + # [0, 0, 1]], dtype=np.float32) + # + landmarks = dflimg.get_landmarks() + # + #lm = landmarks.astype(np.float32) + + img = cv2_imread (str(filepath)) / 255.0 + + LandmarksProcessor.draw_landmarks(img, landmarks, (1,1,1) ) + + + #(_, rotation_vector, translation_vector) = cv2.solvePnP( + # LandmarksProcessor.landmarks_68_3D, + # lm, + # camera_matrix, + # np.zeros((4, 1)) ) + # + #rme = mathlib.rotationMatrixToEulerAngles( cv2.Rodrigues(rotation_vector)[0] ) + #import code + #code.interact(local=dict(globals(), **locals())) + + #rotation_vector = rotation_vector / np.linalg.norm(rotation_vector) + + + #img2 = image_utils.get_text_image ( (256,10, 3), str(rotation_vector) ) + pitch, yaw = LandmarksProcessor.estimate_pitch_yaw (landmarks) + yaws += [yaw] + #print(pitch, yaw) + #cv2.imshow ("", (img * 255).astype(np.uint8) ) + #cv2.waitKey(0) + #a += [ rotation_vector] + yaws = np.array(yaws) + import code + code.interact(local=dict(globals(), **locals())) + + + + + + + #alignments[ source_filename_stem ].append (dflimg.get_source_landmarks()) + #alignments.append (dflimg.get_source_landmarks()) + + + + + + + + o = np.ones ( (128,128,3), dtype=np.float32 ) + cv2.imwrite ("D:\\temp\\z.jpg", o) + + #DFLJPG.embed_data ("D:\\temp\\z.jpg", ) + + dfljpg = DFLJPG.load("D:\\temp\\z.jpg") + + import code + code.interact(local=dict(globals(), **locals())) + + return + + + + import sys, numpy; print(numpy.__version__, sys.version) + sq = multiprocessing.Queue() + cq = multiprocessing.Queue() + + p = multiprocessing.Process(target=subprocess, args=(sq,cq,)) + p.start() + + while True: + cq.get() #waiting numpy array + sq.put (1) #send message we are ready to get more + + #import code + #code.interact(local=dict(globals(), **locals())) + + os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '2' + + from nnlib import nnlib + exec( nnlib.import_all(), locals(), globals() ) + + + + + #import tensorflow as tf + #tf_module = tf + # + #config = tf_module.ConfigProto() + #config.gpu_options.force_gpu_compatible = True + #tf_session = tf_module.Session(config=config) + # + #srgb_tensor = tf.placeholder("float", [None, None, 3]) + # + #filename = Path(__file__).parent / '00050.png' + #img = cv2.imread(str(filename)).astype(np.float32) / 255.0 + # + #lab_tensor = rgb_to_lab (tf_module, srgb_tensor) + # + #rgb_tensor = lab_to_rgb (tf_module, lab_tensor) + # + #rgb = tf_session.run(rgb_tensor, feed_dict={srgb_tensor: img}) + #cv2.imshow("", rgb) + #cv2.waitKey(0) + + #from skimage import io, color + #def_lab = color.rgb2lab(img) + # + #t = time.time() + #def_lab = color.rgb2lab(img) + #print ( time.time() - t ) 
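+    # (the skimage rgb2lab timing above is compared against the TF-graph lab_tensor timing below)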
+ # + #lab = tf_session.run(lab_tensor, feed_dict={srgb_tensor: img}) + # + #t = time.time() + #lab = tf_session.run(lab_tensor, feed_dict={srgb_tensor: img}) + #print ( time.time() - t ) + + + + + + + #lab_clr = color.rgb2lab(img_bgr) + #lab_bw = color.rgb2lab(out_img) + #tmp_channel, a_channel, b_channel = cv2.split(lab_clr) + #l_channel, tmp2_channel, tmp3_channel = cv2.split(lab_bw) + #img_LAB = cv2.merge((l_channel,a_channel, b_channel)) + #out_img = color.lab2rgb(lab.astype(np.float64)) + # + #cv2.imshow("", out_img) + #cv2.waitKey(0) + + #import code + #code.interact(local=dict(globals(), **locals())) + + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/doc/doc_build_and_repository_info.md b/doc/doc_build_and_repository_info.md index 6fe4737..6fb36bb 100644 --- a/doc/doc_build_and_repository_info.md +++ b/doc/doc_build_and_repository_info.md @@ -2,6 +2,7 @@ DeepFaceLab officially supports Windows-only. If you want to support Mac/Linux/Docker - create a fork, it will be referenced here. +[Linux fork](https://github.com/lbfs/DeepFaceLab_Linux) by @lbfs #### **Installing dlib on Windows** diff --git a/facelib/MTCExtractor.py b/facelib/MTCExtractor.py index 41c779d..a43829b 100644 --- a/facelib/MTCExtractor.py +++ b/facelib/MTCExtractor.py @@ -3,15 +3,11 @@ import os import cv2 from pathlib import Path - -from .mtcnn import * +from nnlib import nnlib class MTCExtractor(object): - def __init__(self, keras, tf, tf_session): + def __init__(self): self.scale_to = 1920 - self.keras = keras - self.tf = tf - self.tf_session = tf_session self.min_face_size = self.scale_to * 0.042 self.thresh1 = 0.7 @@ -19,25 +15,72 @@ class MTCExtractor(object): self.thresh3 = 0.6 self.scale_factor = 0.95 + exec( nnlib.import_all(), locals(), globals() ) + PNet_Input = Input ( (None, None,3) ) + x = PNet_Input + x = Conv2D (10, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv1")(x) + x = PReLU (shared_axes=[1,2], name="PReLU1" )(x) + x = MaxPooling2D( pool_size=(2,2), strides=(2,2), padding='same' ) (x) + x = Conv2D (16, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv2")(x) + x = PReLU (shared_axes=[1,2], name="PReLU2" )(x) + x = Conv2D (32, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv3")(x) + x = PReLU (shared_axes=[1,2], name="PReLU3" )(x) + prob = Conv2D (2, kernel_size=(1,1), strides=(1,1), padding='valid', name="conv41")(x) + prob = Softmax()(prob) + x = Conv2D (4, kernel_size=(1,1), strides=(1,1), padding='valid', name="conv42")(x) + + PNet_model = Model(PNet_Input, [x,prob] ) + PNet_model.load_weights ( (Path(__file__).parent / 'mtcnn_pnet.h5').__str__() ) + + RNet_Input = Input ( (24, 24, 3) ) + x = RNet_Input + x = Conv2D (28, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv1")(x) + x = PReLU (shared_axes=[1,2], name="prelu1" )(x) + x = MaxPooling2D( pool_size=(3,3), strides=(2,2), padding='same' ) (x) + x = Conv2D (48, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv2")(x) + x = PReLU (shared_axes=[1,2], name="prelu2" )(x) + x = MaxPooling2D( pool_size=(3,3), strides=(2,2), padding='valid' ) (x) + x = Conv2D (64, kernel_size=(2,2), strides=(1,1), padding='valid', name="conv3")(x) + x = PReLU (shared_axes=[1,2], name="prelu3" )(x) + x = Lambda ( lambda x: K.reshape (x, (-1, np.prod(K.int_shape(x)[1:]),) ), output_shape=(np.prod(K.int_shape(x)[1:]),) ) (x) + x = Dense (128, name='conv4')(x) + x = PReLU (name="prelu4" )(x) + prob = Dense (2, name='conv51')(x) + prob = Softmax()(prob) + x = Dense (4, 
name='conv52')(x) + RNet_model = Model(RNet_Input, [x,prob] ) + RNet_model.load_weights ( (Path(__file__).parent / 'mtcnn_rnet.h5').__str__() ) + + ONet_Input = Input ( (48, 48, 3) ) + x = ONet_Input + x = Conv2D (32, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv1")(x) + x = PReLU (shared_axes=[1,2], name="prelu1" )(x) + x = MaxPooling2D( pool_size=(3,3), strides=(2,2), padding='same' ) (x) + x = Conv2D (64, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv2")(x) + x = PReLU (shared_axes=[1,2], name="prelu2" )(x) + x = MaxPooling2D( pool_size=(3,3), strides=(2,2), padding='valid' ) (x) + x = Conv2D (64, kernel_size=(3,3), strides=(1,1), padding='valid', name="conv3")(x) + x = PReLU (shared_axes=[1,2], name="prelu3" )(x) + x = MaxPooling2D( pool_size=(2,2), strides=(2,2), padding='same' ) (x) + x = Conv2D (128, kernel_size=(2,2), strides=(1,1), padding='valid', name="conv4")(x) + x = PReLU (shared_axes=[1,2], name="prelu4" )(x) + x = Lambda ( lambda x: K.reshape (x, (-1, np.prod(K.int_shape(x)[1:]),) ), output_shape=(np.prod(K.int_shape(x)[1:]),) ) (x) + x = Dense (256, name='conv5')(x) + x = PReLU (name="prelu5" )(x) + prob = Dense (2, name='conv61')(x) + prob = Softmax()(prob) + x1 = Dense (4, name='conv62')(x) + x2 = Dense (10, name='conv63')(x) + ONet_model = Model(ONet_Input, [x1,x2,prob] ) + ONet_model.load_weights ( (Path(__file__).parent / 'mtcnn_onet.h5').__str__() ) + + self.pnet_fun = K.function ( PNet_model.inputs, PNet_model.outputs ) + self.rnet_fun = K.function ( RNet_model.inputs, RNet_model.outputs ) + self.onet_fun = K.function ( ONet_model.inputs, ONet_model.outputs ) + def __enter__(self): - with self.tf.variable_scope('pnet2'): - data = self.tf.placeholder(self.tf.float32, (None,None,None,3), 'input') - pnet2 = PNet(self.tf, {'data':data}) - pnet2.load(str(Path(__file__).parent/'det1.npy'), self.tf_session) - with self.tf.variable_scope('rnet2'): - data = self.tf.placeholder(self.tf.float32, (None,24,24,3), 'input') - rnet2 = RNet(self.tf, {'data':data}) - rnet2.load(str(Path(__file__).parent/'det2.npy'), self.tf_session) - with self.tf.variable_scope('onet2'): - data = self.tf.placeholder(self.tf.float32, (None,48,48,3), 'input') - onet2 = ONet(self.tf, {'data':data}) - onet2.load(str(Path(__file__).parent/'det3.npy'), self.tf_session) - - self.pnet_fun = self.keras.backend.function([pnet2.layers['data']],[pnet2.layers['conv4-2'], pnet2.layers['prob1']]) - self.rnet_fun = self.keras.backend.function([rnet2.layers['data']],[rnet2.layers['conv5-2'], rnet2.layers['prob1']]) - self.onet_fun = self.keras.backend.function([onet2.layers['data']],[onet2.layers['conv6-2'], onet2.layers['conv6-3'], onet2.layers['prob1']]) - faces, pnts = detect_face ( np.zeros ( (self.scale_to, self.scale_to, 3)), self.min_face_size, self.pnet_fun, self.rnet_fun, self.onet_fun, [ self.thresh1, self.thresh2, self.thresh3 ], self.scale_factor ) + return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): @@ -47,7 +90,6 @@ class MTCExtractor(object): input_image = input_image[:,:,::-1].copy() (h, w, ch) = input_image.shape - input_scale = self.scale_to / (w if w > h else h) input_image = cv2.resize (input_image, ( int(w*input_scale), int(h*input_scale) ), interpolation=cv2.INTER_LINEAR) @@ -56,3 +98,249 @@ class MTCExtractor(object): return detected_faces +def detect_face(img, minsize, pnet, rnet, onet, threshold, factor): + """Detects faces in an image, and returns bounding boxes and points for them. 
+ img: input image + minsize: minimum faces' size + pnet, rnet, onet: caffemodel + threshold: threshold=[th1, th2, th3], th1-3 are three steps's threshold + factor: the factor used to create a scaling pyramid of face sizes to detect in the image. + """ + factor_count=0 + total_boxes=np.empty((0,9)) + points=np.empty(0) + h=img.shape[0] + w=img.shape[1] + minl=np.amin([h, w]) + m=12.0/minsize + minl=minl*m + # create scale pyramid + scales=[] + while minl>=12: + scales += [m*np.power(factor, factor_count)] + minl = minl*factor + factor_count += 1 + # first stage + for scale in scales: + hs=int(np.ceil(h*scale)) + ws=int(np.ceil(w*scale)) + #print ('scale %f %d %d' % (scale, ws,hs)) + im_data = imresample(img, (hs, ws)) + im_data = (im_data-127.5)*0.0078125 + img_x = np.expand_dims(im_data, 0) + img_y = np.transpose(img_x, (0,2,1,3)) + out = pnet([img_y]) + out0 = np.transpose(out[0], (0,2,1,3)) + out1 = np.transpose(out[1], (0,2,1,3)) + + boxes, _ = generateBoundingBox(out1[0,:,:,1].copy(), out0[0,:,:,:].copy(), scale, threshold[0]) + + # inter-scale nms + pick = nms(boxes.copy(), 0.5, 'Union') + if boxes.size>0 and pick.size>0: + boxes = boxes[pick,:] + total_boxes = np.append(total_boxes, boxes, axis=0) + + numbox = total_boxes.shape[0] + if numbox>0: + pick = nms(total_boxes.copy(), 0.7, 'Union') + total_boxes = total_boxes[pick,:] + regw = total_boxes[:,2]-total_boxes[:,0] + regh = total_boxes[:,3]-total_boxes[:,1] + qq1 = total_boxes[:,0]+total_boxes[:,5]*regw + qq2 = total_boxes[:,1]+total_boxes[:,6]*regh + qq3 = total_boxes[:,2]+total_boxes[:,7]*regw + qq4 = total_boxes[:,3]+total_boxes[:,8]*regh + total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:,4]])) + total_boxes = rerec(total_boxes.copy()) + total_boxes[:,0:4] = np.fix(total_boxes[:,0:4]).astype(np.int32) + dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h) + + numbox = total_boxes.shape[0] + if numbox>0: + # second stage + tempimg = np.zeros((24,24,3,numbox)) + for k in range(0,numbox): + tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3)) + tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:] + if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0: + tempimg[:,:,:,k] = imresample(tmp, (24, 24)) + else: + return np.empty() + tempimg = (tempimg-127.5)*0.0078125 + tempimg1 = np.transpose(tempimg, (3,1,0,2)) + out = rnet([tempimg1]) + out0 = np.transpose(out[0]) + out1 = np.transpose(out[1]) + score = out1[1,:] + ipass = np.where(score>threshold[1]) + total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)]) + mv = out0[:,ipass[0]] + if total_boxes.shape[0]>0: + pick = nms(total_boxes, 0.7, 'Union') + total_boxes = total_boxes[pick,:] + total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:,pick])) + total_boxes = rerec(total_boxes.copy()) + + numbox = total_boxes.shape[0] + if numbox>0: + # third stage + total_boxes = np.fix(total_boxes).astype(np.int32) + dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h) + tempimg = np.zeros((48,48,3,numbox)) + for k in range(0,numbox): + tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3)) + tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:] + if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0: + tempimg[:,:,:,k] = imresample(tmp, (48, 48)) + else: + return np.empty() + tempimg = (tempimg-127.5)*0.0078125 + tempimg1 = np.transpose(tempimg, (3,1,0,2)) + out = onet([tempimg1]) + out0 = np.transpose(out[0]) + 
out1 = np.transpose(out[1]) + out2 = np.transpose(out[2]) + score = out2[1,:] + points = out1 + ipass = np.where(score>threshold[2]) + points = points[:,ipass[0]] + total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)]) + mv = out0[:,ipass[0]] + + w = total_boxes[:,2]-total_boxes[:,0]+1 + h = total_boxes[:,3]-total_boxes[:,1]+1 + points[0:5,:] = np.tile(w,(5, 1))*points[0:5,:] + np.tile(total_boxes[:,0],(5, 1))-1 + points[5:10,:] = np.tile(h,(5, 1))*points[5:10,:] + np.tile(total_boxes[:,1],(5, 1))-1 + if total_boxes.shape[0]>0: + total_boxes = bbreg(total_boxes.copy(), np.transpose(mv)) + pick = nms(total_boxes.copy(), 0.7, 'Min') + total_boxes = total_boxes[pick,:] + points = points[:,pick] + + return total_boxes, points + + +# function [boundingbox] = bbreg(boundingbox,reg) +def bbreg(boundingbox,reg): + """Calibrate bounding boxes""" + if reg.shape[1]==1: + reg = np.reshape(reg, (reg.shape[2], reg.shape[3])) + + w = boundingbox[:,2]-boundingbox[:,0]+1 + h = boundingbox[:,3]-boundingbox[:,1]+1 + b1 = boundingbox[:,0]+reg[:,0]*w + b2 = boundingbox[:,1]+reg[:,1]*h + b3 = boundingbox[:,2]+reg[:,2]*w + b4 = boundingbox[:,3]+reg[:,3]*h + boundingbox[:,0:4] = np.transpose(np.vstack([b1, b2, b3, b4 ])) + return boundingbox + +def generateBoundingBox(imap, reg, scale, t): + """Use heatmap to generate bounding boxes""" + stride=2 + cellsize=12 + + imap = np.transpose(imap) + dx1 = np.transpose(reg[:,:,0]) + dy1 = np.transpose(reg[:,:,1]) + dx2 = np.transpose(reg[:,:,2]) + dy2 = np.transpose(reg[:,:,3]) + y, x = np.where(imap >= t) + if y.shape[0]==1: + dx1 = np.flipud(dx1) + dy1 = np.flipud(dy1) + dx2 = np.flipud(dx2) + dy2 = np.flipud(dy2) + score = imap[(y,x)] + reg = np.transpose(np.vstack([ dx1[(y,x)], dy1[(y,x)], dx2[(y,x)], dy2[(y,x)] ])) + if reg.size==0: + reg = np.empty((0,3)) + bb = np.transpose(np.vstack([y,x])) + q1 = np.fix((stride*bb+1)/scale) + q2 = np.fix((stride*bb+cellsize-1+1)/scale) + boundingbox = np.hstack([q1, q2, np.expand_dims(score,1), reg]) + return boundingbox, reg + +# function pick = nms(boxes,threshold,type) +def nms(boxes, threshold, method): + if boxes.size==0: + return np.empty((0,3)) + x1 = boxes[:,0] + y1 = boxes[:,1] + x2 = boxes[:,2] + y2 = boxes[:,3] + s = boxes[:,4] + area = (x2-x1+1) * (y2-y1+1) + I = np.argsort(s) + pick = np.zeros_like(s, dtype=np.int16) + counter = 0 + while I.size>0: + i = I[-1] + pick[counter] = i + counter += 1 + idx = I[0:-1] + xx1 = np.maximum(x1[i], x1[idx]) + yy1 = np.maximum(y1[i], y1[idx]) + xx2 = np.minimum(x2[i], x2[idx]) + yy2 = np.minimum(y2[i], y2[idx]) + w = np.maximum(0.0, xx2-xx1+1) + h = np.maximum(0.0, yy2-yy1+1) + inter = w * h + if method is 'Min': + o = inter / np.minimum(area[i], area[idx]) + else: + o = inter / (area[i] + area[idx] - inter) + I = I[np.where(o<=threshold)] + pick = pick[0:counter] + return pick + +# function [dy edy dx edx y ey x ex tmpw tmph] = pad(total_boxes,w,h) +def pad(total_boxes, w, h): + """Compute the padding coordinates (pad the bounding boxes to square)""" + tmpw = (total_boxes[:,2]-total_boxes[:,0]+1).astype(np.int32) + tmph = (total_boxes[:,3]-total_boxes[:,1]+1).astype(np.int32) + numbox = total_boxes.shape[0] + + dx = np.ones((numbox), dtype=np.int32) + dy = np.ones((numbox), dtype=np.int32) + edx = tmpw.copy().astype(np.int32) + edy = tmph.copy().astype(np.int32) + + x = total_boxes[:,0].copy().astype(np.int32) + y = total_boxes[:,1].copy().astype(np.int32) + ex = total_boxes[:,2].copy().astype(np.int32) + ey = 
total_boxes[:,3].copy().astype(np.int32) + + tmp = np.where(ex>w) + edx.flat[tmp] = np.expand_dims(-ex[tmp]+w+tmpw[tmp],1) + ex[tmp] = w + + tmp = np.where(ey>h) + edy.flat[tmp] = np.expand_dims(-ey[tmp]+h+tmph[tmp],1) + ey[tmp] = h + + tmp = np.where(x<1) + dx.flat[tmp] = np.expand_dims(2-x[tmp],1) + x[tmp] = 1 + + tmp = np.where(y<1) + dy.flat[tmp] = np.expand_dims(2-y[tmp],1) + y[tmp] = 1 + + return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph + +# function [bboxA] = rerec(bboxA) +def rerec(bboxA): + """Convert bboxA to square.""" + h = bboxA[:,3]-bboxA[:,1] + w = bboxA[:,2]-bboxA[:,0] + l = np.maximum(w, h) + bboxA[:,0] = bboxA[:,0]+w*0.5-l*0.5 + bboxA[:,1] = bboxA[:,1]+h*0.5-l*0.5 + bboxA[:,2:4] = bboxA[:,0:2] + np.transpose(np.tile(l,(2,1))) + return bboxA + +def imresample(img, sz): + im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_LINEAR) #@UndefinedVariable + return im_data diff --git a/facelib/det1.npy b/facelib/det1.npy deleted file mode 100644 index 7c05a2c5625e0f4e8c9f633b5ddef5e942b03032..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27368 zcmZ6y2{cvV_dbry^At&$GbEKr<-Yrsq%x%eQBuYR4M;^JAyXn5N+g+5G?C)I`z4x` zN)w@h=2@vU*U#ts`>yr>eg5a(b=SJ*oOSkH&)x6Ydq2-}_RjWno8u!YGDBpuNl@^* zzz7ptJ(Df&CS&wWLe@q^M)X6_^(MbQAA~G0hgSEwkaOth#GNztV8-rIYTOPSCTz0wW^8YW8++d)= z_uxyNH z^03lql<>=*RL(mJsVQLs$~N6tZWh7%PkbvzB3j0U)04kG~m`ou>z^>F9j1_%LRH;-$>3O z1vXDbo0v~76Bv;1!ihuNY^9#Ru`P%QK#eeCSWtLDD0x1a-PYV83~8D`ju%?mzE?f~ zzw2D4@P97^1{p0vmA2{B=94I&-xtL;>D38dlHJ17my6l1S=vO_BUb2s?vG$&OC$Cq z3&Cq+ykPc_906J9ELeB~*-x=tLH5>MqBKvHJ+(C=p>K8y{;qv0IMtv?qGw*ie8<&f z^wV^K`0zw>EcmSOS6{KfG*OrA@i}fAFFa+tp=>zeU-!Vm^>S2fMV9c@qr*b|YYKv} zAaPQ0XSdLMQo7)PaWh$aP>CNny#&i5Pt|*A#|w96=hcrgoC=8_n*jL!^bzAn_hU3oQbh#bxzvkp6w^-6|J*9KJvJrd0C5~ z#CJ97kDi5bd)mkX|2|<$nlw>SJuRr%)-3c%vm~9~)x>z7BtOgaXw*UvLGqFJLPysm zp>=33i45?R3|Qg6E_hwI!hdCy;(yXfJYl(fvZr!c;YqSNZXB*O-h?BL&t#DS>;IqB zv4}gXtzJBCaR+tQ+UJ zO;9oJHd5wgucmO7s$4FbwUxd|il+ur@vNw{mM=OwkEh7J0hbU1yXf5~xOVqm?wLJ_ zR-ZY+zsN@O&)e?M*bG!O4VpK1FXnt^C+cQ|7;+r@tJgFJCCwWmH!1&dv9@Seh2izprK|)HpyJ zWxRRGIx~z6SpMHItNsUOvHyWtx>Quoc9@1l{eR$<_}`Up6|S~5T>bwK?$G}McZkan z7iAZ{L2#2jL}rPIY_&;*P4|u2A3hvo(;mP_O97RHM$i`7307Yv;7_+P-26A6_`Gi; zmThUcr+XWI_WVpve2}1ZPtx%2G!eiGX})%^7JjMD247hREbmoDjkyXm`MVVkpVZBI zXd#dLsLm^lx1w>va`L-+101=cNpD5nBomIw(*yT?A^Xk|;l8PHJZhwLL;I*R5IIzx zy6MH>Eu#-Caby-f@$>~cr%eMj?b-Ymjlt}T!+3j%J@3$p1Je)3@SRsG3r#!#nVO@y zgOdh-IZugByYYxKYbKD?`b6LAy3_YY>F6v!kESiHVX5u zE$e2Bb(es?wkPx)i9n0?Rgko`1|mr($aaKqXH2#p+#{$Sxg5p} zwPi+Af~iu!Aun8Y3NI~J1c~EwxM$5uDrk$P4_saf3%4Pc>PhI!YgfP{P>y4hOtl@d)be?=Fhen^I?f2*K)`9ab*v=3^= zDTCv7!WUm_NB7p}MAKD+C)AqK%zIXR=ha|dxP20czxp7We0@eF^=g%}z`DenFJn-nfc;1AEt z;L$UUF-+$anLJIFS}v2sPmgqXz6U8)oO|Rvy#cjGu7npJt>;`)R4E2l0}2fM~U{r zZt}P^5b%^Kc@m;Wz5I%~j7y~mokQ=TqDX8KsJGr&GH1>XDD{Y^alr*BRw_gP z{N0bW3*{Ti?WL&X&m7jUeZH+lRX%KJ5{Kj66#71HrjDB)@x#lm;QPYeTv9s~|FkW{ zSNeOvS@adU4#~$MR})~}eO;_O`iwl-cOI(0jK}Hv&Y-q>0%mK7;<<#&SZs9tpg zh32ox_7Ot3_}-B`ds8T|{BRN;S?AH-_Dclfty6=S zCHt}2E|4zVvXqJ*zb=&jqEDvfnQ?<^a~?Bw8I12$VpfM^AhggK>z{nWU-m`pTWbe; z=U2i$iyEx|P=G65hB1MW6L?te2BDP_S#|piwg;r*-qD6UDef3r$Sh@^KUDbf&JsSn zI|Jth8`9>3s_@}*IVNtf$K3Lxa6jLZruUv=Z@Y$5&r78cC-H%u+rE}AzTblp>)t?* z_6-(NF&cIHwxdpzH0?fj7LS%na`q#I8RX1>{`!76yks1FlGuwQglg!#$&|-Ekmi!k z$mFhM@>EH6zCPs_pZZ}k|L|cY-7Viu0(Koo6}*RSGin6i-BoCdRWZh;J_WbVW{hrC z!kK%jAa3taR19q;y~__{OOrM05L-EPZ&=vegb}Vm$dGz3`3i8``xp0zNGnp+FL2nR$ zEX_VjJO#bvqqibiB{v+)*Iob*-BuVlcn6fm)Z*|(_GsZ`i`jm)L@ig28b3;|cSxVe z*^!|_o0qE8;%hoO_$CRx`qv6}iq){V5;-n%a3B2GF^1RFnDV2o5BWuLTR!%(7^dG! 
zuqs5J-&FSiW^5Fa$RB*d9bddqc6$_`XI{Zu1Y7Y=-<=^#AU7_h%;Wphr?c})R`I{G zn)&*sqpWn&a#)v}#Ak|2vBOL*fT%)=RhIaK+H?Z@bwx0==$*r3Lsi*`$*pw7ffaZm z$CRBYD!}r!$6)fbdg!%21XaIRp%ib-mp|DDkFUMM-KSLfdZ#Gh{I}wr=eOY4M?2ah z41rnbF=Q^7HOcwQ+O#Ac)O zKm>^W^xwh0UC1_lDkiP%_Jk{Vo>hFHbL=tht1zwGbgn;x3Wc;LgkSCrC zKMy^I4WpjH`NLB|=a>}V^LYsD-aZTH_xM^(P8W5gJ@6liEy6mM|9INeAj9yW@&D^- zM*n)+|EEy3rYs{mb6uzm<;a1~8jMsd!_9Z4(PG9iQPSyEWM$GZ?v6%12Ro~Y*2ip& z?GC}}U0U!<-WE=12: - scales += [m*np.power(factor, factor_count)] - minl = minl*factor - factor_count += 1 - # first stage - for scale in scales: - hs=int(np.ceil(h*scale)) - ws=int(np.ceil(w*scale)) - #print ('scale %f %d %d' % (scale, ws,hs)) - im_data = imresample(img, (hs, ws)) - im_data = (im_data-127.5)*0.0078125 - img_x = np.expand_dims(im_data, 0) - img_y = np.transpose(img_x, (0,2,1,3)) - out = pnet([img_y]) - out0 = np.transpose(out[0], (0,2,1,3)) - out1 = np.transpose(out[1], (0,2,1,3)) - - boxes, _ = generateBoundingBox(out1[0,:,:,1].copy(), out0[0,:,:,:].copy(), scale, threshold[0]) - - # inter-scale nms - pick = nms(boxes.copy(), 0.5, 'Union') - if boxes.size>0 and pick.size>0: - boxes = boxes[pick,:] - total_boxes = np.append(total_boxes, boxes, axis=0) - - numbox = total_boxes.shape[0] - if numbox>0: - pick = nms(total_boxes.copy(), 0.7, 'Union') - total_boxes = total_boxes[pick,:] - regw = total_boxes[:,2]-total_boxes[:,0] - regh = total_boxes[:,3]-total_boxes[:,1] - qq1 = total_boxes[:,0]+total_boxes[:,5]*regw - qq2 = total_boxes[:,1]+total_boxes[:,6]*regh - qq3 = total_boxes[:,2]+total_boxes[:,7]*regw - qq4 = total_boxes[:,3]+total_boxes[:,8]*regh - total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:,4]])) - total_boxes = rerec(total_boxes.copy()) - total_boxes[:,0:4] = np.fix(total_boxes[:,0:4]).astype(np.int32) - dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h) - - numbox = total_boxes.shape[0] - if numbox>0: - # second stage - tempimg = np.zeros((24,24,3,numbox)) - for k in range(0,numbox): - tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3)) - tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:] - if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0: - tempimg[:,:,:,k] = imresample(tmp, (24, 24)) - else: - return np.empty() - tempimg = (tempimg-127.5)*0.0078125 - tempimg1 = np.transpose(tempimg, (3,1,0,2)) - out = rnet([tempimg1]) - out0 = np.transpose(out[0]) - out1 = np.transpose(out[1]) - score = out1[1,:] - ipass = np.where(score>threshold[1]) - total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)]) - mv = out0[:,ipass[0]] - if total_boxes.shape[0]>0: - pick = nms(total_boxes, 0.7, 'Union') - total_boxes = total_boxes[pick,:] - total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:,pick])) - total_boxes = rerec(total_boxes.copy()) - - numbox = total_boxes.shape[0] - if numbox>0: - # third stage - total_boxes = np.fix(total_boxes).astype(np.int32) - dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h) - tempimg = np.zeros((48,48,3,numbox)) - for k in range(0,numbox): - tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3)) - tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:] - if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0: - tempimg[:,:,:,k] = imresample(tmp, (48, 48)) - else: - return np.empty() - tempimg = (tempimg-127.5)*0.0078125 - tempimg1 = np.transpose(tempimg, (3,1,0,2)) - out = onet([tempimg1]) - out0 = np.transpose(out[0]) - out1 = np.transpose(out[1]) - 
out2 = np.transpose(out[2]) - score = out2[1,:] - points = out1 - ipass = np.where(score>threshold[2]) - points = points[:,ipass[0]] - total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)]) - mv = out0[:,ipass[0]] - - w = total_boxes[:,2]-total_boxes[:,0]+1 - h = total_boxes[:,3]-total_boxes[:,1]+1 - points[0:5,:] = np.tile(w,(5, 1))*points[0:5,:] + np.tile(total_boxes[:,0],(5, 1))-1 - points[5:10,:] = np.tile(h,(5, 1))*points[5:10,:] + np.tile(total_boxes[:,1],(5, 1))-1 - if total_boxes.shape[0]>0: - total_boxes = bbreg(total_boxes.copy(), np.transpose(mv)) - pick = nms(total_boxes.copy(), 0.7, 'Min') - total_boxes = total_boxes[pick,:] - points = points[:,pick] - - return total_boxes, points - - -def bulk_detect_face(images, detection_window_size_ratio, pnet, rnet, onet, threshold, factor): - """Detects faces in a list of images - images: list containing input images - detection_window_size_ratio: ratio of minimum face size to smallest image dimension - pnet, rnet, onet: caffemodel - threshold: threshold=[th1 th2 th3], th1-3 are three steps's threshold [0-1] - factor: the factor used to create a scaling pyramid of face sizes to detect in the image. - """ - all_scales = [None] * len(images) - images_with_boxes = [None] * len(images) - - for i in range(len(images)): - images_with_boxes[i] = {'total_boxes': np.empty((0, 9))} - - # create scale pyramid - for index, img in enumerate(images): - all_scales[index] = [] - h = img.shape[0] - w = img.shape[1] - minsize = int(detection_window_size_ratio * np.minimum(w, h)) - factor_count = 0 - minl = np.amin([h, w]) - if minsize <= 12: - minsize = 12 - - m = 12.0 / minsize - minl = minl * m - while minl >= 12: - all_scales[index].append(m * np.power(factor, factor_count)) - minl = minl * factor - factor_count += 1 - - # # # # # # # # # # # # # - # first stage - fast proposal network (pnet) to obtain face candidates - # # # # # # # # # # # # # - - images_obj_per_resolution = {} - - # TODO: use some type of rounding to number module 8 to increase probability that pyramid images will have the same resolution across input images - - for index, scales in enumerate(all_scales): - h = images[index].shape[0] - w = images[index].shape[1] - - for scale in scales: - hs = int(np.ceil(h * scale)) - ws = int(np.ceil(w * scale)) - - if (ws, hs) not in images_obj_per_resolution: - images_obj_per_resolution[(ws, hs)] = [] - - im_data = imresample(images[index], (hs, ws)) - im_data = (im_data - 127.5) * 0.0078125 - img_y = np.transpose(im_data, (1, 0, 2)) # caffe uses different dimensions ordering - images_obj_per_resolution[(ws, hs)].append({'scale': scale, 'image': img_y, 'index': index}) - - for resolution in images_obj_per_resolution: - images_per_resolution = [i['image'] for i in images_obj_per_resolution[resolution]] - outs = pnet(images_per_resolution) - - for index in range(len(outs[0])): - scale = images_obj_per_resolution[resolution][index]['scale'] - image_index = images_obj_per_resolution[resolution][index]['index'] - out0 = np.transpose(outs[0][index], (1, 0, 2)) - out1 = np.transpose(outs[1][index], (1, 0, 2)) - - boxes, _ = generateBoundingBox(out1[:, :, 1].copy(), out0[:, :, :].copy(), scale, threshold[0]) - - # inter-scale nms - pick = nms(boxes.copy(), 0.5, 'Union') - if boxes.size > 0 and pick.size > 0: - boxes = boxes[pick, :] - images_with_boxes[image_index]['total_boxes'] = np.append(images_with_boxes[image_index]['total_boxes'], - boxes, - axis=0) - - for index, image_obj in 
enumerate(images_with_boxes): - numbox = image_obj['total_boxes'].shape[0] - if numbox > 0: - h = images[index].shape[0] - w = images[index].shape[1] - pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Union') - image_obj['total_boxes'] = image_obj['total_boxes'][pick, :] - regw = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0] - regh = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1] - qq1 = image_obj['total_boxes'][:, 0] + image_obj['total_boxes'][:, 5] * regw - qq2 = image_obj['total_boxes'][:, 1] + image_obj['total_boxes'][:, 6] * regh - qq3 = image_obj['total_boxes'][:, 2] + image_obj['total_boxes'][:, 7] * regw - qq4 = image_obj['total_boxes'][:, 3] + image_obj['total_boxes'][:, 8] * regh - image_obj['total_boxes'] = np.transpose(np.vstack([qq1, qq2, qq3, qq4, image_obj['total_boxes'][:, 4]])) - image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy()) - image_obj['total_boxes'][:, 0:4] = np.fix(image_obj['total_boxes'][:, 0:4]).astype(np.int32) - dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h) - - numbox = image_obj['total_boxes'].shape[0] - tempimg = np.zeros((24, 24, 3, numbox)) - - if numbox > 0: - for k in range(0, numbox): - tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3)) - tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :] - if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: - tempimg[:, :, :, k] = imresample(tmp, (24, 24)) - else: - return np.empty() - - tempimg = (tempimg - 127.5) * 0.0078125 - image_obj['rnet_input'] = np.transpose(tempimg, (3, 1, 0, 2)) - - # # # # # # # # # # # # # - # second stage - refinement of face candidates with rnet - # # # # # # # # # # # # # - - bulk_rnet_input = np.empty((0, 24, 24, 3)) - for index, image_obj in enumerate(images_with_boxes): - if 'rnet_input' in image_obj: - bulk_rnet_input = np.append(bulk_rnet_input, image_obj['rnet_input'], axis=0) - - out = rnet(bulk_rnet_input) - out0 = np.transpose(out[0]) - out1 = np.transpose(out[1]) - score = out1[1, :] - - i = 0 - for index, image_obj in enumerate(images_with_boxes): - if 'rnet_input' not in image_obj: - continue - - rnet_input_count = image_obj['rnet_input'].shape[0] - score_per_image = score[i:i + rnet_input_count] - out0_per_image = out0[:, i:i + rnet_input_count] - - ipass = np.where(score_per_image > threshold[1]) - image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(), - np.expand_dims(score_per_image[ipass].copy(), 1)]) - - mv = out0_per_image[:, ipass[0]] - - if image_obj['total_boxes'].shape[0] > 0: - h = images[index].shape[0] - w = images[index].shape[1] - pick = nms(image_obj['total_boxes'], 0.7, 'Union') - image_obj['total_boxes'] = image_obj['total_boxes'][pick, :] - image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv[:, pick])) - image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy()) - - numbox = image_obj['total_boxes'].shape[0] - - if numbox > 0: - tempimg = np.zeros((48, 48, 3, numbox)) - image_obj['total_boxes'] = np.fix(image_obj['total_boxes']).astype(np.int32) - dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h) - - for k in range(0, numbox): - tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3)) - tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :] - if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: - tempimg[:, :, :, k] = 
imresample(tmp, (48, 48)) - else: - return np.empty() - tempimg = (tempimg - 127.5) * 0.0078125 - image_obj['onet_input'] = np.transpose(tempimg, (3, 1, 0, 2)) - - i += rnet_input_count - - # # # # # # # # # # # # # - # third stage - further refinement and facial landmarks positions with onet - # # # # # # # # # # # # # - - bulk_onet_input = np.empty((0, 48, 48, 3)) - for index, image_obj in enumerate(images_with_boxes): - if 'onet_input' in image_obj: - bulk_onet_input = np.append(bulk_onet_input, image_obj['onet_input'], axis=0) - - out = onet(bulk_onet_input) - - out0 = np.transpose(out[0]) - out1 = np.transpose(out[1]) - out2 = np.transpose(out[2]) - score = out2[1, :] - points = out1 - - i = 0 - ret = [] - for index, image_obj in enumerate(images_with_boxes): - if 'onet_input' not in image_obj: - ret.append(None) - continue - - onet_input_count = image_obj['onet_input'].shape[0] - - out0_per_image = out0[:, i:i + onet_input_count] - score_per_image = score[i:i + onet_input_count] - points_per_image = points[:, i:i + onet_input_count] - - ipass = np.where(score_per_image > threshold[2]) - points_per_image = points_per_image[:, ipass[0]] - - image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(), - np.expand_dims(score_per_image[ipass].copy(), 1)]) - mv = out0_per_image[:, ipass[0]] - - w = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0] + 1 - h = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1] + 1 - points_per_image[0:5, :] = np.tile(w, (5, 1)) * points_per_image[0:5, :] + np.tile( - image_obj['total_boxes'][:, 0], (5, 1)) - 1 - points_per_image[5:10, :] = np.tile(h, (5, 1)) * points_per_image[5:10, :] + np.tile( - image_obj['total_boxes'][:, 1], (5, 1)) - 1 - - if image_obj['total_boxes'].shape[0] > 0: - image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv)) - pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Min') - image_obj['total_boxes'] = image_obj['total_boxes'][pick, :] - points_per_image = points_per_image[:, pick] - - ret.append((image_obj['total_boxes'], points_per_image)) - else: - ret.append(None) - - i += onet_input_count - - return ret - - -# function [boundingbox] = bbreg(boundingbox,reg) -def bbreg(boundingbox,reg): - """Calibrate bounding boxes""" - if reg.shape[1]==1: - reg = np.reshape(reg, (reg.shape[2], reg.shape[3])) - - w = boundingbox[:,2]-boundingbox[:,0]+1 - h = boundingbox[:,3]-boundingbox[:,1]+1 - b1 = boundingbox[:,0]+reg[:,0]*w - b2 = boundingbox[:,1]+reg[:,1]*h - b3 = boundingbox[:,2]+reg[:,2]*w - b4 = boundingbox[:,3]+reg[:,3]*h - boundingbox[:,0:4] = np.transpose(np.vstack([b1, b2, b3, b4 ])) - return boundingbox - -def generateBoundingBox(imap, reg, scale, t): - """Use heatmap to generate bounding boxes""" - stride=2 - cellsize=12 - - imap = np.transpose(imap) - dx1 = np.transpose(reg[:,:,0]) - dy1 = np.transpose(reg[:,:,1]) - dx2 = np.transpose(reg[:,:,2]) - dy2 = np.transpose(reg[:,:,3]) - y, x = np.where(imap >= t) - if y.shape[0]==1: - dx1 = np.flipud(dx1) - dy1 = np.flipud(dy1) - dx2 = np.flipud(dx2) - dy2 = np.flipud(dy2) - score = imap[(y,x)] - reg = np.transpose(np.vstack([ dx1[(y,x)], dy1[(y,x)], dx2[(y,x)], dy2[(y,x)] ])) - if reg.size==0: - reg = np.empty((0,3)) - bb = np.transpose(np.vstack([y,x])) - q1 = np.fix((stride*bb+1)/scale) - q2 = np.fix((stride*bb+cellsize-1+1)/scale) - boundingbox = np.hstack([q1, q2, np.expand_dims(score,1), reg]) - return boundingbox, reg - -# function pick = nms(boxes,threshold,type) -def nms(boxes, threshold, method): 
- if boxes.size==0: - return np.empty((0,3)) - x1 = boxes[:,0] - y1 = boxes[:,1] - x2 = boxes[:,2] - y2 = boxes[:,3] - s = boxes[:,4] - area = (x2-x1+1) * (y2-y1+1) - I = np.argsort(s) - pick = np.zeros_like(s, dtype=np.int16) - counter = 0 - while I.size>0: - i = I[-1] - pick[counter] = i - counter += 1 - idx = I[0:-1] - xx1 = np.maximum(x1[i], x1[idx]) - yy1 = np.maximum(y1[i], y1[idx]) - xx2 = np.minimum(x2[i], x2[idx]) - yy2 = np.minimum(y2[i], y2[idx]) - w = np.maximum(0.0, xx2-xx1+1) - h = np.maximum(0.0, yy2-yy1+1) - inter = w * h - if method is 'Min': - o = inter / np.minimum(area[i], area[idx]) - else: - o = inter / (area[i] + area[idx] - inter) - I = I[np.where(o<=threshold)] - pick = pick[0:counter] - return pick - -# function [dy edy dx edx y ey x ex tmpw tmph] = pad(total_boxes,w,h) -def pad(total_boxes, w, h): - """Compute the padding coordinates (pad the bounding boxes to square)""" - tmpw = (total_boxes[:,2]-total_boxes[:,0]+1).astype(np.int32) - tmph = (total_boxes[:,3]-total_boxes[:,1]+1).astype(np.int32) - numbox = total_boxes.shape[0] - - dx = np.ones((numbox), dtype=np.int32) - dy = np.ones((numbox), dtype=np.int32) - edx = tmpw.copy().astype(np.int32) - edy = tmph.copy().astype(np.int32) - - x = total_boxes[:,0].copy().astype(np.int32) - y = total_boxes[:,1].copy().astype(np.int32) - ex = total_boxes[:,2].copy().astype(np.int32) - ey = total_boxes[:,3].copy().astype(np.int32) - - tmp = np.where(ex>w) - edx.flat[tmp] = np.expand_dims(-ex[tmp]+w+tmpw[tmp],1) - ex[tmp] = w - - tmp = np.where(ey>h) - edy.flat[tmp] = np.expand_dims(-ey[tmp]+h+tmph[tmp],1) - ey[tmp] = h - - tmp = np.where(x<1) - dx.flat[tmp] = np.expand_dims(2-x[tmp],1) - x[tmp] = 1 - - tmp = np.where(y<1) - dy.flat[tmp] = np.expand_dims(2-y[tmp],1) - y[tmp] = 1 - - return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph - -# function [bboxA] = rerec(bboxA) -def rerec(bboxA): - """Convert bboxA to square.""" - h = bboxA[:,3]-bboxA[:,1] - w = bboxA[:,2]-bboxA[:,0] - l = np.maximum(w, h) - bboxA[:,0] = bboxA[:,0]+w*0.5-l*0.5 - bboxA[:,1] = bboxA[:,1]+h*0.5-l*0.5 - bboxA[:,2:4] = bboxA[:,0:2] + np.transpose(np.tile(l,(2,1))) - return bboxA - -def imresample(img, sz): - im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_LINEAR) #@UndefinedVariable - return im_data - - # This method is kept for debugging purpose -# h=img.shape[0] -# w=img.shape[1] -# hs, ws = sz -# dx = float(w) / ws -# dy = float(h) / hs -# im_data = np.zeros((hs,ws,3)) -# for a1 in range(0,hs): -# for a2 in range(0,ws): -# for a3 in range(0,3): -# im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3] -# return im_data - diff --git a/facelib/det3.npy b/facelib/mtcnn_onet.h5 similarity index 95% rename from facelib/det3.npy rename to facelib/mtcnn_onet.h5 index 90d5ba975440fa18c2d020da6739e96ed5e04905..bd615deee8aab8b5586f605708f67c2fb7d1acab 100644 GIT binary patch delta 70689 zcmeD^2Urxl)=Ni4L{UK$MN}-HR1tQPrAZM~EGQ@{(m@fBZXIk07DO-h-cdlYps$^HAzem>08lw@)DT=_`$LWWHf*)&a-xsc&0^2B9KZ64h^)4qlC+xgZ_H|~w05iToahlFMxe@T%2Vcz_K)z6^osG1 zhzt%3rSn_Rn8o@o{xU=`>dO(NAi>Y7v^Z0RXU#UKENRfk+jq2os9%*0NyabRj#wB! 
z8e}H*LV8Bi;pMa`*wrVOXb~`-Qgs&z4vZCv48LW<$J!Z~eE$_%p8klhthosC;WIR9 z-BH2GZ%0KIfp-K3=WdIvsLyas{wHkwZCoCCcbt_-_d1nsa*UYcl@1$r$-q?o6JVm> zQFJMbR1<8nYb+Y@_bJrwVgZ=kp)2HD>*M&R-g)Vdysd;Y4d5h^J|6z=0(hEs9*qxf z42LaO$KyTT!@~_f;K-P1aQD+`IRC~lFo^Gl*6-ssX-&Zkv5Y zo8s3Hp?Wi@7li`Y%Gw6U6?Vj1!aKk%k0<&t{t_|7@dlOD zD;gY_8HVDYT!W`qUB#PQAIF1Jk3pvonXu2-9r)$$)uNq+s|Bf#vZ?PcQwXwwQz+`1 zP6F833ZATQi+N*)Qw2BH5@Qx0g=P~D;x4QEi9Yu1C(vxU7wb>Tf#-UKVk525aNK7r z!Ta4-qT;W^QB3<#u)5#_G>dM-2hYY$`g#G25NGsc|2#P0)_h#BuLr#N-W>ncbvA&a ziD+)zbljrzH26GRC>+y#7}5L91Q@V39v}Yp1jd|xhC?pxhTuXrKI@tfbe86#R;Y)d zZI-1dd368?zT$^+&RYSRN0fYf9C%wl63u!@5c9s0FNKdS=TkSwY$b#a$B_Bgqrj^d z1>bDki-}9NqVQ%mg64ZNh*{HpsmQ_xaC)&Kp4&MO1@1Zmf<%UbreBOi&UYN)_a=5& ztJOQ?DEJ6QXe9yF*Go`6xAs=6tlQ^3Nj=Zc$bBR1mvBw+(BzuP%A*m8m|G7$-lzCdxJAZF5t0Tchu1AnW$sZGr{A6 zIJj==SbVjczMy%xE+W^>2Ebx-d*r-x16rrC9jIQhpx%5kBgXWa5A2SwN0)mkirf^G z1Vz@S$Z4Jt2w!RhZ(r<+J46{^($HBSJ}_H_uUo8yb0P-e%ac9fq7FNV9O@jEe<@jT zta-A?w#8uZx|u8D|J_a$o7!G*>w*ltFkc?0eO!xrH`xqc@Ll0k=Rvr*(>}pG{e7aM z6F$iGkp~zPbWkYYC&^Nmx0Xl@-bN+dv4)=G1bD-!+d?9-EwOQ}8i|*VQiTWJT|%$2 zPk<%zOGRVw5`p%NWng2=6(~?91zbA299>!;f;S8bfaMpTjK@^j0Z*yLCheI>uQlnX z%f%k9tSobK^GA5pVi4oUaLyTXSAMY>#}w4||IhuOs#~pW+b+wRkxW(Fb|v`L?3oH> z`K_9OUxX;Z|BLO^w)!9Zr6ec4_~gQ)9;`BPpxo{iw>EU*3TmD*^5@Q7XbKo5v|tfd z=YV&WKT1;eXOhiJ|AYa3Dvjf@Ea~M9O(zL}^e$J)*$Gw@+ig9+@AvjA9piKI-kc(_AjfQ#K*Q7!fg!H$nZsTfTik($wB z=n?D#hxRxDrfak&Mz{Ax;UNdX7+)K>VftlZRK6(=N!iSQ19JJpRn}9g>qY?muxUvB zbbm1R?HVdQ{XCdITNk~YIvTBh(HBO)2odOib;jg=LtQ~nZ#mKXV{O5-u2EoeL~j9Q zl0+!WG=RL9vZ4ng0UhSZE_cV{iuVkJkbK_6Fo?D zE+!6KHjNbSP7Q??DsAy3lQh)u&=;`I{2*8`as*JV*N0NjJ&*6p_JLd0r&8}4HiO5T z??88a8Vihek@!W&MeyRzh3MOs#%Rks5}e6RL&cB%Kz+X?cvPbqO!rv?NAP^$s5cox zS?Ge@44rVurhV9d@O07H<76+w^Xpp-2d1K?yEJ5%!}v-4S%QO)PeC=4EbxA%8d!9nQ95PA`ht4sB z-J2c2ZRY9VFJ62?_R($RGEfF=yKey}!dPn8r4DfC_;i@3^#%@Fz6duPr;D!%FG1IL zv4V%GaX8z$4eEF*4XjFOB5?IgM~fzJr#Bt4z?(cD@O9}d^yuOs>ixdUVDH*ov~8>@ zu~nrPAh%3=BwWjF@C*%RNhKZyE2$Oiq1eL(Kk zQZ!jx4yqcAgik!%;g8rKoS8WV_@7W97EGLslw=b{c7n#(wcr?@vN9R=zi){?r+R|( zPfvnQ3m#Hl6Z_}x=+FVY?#c&6BjymWbqhhpG;RDchld)Q1&UO)n#jF#+W&Cjd*ATSV&6BgD?k&xFOo zOulZEE_nMO5bi$K3+_)Y5;h?Jf=`4Sz{P!;@aTaAaG{3^dZw?4!a}sbVsu*=w(2zr zR`|}Nx5!RlB4{BxHvXn?;p-bnH9-@DLr1`|XLsPB;uyh%bM-|Q*C*r7%QmC$I~H2b z+qjuhDEdegG!UV!jW**!M=h}7OH*+9SU-{c2U*ejZS7%{Z#?YNXfzmEXoEMj3`R|g z8X(OF-qdHq-RN_8V`!Nq4~n{y?a zV3t20@0`~I1UH|A`I-jkyw)uKwW9UZ%LzM((Sf)5F>O*2YPNznedZN4=5P~1@$_Zj z)3=U-vmMq$Vxt0(?KK;P%#R1BY+Ay=uzle1{8#9tkrFhsGX-zS2Z5l&jLTrbD?8!$ zmK%{lw;&Mfxdwp_L%ws}4=a zGwc)bW&%<1gLETy5kGx!NRNiPy-VKsPOV3w$`1A$v`Nu zceR2&40k}wy>0RM(W$t_xXtMJw=`6*!!Z~ioq+ZGtq@GOG7CNh)1mBfvYzPBnKt}g zqMrc{&E1HUta{>z)B14yz6?OF*?}fL2|#uZdSGl|I2!p#6M-n;ltH($U0sVW$jiFC78Dr-9E7kl1r{Q!|_Lqh-b5==Nue6n~(S( zDIarJv}9B$zky%^Vb&&`3Ts+lAWAp~lhiK&KlON!GBOe0ZkL98v~Ev{E+(Vb$#(ea z#@@(Y0f-vRZYT;0enthI9fVCb?0`?>7NAx;>%mUNDnyfzmz3j`lXz?N75+rC5Nft@ z0ZEwETTNXaz5-;_D@2apc7hix3F71@C8(G;RCqsC1$YfxN{yr&$&{auV(z)4^9v3G z--OG!UTRlSwp<$I?;Q<0tZz?iu@)M#;1RmI{Ux$~ z*?kvfWys@A`P0DcwKJhg9#8P8?It)p%O18mss`(a1Y*VDWE^<$7EJ4>A+Rl;g72vW z0S!AN!tQ)i!M9JnF|Y3-=+n9YIQew~Hqo}YzKtx=F$5uf?JYoUOD?z@z6NwTvJiH@ zas-mYU03oyZaIj1-JT7<9Y_$YeLV%dJ7^(jRIjaw$neJTJwot7x#_lcOEzoi&H#9T2330kvZ`j@L5&ymM0Q7jlVBqT$fpA}l6DQ3;udk{A zQ??-{+mU(zbm*a4BL_4@M(+KZ==8CSg zzJi*a!k|Z^QJ{1C!?~t^??gfBib&(qD%is66I6a6Cp!B~K{S4kD&A*Fb`fOjy#%cz zZV94%Gla3(!-3YaUg*B*8_HET3*5$ga3_`5;LG*}xL1$Xu+XMG*xUIzZ0xrfJf2X5 zY~770l{Gg&hr#MF?dg52-NOb32;H&%1TDeWf@8uUVS7Hm%`nhn)I>1X=R0*~;RY(< zWG*~uZVElkMxmi(@EK~EsXA8a-2gi_Nu-QcYkN+;eM1C zV@aslf1(y5vHfMyoIc?=ME^WK-zpx5y;_1yl$Mi1 z|HB(lH|Ij26{P{6+P8xzcMhg5nOIP`tsV$S0q8M)meczx9T`0}0enec0S&IVhPz+9 
z0@l{;@SqKQz^k79;l#kT9-@$Vs`5Rw>gQ_K-shmdu4?a4>8QhT-;} zN3iT`HQcWMI=JrpKtXYHvJrff76J@E6oGY|k`KR+QL9wNHvieCo+Rg)C>?Sl|$X}mjAfUcfulyAg7$9<(YBeUsHssZPM<#? zk=YNfp@k~5(3|lRksZ z^ZH{f3KTpW!b6*$p91%GFGa?e+JXjWY~c}|7Wl=IeBogCuA)qOU3s8q9=u{UAJ18( zEXZhcih6cEiQh~k6MYO$gc`<;NF2UT52r>0QRMtI7&mP`T$|SmS$`TwgsrngV-?H^ zN3&w!khcphN;(5BhO|Hfo(WKUn*i#a#z-Pqqc=Dy`wnXn3Rnf&6OG)r;l|J0@PsQo zaBissFj{yWWn_!c$*@i!w-FC(PHT!S^{ue_*;{B?j0<)yBnxofJ}-nOg#i-_TWY}e z7&P5b2~H240u~)Hf|rauk+DK+@PJ4Jn)Mb@uG3G!k(;#P?cRzYC}A5=oK+8fri`)g zm>qbX(^xp({08jvNfsnNTn<~=cgN3FjA3UT8_^L}3Kt2~@Cy5Q!Z#Iwbq%$#CI2gb zEqOH?eCr`XYr1$seV6@cIRyDMIP^Es}D+Z>gG*KYr(vZzMa)@T%j7H2x|Bh^A#;5oK;#>PJAT{~UKGiEO0fhxJSNXJuNS6=6+m zxVXv>6Q1VFbQ+Jr$hKGBga=7VO7OFAzb&aGen|4-M=NX0NV{Xf!z8Yran!`#ulC4gZ6vmVLzjp*g&uc?_4axix)e? zfiK3x%et#!#XQdYpAaK4^e)q zK2*z~o|M{(G&0A^dOa1iZZjo&euZ#!YAf7jqdJ;xO5vLd-SB`>*HDKjPvmX#5ha8) z5QX(hLBUJjps8CwfJu+mg8D~;;dj+ug5LHM;h^3d!Mwq9QRA>gw6~}u1Q)l0A%YGd z;Jy=jeEbS={<<32Vg8;FIE;a3JbFQk;I-hwU{W6FCmbdeRQ4nF&2f}w*Q=n3XNs^@ z+9_bC=R@t%H3BdEXQEb?uDQM90O;%17S6JBLg5D2ghrRrshMV1(6IVXQF?wJnm4~5 zK2I&d^?XzC-K*>2ir%weyZA)dZet=fGb@NdhHXW|j?csSY4>p2z}xuYPy*|aSC1h+ zJV||UT1t%cHp5dse?Xr6?kL@96>(&NExe1T!bY!W!c{KhX)*c4Jzzvxy-T8w9aRRy&lsRK!p6bue^uG=s*6A5dsCLJ6Py!@MtZ`7JMKVG;Jg z7r;m~dteB-x*-f_TWm!Zw}#{5UWX|~C2go@H2|*e(pls=<~#21t}NQRvkjiQ#u^27 zYXsdktU!I%wgMv-od9>J&B!{#87w{enX>9=Pc8oNjU;x>^hCE`y$4hLw*!~rM})=B zt>~TG5i|&QLA_u0MSnNb1K0dg;qlMwfpVM@)Ek#=Sy&uLSZ!MeWu96?lNlzK*B)-o z9kypIka^OOXtuVa(Cv&9HE%HzPAu}KmS-9hk+*V)f)&#V?|C!vjT|}%5$P_#-g*Lu z)8siQ%1*|UMoz*TR`GD3{)VD^d>!iW;e|MTF(1Ak(q5pen+tXFx(KZFFA)7cr@}8j z39wt!;dpDPKKN9?!}HE3qTRj*)Zydzk+J6uV$P%K;Nv?3n57^GoEuNhb#UE)_nEZA zo<(D^#nyVnowb+2C#}`^$j`-~c2OJ7W%DN_9I)*AY9KoJsxi<}Iwow;ybom*(gJMs zw}V%;ETPKeL4>@T9Beex1n-FFfx}AvaMkNYlw(FO>a+ZABDmKnf^6n^5Ik-D8Qk}o zM|}(XLS(J!Ksd?VMA326@U00eP>}K~?3nrt7p>fb$Eq7qF~QFW-MdY}sH^9IADD)^ z=^W%Adq0-%)W{n1AM@dCh4!$%>nv*OLPc1# z8-DI_AH*JsCWUX3yoB4wBq4W9Ve_>)l<&P%G{qEvfabGM18^7*8+w2`V&@AZ&YJ-* zk9_cXn-loHtushpAVU9(z zK(w89pSWD$LVD6kP=-k=WAlR)P(K1LKG=?XE=vppV-Z2ZMnhypG zXQ*L8f<2bI90f9ZAF_P=SOfG{83BEkw1hLm2f+pbeWA#-7hKJk6U-XB0veO?i{Ypl zUl1?1CxRcoqr0|^VE!xzhrT-K{W3B=?HgppakYG%7KN$^&eKSM;O9X`dYXel!(TIu|t_@{&UnO9R zX_TV%TK>c)hH&!H7SL351F3K9f_G&!6U}I+BYMtjCDK{b93Shm3iLSE6P>bN1)feN zQ{a~?uGIXC+xa~Xtw!sjAKYwt99-@`1-`p>5}sD4+gav zP0bpTfV%fzh8o&j$DiMSMC!8MNb80c*l(>0wS(87IX5D}LkDdjcWnu<&`ySrPN~8- z^%2$O%U~!!Vlz>+>j_%u{|1JTloNs(s(2}&aImMFXwY{v>bPS+!Lr94p{{QO(H+lZ z5N5R(c}~cu3R<>?{!8D1EW4qw^~kpP?4Xm_<%%caYIp=3a+ry9uBKy`aWnV>A8Y{c zKZN1i&U)b6mzijt{Ul0ZW-3yjbsTJ#T?%qn3?ZP-9pd8h$8-!aY$<4)ejmyz3+0Z@N5d8J|IX`BkE!-H9Ma}Kl5WAbq#)o9(z?bD$ z2_;~^P9#{C&n0AK3PFbP5)Gg_dHo;*>J@JsN*`{80wUC*(y}&u`9AdsZ_jY( zVI2)7mzjVyLpSK)CJKv_Sj2o`1U<^8c-j{Pwh!) 
zHobz@dbdGuTn+&;$&#XmZvi8oJfTi6)TM6cT_;SAir}eP!{FsBO;9!|8VHfg7Le@K z0^U(mKn2-G`0eZjFr(jKbWB?Y-+8GB!?kZw>I2^5^LmYtf!j*LqJb>fvql?rZR(2L z&2`YSSxw>aSzwo@1KJ}DKqrp3rzSqRgx2rsgpL+; zLrvtj^XuK!1LVq&li-zm>CmEuHhiRGhU#r>g;Eag#@mz55Gk`Q5jABqy4NfR7(Tki z-;lJ6;ERfK&CX~5og*)(rw#7#R|YPo(EW`lC1oa_H*yJn>OwgJWj&P@3p49o~=Zt z>)ok^gU%55a~l#yvXg}e1~vfo#;+obPxJ;0Ht#3;2Ac3q8}AohKX(po&0Yi=A2B4R zj3yhR?$$Rchmkw*y_tb{TfHr4XLD6zEIk9IybQ%J`yWT~i)8V9`v59NR7kWO(i&>E zYE5YDD+aUHtp`u~CJ{qg@Sx8Z3iSa?g#9fbnx$`vpS;*hA8Oi-u4-Yv&5^0p1?wFs zF4vA|r}R{K`K~q}J>E~f&xsJ`k=c6Gr7xF+rdQ*rQ5kEfnOz4`bNAf0NcC@L+471k z^>M7CWsZ|!ZvKHd;reF{grQIG<-S|ylS7Vb%{LgI1=5?#pmv1_$95SGQhW7;W1ZF# zeQu5gmoezP9Tj5UYHZ3jaV_V$2(54oJ< zy=yLLnfVm$YX1$Ght8(;pct(xT15x1nM-Kgx(5>{LL?gZ_l1<*l zmycS+zdGB2a#>*m#tke!&)t5ylxp zHR2*Q?b&;rQrJ|KvaXp(zh@&naA14<`9c$T&~hhIEpQ?JD$;=$1XIvo*KA?bd;xUL zQ-R&=5`gW~`RJj(7BPB#Jv1q{37D9E0u>zJgAN=db@1i&S^Vva%~1BR{@_W>E|B@I zJu&6H2kLw*8NJ)~FxOYX5#7$SrS?V4$DXdbSmj$b{B&V1P!4HKiE_H4`As#1v%@r? z!u}&*t?v}-WBse(Y}8QH({wXlcp(5U_B6msMMdEB;Q`P=XBS8eUIg#DxDtAAM7it9 z9dG!a*_gjd=bms>pgA!ws~(YiA(%2S`$lEPpQ7Av@`RU^)S+xa654!I3pO$A0DCUX zqT(BbBB5$Hl<&R*_+M)WZ?9er7mt(2_hZem#^g-gF}k}b()%-dNXPwFtA*ez7P=y} zj#F{M+fhRQ1G|Ke_GVeO-}4}cd|*07IK)_2SXB5pchO%OLds3gvU^~j<VWH9&Dt{9s?&+F=ztyr( z&`DE#%1<6!xE7$fdPQK+!z)nMAq6~aR)7}VRfTt*zMw~^4xy*%lsQVZ=gJIVjZ9;5X_y3!UFn4=Uzt<=5+o`TfssuuM>xPNXckuVt4(O1_RG^=e4E!@UfJ<$j!}I7Eh}gG<_%!w~_@cV_l%5Ab`JO6@j0zZdLYR1QcPvHCf5ma->dvM&G1kmhJ z8~E~cU+`Js2uQa~fx5A4DV;Cp!4emJ!fu~7qV&&Fg~N6Do4p%SE{5}{oD@F)sLobO z$2XrEvV1#b?%NWn%~yoS1`6=S^;7YdjiZrqzYX}4+BD+X%y+1t+(c|Be~3T(q%J0J ze(wtJX6RGe9tzm2(Q%?6r4ja^B|t9k2L>*)K#qBt{CyJ)KtLe|CTix;YX1uO%|4L| z`{WK(o_|AAo}EO~zGYCm%ob2b1Y_W?%Q3L&?T;`z`2o1~y(x5mqb_uwauhkwdrMrk zd_sh;okHQLWYD>nEmGxmA;G8P?nHc_*5GB}8c6Pc1k4TxfYzERpjEet=;7QfsuxiR z!mLf8?Zx|`ph;ta%|Hhby?+rtYj%-(w;~_eHj)v&YNrTSoNoY#6^j9-R}2>PYz|_t z2*C#J`si5RVATGg0~Hvw6nVUR4N{J{LCYJ4@c7+Vh;Ks{q5ZvQqMlv?_P_O`v3iW0<-UN73onWK!K;KH*M9_i z$Y!wpiWV@W?*Q7_m<%;;oChtoxxt>I#qjeo4~X{UgVSRiiM6_2p|h}&Ai9w()H;0@ zG$ETRpgz~85f5j_LHP^TplH?rp?kcNu+2+ts_>Dv@Rj~-p&f|IoxSjqrJeUq{=D|7 zmZPH#_%FQkh%Np13-2%Qg1!2EMi+_}qQ-jOcxZwyULE9&HmWs56K$7($BG_6yYWU4 zdZQ=KT&)L&_Oc+NV2ZGCLQ~|oKm(9l+r)#FGD|_PO*?^R?*M98gGON9lipy*2RC%( z%zCI3xC^Qt2!MMZ0K(+`O@h+eM_HUewL($q zU4c%TCrb4E2$p%b07sWkM&DlXQ6t(`I|#pHx3!OQhxwYIlaqpxb%#aJd+bEyc{K*? 
zI-X#eiT4QSwM>DEk!L}{+H+vU=woPl(rIeu$}{Mza|7%=TMdkf>VQXD6Vzn8uApyz z21?$mEA)FsF5~yIUzO`v|EA@e7Ydf0ZYS(5Hc86;HpRkn(J%%6?w)43Jzb9qiwxTE z>x+6?nv3S=E+geF-SaYXWwm!$-U+?h*QZC{$e58Bt3XuUL;$R-O^@L7ccVE%JI;@ZtOeDp>i zZtR+g&z?=jgF1GBlLDp_mV2(lJxX?hc}m~m$VQ5A&bMher;RT*_t=4i;Tv(XtqJrG zmI1Hc1cH~nx`Vjs^61p{sVIP4K-qmz!>czHP#DCrbYZ#wlrL8nhq2lpxC83G4z{lOsoGPR?qVA5suR*;HUro@9yZ8`&< zwVLSgu6(qAz7k$|u8By0%?W(eD~95`B;)m+l_Bc36v^%K!CQN+#7m~U!tbt6r;P4D zB&Hv|fYvSEg=PpmaYS-cv^CL{1dp1_gFO$_p>C_2Q2G3Aba2f)l+yJOGB-VsCN1RU zEW544@8q`aev{{tI2k_=;1!pK5BPv z#+dXTD!muwZa=?2$0S+aBCsnz7e%hL411fLYrAWb#dv7MzeLaXcRKC_E&WxYLPQ## zsJIXGweh6Bwtc}Lll+O;=97e$`YRF3@3}$cX&7YrBtfBTNBGh46lkNLM6a+6z_LN= zVCjgZV7upB!k%2}i4R(A1*tdHs5NINz!Qd7kXCXEo^{$8yO??r0WnE<^nNF}Z{S}j z+Wa%Jeau5j2bZ9~J~zY3*?Xv2Zhq_qf5}Kg_)tAgXeX0wIVU(B z1;3t7_)J@klNTI9LG+%2N$h&uYIy|e!(Rc84few~tTywX6qyR1jhF_Lx6Om|mT8ml zOh*UHDYEZT`(BH{(u3cqRRb@e;o}+ur9BhSj4_5_tM(9JJUUm{ws#x&bW#H7F|Y-3 zc9B0cJ1H;dFrf%q4?Bz+nd@P*1=GQlq$yyC)(s$kQ3a_v$KbECyW=Akf@zfH){MMI+(W^Ak$WKWi$+-&{?3RTOt;`06 zOA5f+HxcN#za_OkrYCIHv?+Db%#{D6m5MModk%UyusM8ocP832?LJ!NJb;)P=}Sec zeIR`LFV8(UEd^~&CQ*-D3-Jl3~V-a8MxcW84e?j;-LM65iojUCfpE{ zO(?#Bc+S+Rz+qi~Fhh?XM(X>)_OvbN-C|9=F~kSA9KsXHu8AQIe+@)h8y2I{r<$Yw z?-WIs_w1mYZzu!)*!h&e*cT;+x>1ANcOcQ=Nnm7PJUTKs#PZF-<(41fHsWpj_Tfy= zo9M>a#}t{iVg&!f?roswX$o6*x53@bNGeI~6pv$%z4sZ+_jkxUv5R^i<4+hDFNkTYHcYR@hFItbZm-=QhDe&nTj3+)CsN zG}&Q|$%6M>q$fXp7OCx2rc=U{X#ek>aDG-Gm3~CghzXsDRy2j}LvV#f1h+~CR-d?#BE>e(DTJ(6Ll3!3Ky25__L5G=7& zp);$0X1n|Bu@^z^Z2a~d7QSAAcz-J_kiK|_XeK+8$DZYqg3&!LtX000?R4DADoZp# zPkkVL;kTK^T4fRQ_ZDDX@Jf`eJW!nY%?;Wd>WF%zDJ)_A$QdKTN3}<>gPmKL`O_e> zv$R3v)$?WWpiO)t(rJx>N;4zDhz+Uw-eeoj)UAg z5(nOC>C656UovHHMS9L%h5CjCz@R5eT+6zH^yFGVvrz+auvjW=`^g5;>@~K=e&$7b z*HMTsC)%$Kg^!Y}tYPpve9@W)JNqjz&%^{F&{78iYTpYj)mnHvi=fCOmg~*Y!N}Xm z?7{A1=rO*Q)XPrbUnJ$X_yRLAchYoW&p*A#uBpNBVI>10{{)o9H<7LPWO2-$D?+z_ zAPnh0jmFnoGL@JHxS6IzPpXWgha58L)ce=yfrwn1IY5P0NI7BiW=p6T(Eu)|nn_*b zP^uL12JBC!LgulhxM7bg-m>+F1CN&D!mou?qVv#-?wNdrP876pZ0#`W*=)s+e(#D? 
zt_MN1;WS<{Q;Bw04`nhb@?dm*HGN#;$ZPVZ6gpYD^4-V%iSl`8-Y~UI*zKIk@|~kl zX=Vp=iJgrBJsPks*#tIsZZP-0KS}gF?*h|Gn+nSU-?67J`-`0Wza$SW4oJ|xr9<@d zyDj9ie;YHu){lHWoy?Xe>tOEnFTxMII>|uy4N!AI7Kgee@UGD>c`^;c8Xq6jU2+|> zkF=9xYouUiI$>w4(nTtrO3j7SiI#2=zY*bdV+Mt zRjbwE9BZCY=&8()$E4waFW&r&v@L(!t0=NH=pbL$$6-M2G;q&(3MrB4#AH|^-gY_1 z{kxNJ%|i|qRv%!Gb|RLw?cp}Bjk&U9qzvl2s&W(cax9D)$$Lz{b2YUP-WIYE zb>xfS=$C4?Yj(F#7Gnxs`u6pz%pbw)?#J*m`VWQR zvetrA8FyGp+5?3z~NQ`jN0~$jBbht`4JCEPrF3u zO>lw{>q2NqpT1+>9e~pgX@JdRb)LBEEGaEGDX0!Oi;ng0ahpsbUR^T=;_ey2ZjnG_ z^`}a(GP|BE^>)CdF*)R`!*KlUEQ`;^24F>)9;!LIpzOUKzSL`Mq3kw=!qS0;>_ndc zSFiG#dUeOZJ=0t|EDY&wC4Eph@j`TL!Z4a!+63QS%fvN~!(spMwJfJ}qPYG35PXtm ziHDuLsJ7*9Y`Gi<8B(e+u1yk2AFWKKk9aBEdjB)ioZ&;u~3#1p3WLUyF4b-R|a*cy+a+HB=@BXZz#)g7vCl{wLgw2HTux8 z;EFJ+@d({E=to>0D#uU0@I}+IiC}ogmioGta_3cEDE;jm>C_vB&sTnA_tf^%#k&o; zLbDkj8k)^aeo1GtZ!DlTH4Vm_C&GvfE7B66#=lhw_<4*AjZy4d)PzkDAE1M&Wc}Sy z^k7~oEqX8psoWQkex^+;-!wtF?mX(1c^jr;1QgW9z)5R)IzQ<-D0bOH*Wzk?_Gu^H zk`CqY(Fpg5d~x})AWYfU4_mXA@NbPpB>bQkD~WwWVU9FilyDS=jID&^#tkg7Hxy4i z?FYMeu7Xh$$|bCRuO)~T4)Il9a)rgOgSq0~<8Y*C1imb~PF6%b=CLXiE4|K>U)q9s z_$LkVm$y3NLBHyWPp#CYvl4-B?0$HcvT(%+zCz_3${ijlnq@YCRxgyJ=WQf|fp|t>WUM+LSi*VAK$Akp7ujHinbf&A zfbQv?PosG~Ra>}{hc_*S*TwUx=2d;Ze8zFO$}%9rJ`g<0H}a-O@}#&)lciM6E8JDQL5hDrH;=OXGaBKS*2s_b; znd2sNzlA+mT6ct1jHW18ZiBykwdJ;(s`2ZJM}RAei1#yfn&zs{zbv_o9XG2{-SIT< zm=(y@7yt}PHid7s5zM~*B57M02!>ZXNL=?4$WDqS#oAL~uA!NrW-*ED_KoP}y#`Fu zp{|FQyO+V}iGkpBWihTW&c}@Es6HUR92{;M^O?kqsRkzUy($89-5*GgEDhyxha*w` zn;U=a=Rq}oy=8vslM>l;?1-q$=BD|jqf&zY%6y`(Ia~DQc!yBJ~fHw0N}Q)uzkfcbr$SJFs^{$mqc zJEU@i@=Y7)OPiWp(Tm+tm@qq$M>9crCpZ zMN+Q3_WOJIP&XbnRjtOmyDsv$qA(KcU4%DdLrAlcG1pq5NMLj(hPh59mt?m=Y~Nzz zj%o|evK-H|W(?*j(jUpV3LRp&qX)j%+R2X}J43B*oyEha{Q0a!F7T5^1De_jIN-oU zZ1eWz&8a;o`DUJrf?gS&7`aiTB3vX@R_DNkC`|YJD8Z28A4C?#vG{D3ItCQGkU?*! 
zaLc%CZt`&lKG_wGURt3mJ^O%OU z*kxLV*Rmg>L)>au(KrP5zd8mNqBctSroL&h;P8BOvMz&E;Rb%OvPJK*K{Q_QVWjb_ zkUBS(eYZUjhPuwjnU{W{8C$=SdhsOkI4Y4C8_SaD;*)$iRYjflYE1so4Smj5<2%z9 z{^UU{%Wsl{+F)1MYyYVKH?T?V}CGh~X+$^lQZ~Fw?pU2TrKK)Heg^ zbe%lle&sM!;{{P2BLazXw>%x^w+q5`Cs4JbI&`)@Ox?|A5pp$&=jV5m1<{JsO<5NX z3YpOU&Xb-euDtA{7JU?HfVb#4F0(TguG!h6U9cOuy*!jxDUGDf@$)ISTT2$7xe0?8 ztmd`O%_w#X!2?AMB9#_m+w@L$pbDtO#!3QP8kg{9Nf(xs`SCg5?y+KhIbPS1giVnV zB;@O8tp8z|=*O%EQrhtYna2 z3GI?+@ob*EDcjp|-TXq0wn%-Lk6=CGn&2pJB+99`6IM?&MHT;6jCAWll@}sjv@eb> zdz8#B7u(QAofI&P??*Sp22j6K+B9PaveRdQ|KpUvg^z3bsvU>vyxjBbh*LI~)>b2( z&Uq-Z-)kOk{e|q1Q3c)2Mj&|>nT6V_xA^JtN69DOO3|7o0slMU=ub%f&*T3bR=1V> zAsqb$R{uW?iu@P(%fAzjeEvwH^Zx_q{2gAGlIq*=>M!N*@YCP@i@&Y?_gDOj{r|T$ zfL-1dKI^#8PE{y9c*^KbY5=XgW)J|NqB{YFvsfi^8K%2C+}^$e83^n!L|laqEf@AVyfjGE&{cT)|ITj;*|XDECP-|#h?|wGkgM?6w78+G=yKtR#fula z{2SZ;d*^Bs;uF6%NMe7Gq~S_QAt9l$P+^ucX9r2`50W-qE*b6{;O8H*D@ev)*#G|q zk~MVY2`>^7b`^>BAB(Wr;(sH^{dWZT&j>;xK5-sGQbIx#jlKdiVFCurnUVT6hq&|H zMQr}|{D8ON$H-$%QCb#ygS`K{m0U6xh1+KBnkLs~XjkNdcK z0@tbg4E4>q&U!RlWvk@wFvqSoa)s_QaK+RG63R^*v<-4uef1B)9%c&z9MSBz7b&f6JK)piTMwd6uekrq*F`%Q-4wlOafy&DYe zSO_0sxLU26+{e?+xjdt#@Or&1v8s?KYfF#9jjM^Q!me_}$3H+S{VMi1jU(I@`ZiUG zJSpxuFA^i5-kn@>F> z?mSRZ=!P#nBDS=SU$Do z9wOOeE9}A^mIs25>*BVi|RUH_5(gE+8 zy23b%&w`IFyGh?HBd%MKF}PeTK>1!PcDr*b^f*fh+HK8oXQnBc7Gg}_S)8RugO}5c zX;sXX?j)}L#1+7KI92fC5l@&D59i`njvIMwc$M3J+XuJ%H}Z9ba+%mywxnRoOMYRl zBE3G;jVHx21zLwrfuC>+bmwoQJLA8juik8^^q5IA*B=siD2X&|pY;P>m!;G28{a^_ zw3RMS^`qf6B7z-%Ksc@%1vJXJ2u?Drh{)xo`mg+X>((6nMRlI$l{Q9gbh+;Jag7(uSbV?1=Jt>db za{dl`HK|cBC(r@a3%syn#MCBYT^>W`CE@IXWLzV=hotYC!_Arhg2cMd7Z`5cVk0k@ zg)4U7BCc}^*y`u;m~rA7k9|J31>f&G&6dOtu!rL`=)L+|0{8frf;|&n2}Ue7;JwZv zdj6WDO;w#8uGjWt=auE*<%bl%J~@dY#gph3=U;U2oDhBq_O`K@P=Nu8jNqf8rr@WQ znV{$LTC6!~OI7XFsb|~~jUh&4!JDcraMvDXjpz`JMr=!&$PWYM20da*5=`|n!{ zGqa2bcVz5o%d=d@VqYU2p0{KyQ|_-LrW#wx&r3FfFug#qh#aC~Id+0{Nk96zGaK*x zvJlMN)qwm50{pk)2({Ygf=ged(ypJDln5el;`C^;U2P4sIX9Aq-Th4AQ4Q6}tj8&p z)`EcEO*oc!@Q2JUO#btY|9pEXWBI2Q1`99KovL=+gqV#yHs-Me^{Z|Ml~Gf{zwr)q zK6e=8zn{djwt5Za^@IHPW={mciPG3CB#$O@o{=Fra}s&G8N^>MgpZ;RZH}7B& zRhu1k% zbHng7J?^7aF}iSk5qxr4PLqwU3fw-d1hGZ&)^P#nS+6}@%EV1!GPlT+ftuT}ou1?t zPRhdW`=_yB`DPO0vs}U_z};><>~j^#4y>qBM5lf58&5Sbw=O%%LbGwC|T0nWh9ZY=Vv;MqQgJi0R?4mCI*? 
ze&n#(Ul)^k-l785ES{MJ>&eq^|7h<+AuQ@KBHEuB{4=bBJ?6XkR*hQ2wnWja64Y*gvR-}Y6y0`f2CeW&=5E`fNJrOm zm2=SZeC^hCo=)h2bnV-SyAZsRs$7=ngbMY%Hd@F2AiUy-`N`oEuY*P$P+PjU|$4k)b7ZONkZUQKAxhQL9OdQ>8 z$izLoA~af(#cjH`8!A?{vMIiXg4c!D*~R`wxcHxdEWOIb+hOOJY{M*^m~#6^$+;WYF?BkrJ=o?p0!zU z=eH!;u=^NpjP0S*^7W|gfX5jA$nU^U*(;z~atX6p*Ujop)jGO!ri`F}_5##Rm!-RO zb;*=-3+UD85_0ZJG_0+3Bz|2lh`PgAK@V*xe8l=r`~;*?5&ljAsfs2Zn~A= zJXf21+}Do9pWf0xEjRgzKL1!Ri!C^Rd>k9A)kmLb93e4DefYaN4q)6hyk?n#7af0c zjGlaF=Y2Xxs+_Km6-`GvIKdwmkMuM8-Op*$k2JD(e>KrvwiIPox`T%LH{50X z7&1zT)18X%AAxgUI-pr+BE3GZfqZ@%&)IoZpXN3=<9v(9 zu=Fz*?}!XhrOzI;yoM)0Ya<)laCJS#m`IY@;@Ys`oE8~jXVXQ$MPRy)onT`cz~8w7 ztcaDvX?4fov8N`hXnKNd)!T?6SInt=-q;6O)(N3sw{d^ooB~q9W5}w4M^k(LQR7cV z%#Q6B*e6W@z5N<+DJ_uXSvJC3kwxT{j}rGUuS5xDHpP=IgXMhR6)^&p>SqO-&FH|DqPu5Dz#GR#JfI~??j4Q-2d)z3jih-&)wF3) z9<4=rkHT03AxT=lFb?*QQ$v$2b{MYZPWg=$EWObOb4$O$DsxNRu4+%1<7*URUgOa;mVcP0uX6)A`{rm#>Y$SnM8~$6?%m zRq58H3Pn6B_De*tq0Nqtm|Q}OtToi@^jfI&<y73ZE_=tvi;O(usN z<7n}{GAOt`N*~9cX77vgIa6e11>)a$@9A8VDst;@G4AV4Aa3i-IP<^8@V&QQqx`Ik z{3X^&b=N14f`xf0Ha>6$cTpXIzfmKJ*knxiUM+_pzn$37I>P7$>5*8=DT2=lF?A=m z1abti56H`@#k8+*A^P86P7NQ+F%cHq*mCJMlxSGZuMkT`>9m;w%9GJR6T2d`T5Looi%k z*i0aFl^|t*ASKT}(%fqTaPZ-v#Gj=QzIZ+RIlGW%cu3(Fmyh)PVRKBEOTZA>N_N$> zeOQ$2PTyS0#IGl>@Ov#%sj7MfriFsQ_V*sfsi7M6>^8F+3{RZy>lK1KI}Abj{tLwY zJ0K=73f?hs@N%I9Y}{T&dBcW_Xb@(=KW?x{IB7nGY)ie8|pijj%1H zh-0)P8dHzxqR}508a*=vguaOg4D3%>cN$a@Ro8_OQ!7hVReuF3PV}NB zb2CA@z=LsrR|E3K8wk($wm9R~a+7v`x&z;5a^PE?5dN_&MpZj?>Ji~jJthZni#|w> zk;FK#uZ?AgF7rvvv}_ustO-FrDqPgeqir&g@UmZ#J9mc%r1wY*_Pwegy#{eO`=F#? zibO14tQcV~-E<~hS1ZVlZ5lAE=r)y)oj~_Jkl_iE3yaw0tyVPuydEi-6o4Z;l}Lba zG8V5bgNw;?;YoWwi>g`7@4p7zZK;uP$5fIW8*t#mv{ew|d4;SJea8AVieZ}WMoi1x zLpREOCmKH{!|#ef2FrUfHv2O^H_0L-S&4ffq?3%(IZI|I8j^?SyFjv(_m{zzWMZp( zm|9<+LZzqOB%dIL#C;28)f8Vanwp}5b4PQnS|v-^M#E)9bod-HaY~HXr3G9(dzuNX zFGOBW6>j(-j}|{8==*F@e0jT?GzJ~v$a42F@yd^2uC^ZQC=LR%f+{k8+c);e>=tJJ z-;3}zO%B3%Z#=O^w3w_q_5s~TmC5|O_JVdRIpEE-WTzP9;hARwQk;_xp z_;dDBjN5+*Q_^m-;|@)tzD_3OTb#0h_dN4BshV+%^lJB_`NNAS^-fe!qyG&wPC28Y z+;_M<{R@%Hc?O&Lo|H3r4aDB5C#TGnY1`u2u)|4L-9 zs7)o?MXbQjzK`6g-47EgZAs8BJ(_V{o^)zP(pfHT&~0YLF6_(T!KJSQ5aG9$)TKPd z?KFT)bMjQdpJ z-J;3FqHmZla`GNgF-@a6JA=Sn@QJnRYvE9VA9r^56j;9 zaEx59;eNAe#Jp07Rt>LaSI-^eB^&L?Ak=e|@4CacgIn>Mi7xj*@Ivmpb)tfsyLZu4 z4;yaL0%>R;UL#m&_8K3A?S}n4vtpE&Uc?TKQzc&wGRVF9Vl4B?ARZtHG|dD15~_ym@)Zg4j~&O$@q3zw2l{pFwanSed&BAKy5$$wy%o0dvAcfKP{hh z*i9l94(9N0QXy7}NwT@S+A%cJj=baiWULOBK;`ZyZzX*fzTxJN zNAOSUL=cZpA}(RBVCyiQb((jLR*D~^;#foWNP7Vr8;c9CAXJu%(ce?f5s82#`bN$l zgcdgu89Nc?_0Aj^E@`AgjaJ-7(HQbXEtMV)7|(n>wgSHQ$w9eMCu49)A9#sLHE7Z> zosR51&fOY3m8f=5@K=_=`=7-H+QBclZ_NkU(F@0@(xwwIZG|#1+NZ&txFrJaSeg^L zg|hVX6fTCJ?ElJ11C`+~Yfb&8}* zEVRjq6E0+l;th6dnkGG1`U_TZr-8WKHC*g5g}m;|CpvvMagyOVhz`5LXiLArNslyW z_KfKQ+j19JPG_LmAv5citQh7EkE6qu1{#5rTLG;QK}M*mnw2}SocW|Z0zt~Atc+U% z8{QnsZ+6&7&WH1@+?F21tYZc2X@g3X-MpCaAKu4FD^G)`36CuJQH%!P9f^~<44rB< zk-z4z1)2+<;b^ZLdso7luN9Dk>Ar(xhs|Zw=_{l4S*sY{>ijA7F8kMzo;$7ebA=D$ z=gX<=O50E<{d*bngLNn`-kMEXKam)FoMC)jv)MDl`-#bi7o1N%y!x((Td}Ta0X5I} zWS>5GfGOVs>8goBWOuy-lUV!K%6M-jgr9y-CRKdG7iFs%eYY=EW12ZcfBs32Kd~p* z8+CZ}WrIDNyD^G=?dryOJ{RJ@%+4mUKeq5!FREd*4i&(O$u~JGO&>9DbS>cZf>BJD z7bQV6=0M_0Q$FSXhLf&FjOE5jc;t>Qy}HjBnz`Ev4M~FciIwcYR0*Uk5S) zXZcur1=~bPE{T$!7-b_RyXC-EdbyKEFp)pH9Aegn#&RiGCi3 zknstmEkKH^;!w;#=dLM;-5`dI6(3O3LxwR9{sV7I{@@E+6?9#fLp`312j)pOe;HB2 zdGBJ0QI|TcU!B0dYpZ5&`R2hH33u-A)-XuYjm-8Zi)hQ>JoqZV4!H)? 
zAw)!phTif7`?ia0O^-W&b50Z)+;ACJmn>sFR>hP4GA>>AbuY0Te}tV;sEqqResHG+Oj3-YhZ0plms^Nw zmMcJD?o67_##r-LJC5;^8D!6AmPkpolLKp%$Yjn+@IA?8lnZCj)UFGhj6QvG^jR=$ zV;7*;zAG%xbRu+g zRU8W|C zx~D2gwccM);F#kw+jMr~r#^m4n+i`5lFB7L`IVfj!KrjurVsQ71@+yM%H+fzG3tB0 zntxw?7Bw2!$gX{Tk3S9D$V0_S{Ls;ad<}KZ!_sMZLeI|nU+Oy6;i?uYrb>|K|7McS zZ4|0|!pL-Idvs>nAgfarQkp%8+nTTJaQ-md-9Df3jFjY;xbv>@wUi2B!otmLoToP7 zFFFhD4Vt)SqZzK-dJ{JdoZ_?_Pv&ShzGVHcPGRmCWp40 zfCA4_pSHD9Cf;QR)Ylxvxi8YG#2}$J{`jF_#eSG~m!;Di<48uVISyujAW?ozczl5r zD_lK|^tmXonLma&xvJmTy~D$lIlq@GaQI~ z&ElN*7;QHdezn}8qmj|TYZY3LhaWG)>sE4P?}id~Y4m5dw)q$TxQ`Hoe=*`3-7|%> zvt`)fT}TCIC($KCD`;`qL#(y`#>9*Ug4q#O5S>077l-Hwj_a$Fcb9S@R?Z1i{=C4f zixY{~lT`9@`FHkRlrj~5F@?PN5sENRn5+sB7ySA(mB+o-*M!Wj6ij-)7*ay2V3CSH zbNcrZ>SF|;Y%_zdyk$Z!{E%a&=YODcw$GwA>^Qid@eRALKVhWWe&V2OIC6yiC}>xq zmu4s(v986aYu=3fcM}@(<~Wo%=CD)0-bNxDNqRP4wBGonoStSOOlfV77FJJiLaT{dgd=FL9~}SHdOhbHW9_t@ zzO5g}C^effkr^U%5uD`ryw>7O>J?$9wjr%}&_XN&kDy3WHRs9tRXli`?}90LssgtL zIdVZl6}{Ya>HUz^m|r!>5ow9Xx8aub!{thA(M?$>x5|{-rDigXWt!Cgjye4l8Alc7 zC1BIagJ2uH!AfoZ8^RjhXKya!(%0oiteK)EQF%E`-b^z2n-=@_^b8vKY#v=Bs{qs7REbtVM}3I$Ot`Z@0Au3svd)fY>tzJM>AU6BDCahfX?=t#Gn+81`yYP+NKv_k8TFHVPaJV|@5 z$l8AtLMgYy)Hdxjf0cz5$0s_QCh^WElTxi!U~w!4q_rY&g-0cQzf6tX)=H;G4;}=K zf)lvGFA}W{AF(Ac5vIRzp*8Cq$zzEZu%>gg-fKCV`2A!mq2b$#LU7R=$&@seib?KV4b{w*M* zx7FCX4H@|CwIpXv*dSw7!LV_0LL7tDz99W8oJNkG!#@@)Xnw7>V5VaX-P&*fR^G`6 zw_X+6R^7vh?vxVLHa&+*Atg9cdK=Gvu)^WMnPlghUVelmKanGPPFm<@QwRsM=EK8Ha!iYXDSK$Djv((}6Y>&P z#iHnwlaL84_01fDpTBm{oE@2PeB=vfhK)Cgtd&9Ue-YHMT8nMSk7L)bQ)0qzexrBO zb=e16f|Ph;{+^?;(6&~G)0Kaa zYONHfHrKL9Xwp0Qean-v?|KWY{$sfHdH`KpdA3w9Ya(&sD{$LBXz|xC8NrHdeMs(K z2;bofmhxpmr`QW^r)7}CXYS#AndjD)g4Fu6C$yoaMUK`=I&i-^&w=Y7l*qR<13~Nh zFgEW*HCmc?qR{G?#Ycfgl{#%YQv;u0AI6Tto#>S34qH3auzAHc>)g-%74)uMTWuwZ3Ms zrAr;5aH}HWH5fGGyW5($Uc;Ns>#<{7C-%YAM-D`!SOVm%2G|XfuNm2`n<2Zsg0ttW zCtki+!k!TxE6!$gk>{}!&hRHtTDTyu?ltj)5$`S zt@LEX4R~YJjmwUT(}jDD>Cj|v(xT_YBk+9|j@M}enTRwvwOo&A-ADl0>y&C-ola*a zh|_=Kr|`GOV;Jd5VJ}4(u}aYe^oH9lYgh41(5QPvYA=-Fmv=d2ol_)d>Y^Na=#m0H z(#wMjqC3IiPBt2*2XO3itEt+lXY7_SVxs0ZA9rWp#@&kgTn#PW5BhP>Qchi@5RsCp zAnEOoVfIH^venv_xkGem*M_$^Z{z{{{9ZF=l_}8-c*D+-ngO(O2Y!8#$EN!aV^%iM zU=bwCrFdV6mpLfLGontVaxnm zWES|6WWy2AR~C@qwOWMtMMzWd>dPxS$NdyqX(f>WFAqVb!)o%pFulOI3F==)RV(2^DCX{m#^Z|Skpg_@|-{FNPNyOQ7j zFou6>@IG8$CJS?HpR-|tHYUNyn*OW{=GQGSsZVpB$0Oc@`Ea)VEYo+yiA@b&0?*~_ zaoI9GYX2z}`@^c?_oGBef3%sN)p^4oZ!(P-gw7GXGmqk*xFSJ)oa~wTo3Fz|8V^=( zE%#h`d+^xy)V8Lk|qC<-7#WVU>yrB`+Jb%nnVwAz3BdQp}NZadHlhGgJ_l{W)&fE z6^7?`z-^OWJU*gIKl13pdWi>4aBcWC$G^q{?-@8*PgyvbtkQS}cK86>PPV|g=(nh} z@DM$upNZ~65`w|T)7EpJCGx!+RTx&Q4L>FfGcsT7smqjH6pgz{ki{)gF|3JBNp7WojU# zssr|M_F&PI&1`DJM6@jc^3d0oDV8-CESsAKm+L;jj<1seJsKF#ujiQo6$6Y=og`f52U-YyLGejEW^tu6a4$~7E$!io*Cn<- zc|KZQI88oV@ZM3bf)MwFodBbmi;Tm)s~~r?7mh5bMa!v|fw_8)iiDN`cYPdQn=Vev zS2@+IXzj+pgTiBef?~L=!$GI57Odhc4*h$IPxZFVf~Rxp_$PSh!OHL_a8|y=N&fTL zX(3*?d|@GQowgGPeIIs>%qK*XDFQDZXAb^cDN3K%eCK=2w;(lErRX4y$^vizR~tG!_g{X2RXMZ}juVQ?j@6E+)L%=~fV!j)LI)&{MLxcK)@ z2wkvLhMKhs$W1L_dg`tWw*_{kZ0G@iUS(qJlv^cbmaO6&qH*^=I6z3f zlD;<$Au0Ucd--ImyB-nd`NAphiQv^21};Yw!SH45WzR92Ue! 
zRYTDJJ&_uV@IX7dqn@=Y zYX153xj`F0DES^P+`a~u7!5G{rW9a*TmX8H8E!>FJRoNHGAl9h5u+rXjlWk;z#hf- zBxb@!_AzaOEbSPWlgi@yP9dJa;)Wp{QLAR#5>2_YF$I<#KZFJUj^gWvW+q_t6FPfL zq^n<5!&SBtvX`wUi6@?5_zM-%zdRZ)jV^`ZPFXU2c^&gKWIVRswl3b#?obF%!%pI@+LLs>EAIwW?)c3-eVD^;d^`g;h^XRp z%UPV~1}YS6mw;>3U7!bApssF|X>|9*!woES51hlB4$9USzUA=Oy>!8&#uM3#fn7kqp@2zwu)w-ro#=Fvh zYi=devIS?EfJqyu6ur)wed8T#^r^Y(@1o8znNz}&Z5p(oI4D_#7 z69n3KqO95?c7C}t`(j@f-uxNFaS%p~G6#5E_K=wp{gyGRxXlmnn9BV4s18E)^Kjkf zEl{!Bos1a-_(`iofLB@?Mr#&&qk5t^kcrXk%`pY6f_)1kKZkKp2H9KlmvHYdzeDpJ z_p=ie0}f?WK? zqkGm`LZ-kN>HqxIP1%$LTf&Xp(_lh`HO+abA9nu(MR#P?%s&h2d<>&=e+fTOc= zS(9=f#)RHrmcQ>M1xDWBe!sxF6K}$jZ3Fcq;>}Q(!r}Z@jAmAj=>~FNcz@9)sug+H zK0xTT3|gQigY(vN$$S0%9Q9|NFkVF*kEC~7ua4EFZTnPE$FG_^9MzrmLJ1Qa@`!w)4{0coB?x}O=J{Z!l=VC zH@xL*%^u#hk$t_$gJzaQ^DmF*ltjiCg!I{`2~aT2W~v@N@#uUU_)z^NyLc zD}xb!eGjlRpYd4roi(y(q8%~|q5r^C!mEjnq8ddT7}xqYoY1B|j_;~O)N~zSzVoiK z2J@!kx8*EkD(Vusl#ig(rB3c$HWh3tGKcuj+nHhQ*HnhdVV0OKfqxJGFu#3s@kakX zI#r?p+Uh2bHCIGKcTy$$=PQrDy5|cHZ4kwf(X&v~WXQeqC7%6UAlt_2sd+6f4eq)JRaoKEcLn32qH;wLB?L%`$ z$}&Wgca%BXw-HPDH?fC1gQrOI4MKi zWBr+QIcI#Hp29TEod(?d31Gjw6)JSc(zeViQhsj=XX)c2c2#gPnWA)ucAK^0y$dk* zlnztA+&r+c&1Yk`$qJNq#Gw^0AQ!&+9>(NUX?msj4Q)PVPfGe-u~;@5aqC#aj`Iz6 zQLZMqKK{V8J>Jbkw0pDrH|(cNM$*XekTE$pID_W+Uq{nz6JbQ`65}s5Ko=<3Qb}$! zf3sQwR$rBaM_mSJmvLtdh$zECxpjp6`iEz{e`8}PLwN~fpfljzN#_01cr?s^4L4Vx zVIIdgB$!{?K3fwG7<&KO-mPJJ{WuDxE2_~A4xUtkQLW=H6i#j|OlmykeBt`;Ou zP3GJPr;OLv$v8o!7khfg<4&y??D^FbNo&h2OgUnW__h!?i*#7exwDrk`4~@+6kuecIOyH>tn9GgSgB?+JTB2FnFk|)lM6^~(p zua+?5OD=HAtW!vqxvcYuRqaw30&EyMneva)^^*f&>X?_b10 zBT_ecBX#(tNF{aJ*>jVpgJ`u6{h%fWKgQ6-+s+5f0p9N8bjOdR`t3=-nN!P^!PR^{ zv)Sv3Rs9KJuGFA`pxacPzlf6snT>zh`x(CE&yhJ)NHG@j!{(7t#b3-C)7@m~#01I- zN@Xj&p5x(uUm`zY3$?kig`CZb1)J$oFdUEt8;LBHG5Nqen5IeG+ZABj_X=iwWI6K8 zf8Sv0l}^H(+i}>ttEIkLL6+Nl?k4^DAQb-8+CZQDCQR=rV)=C^;K<4<;+a_jyWTp` z^>%)A+u&yFp~!x=h#bNfzEhZpu~wx&hxE8nU1s3+-h>|eb(`((JWK62$FW=QsN=u< z^{`V!fG>2S@PS1w`Pw<2hZ=Ktlat#VS)r#A*4}%b;~xJeD*Dj|eD75? zH*^)`C=z6)CNbj4lQ4P)kKd1|qQ$HxQTdB`!Tf?|XBsnpm$_KFua@t}IY^WybHHCg4?30|rTvzJY}{{cv{_RCrRLhi zz{ZyvO%!3#^CY99zMFsPnj3MdEoHN&9b=B=r&3q<3mju^E!~zD2L5?P><*rD3SMqE zAdgmvfu^-veJ6X87B72AU(rTil1gz=;2daUW!mPFLSF|G;M~E?Py0+cA2}rVi58KC8?+V@nW16ZmpdQ6<5)p;HgR0bE&jh;syc-c)B+Uft)PS<83eEMNj5UH~M0x2C z`lZmG883E$!$uWhO3Gn+q>y7Z80yM{({Hrd#eQa-i)!*rLS8=DH6LWRl;^NRr+n%X zSLflwRXuf1ZZX(bs=>PKpAYM1m-COe>yU`D+ccmN>8jLOevSM?xpw~)Q zzH(8BuClzsx6oupAT|de)4|@-GE0MV>!Cvyj)gi;}v#^Xarq$ z$_k_Zma~1<{fzd66d207f_jZkEO++_bP*ked4dz<{gK@ySuf4HP;nfVPL71j?|_L| z&Ekkm3M!19GWULYA+;ux(o%d!TEu2|FEmv0Iz|tXR&~Ppa*(wT3ozKwP zcgo}fum2r$dCE8DZuV4aU%lVzyiYu4uFHhYVexdI;$rstg%Wta#elw7-p#+cViP^P z>=9br5#u(8DPXF04_{S9h4pbiUGKPW9Vy)AL%Fjz;42$e% zzXs^S+gW|gs}E)HaPM)vk@uXL-PuU0jxWYzUVgYYEs_cEoWZef6l3n~-i@5-nRxK| z9B5ys0$XdIfx;vaTxxj^&rE;BFJty0Zl6XbZi*vUeI`R|q5*FVcp`aPW``|#I$V?$dPgv(82{<)Vh@}z;$(r;ng#?}H10vAx*Inrd`zxm|(V+qEc zxj>q_Q`v!Ax3T(M4&z}`hY}ev`+la>~@N!9YWd+q+aJl+RP$h1BvXd z99|q#Ec~63K5U3_*Vb^7q{FeX`Z0#z7)MM@L>ZH7Etqs9ns{6fV0mX|K!(yE&U$@S z=rci77?mIi{3>Xdn?w$`1L;|_l(TPq8aXPX&z&^mC}zzaYrGwIkaeBd$bT}m7*f|) z(;s`PC{OzSKIG*`OD4;rH{{fpz7N48aXc;+&ew?&)N`$Yx3s}sR#Nj!>1q_Q)@CNYXk z7yF1Cg)O&gaijQr6u;ICaRC{mBC><^eKM6O8XTl^{2Htko{aDxa+;ak*~jq8Il>%! 
zEl={C)-at3*VsQdd68HbugcLG?HqH}UtmqdnmF?tDmc{^$Jm{@QLN^y2((ua7WnyR zqU@QUQ2yu}GjeJr5qo3{TSJUF5%wqHjBqBH{d~?``qxQaZw<3f{C`wp*L_%gr5rEK zv#2keSH`h29>n|ilPG^@Ar0ZgaQyFfus5o&;7MN80M*rePUh@A%}(g(VA|hb#l6Io zlj$qTT<}U{wq9F6QmsF-hLvNl2xX{)aFX>in(4(PeCt#O_Ip;xi z3Wi2hkV&hA1aW~LY;o2)&VWGkBY^#Dv_8xe3XtGx28MM}akE+mC>y9?zdfSCp^lXHq68Bu-(F7)lpSCBS2-0 zZtbWy*u!{XXDed-F*Viwn}wnHXq&<+dg09Zd>o~Y!kcbuJxMi8Mdy5)^_tm z*lj8$m~3u;J7*Kcre~$Ih{cvsQP?`^;V$4ZMBti53FPw_#6XJ{xxkASH6{myYM6so zDzZXPb1*IxuPu~7U#2Jt)B-%M0SFzu9ZG;(EkhU_nA-2pWdvfT?f2(0Gdd_>ERK`{ z+2a7RCxn5Kfnl*lAR~%ZMupO~g)-e;Lfy+99zGo0>PZwhYVs;?r0Afxc<^p1FAm#yLULfWJVtybN0AfKP2I&z7 zVi6z~1!6HE7FV*taGx5;G6qISII}P?EU#c@V?haMlS1{{LJf3BYJ!7+$(sonrwx!e zWngMQ{*4)kSrj4Z207Vifx@8yVhs~0I)j-^K|#R=OgRNbsX3*Fh1#`+I-#tIIRzPs zg}Nx4boD@*VS%{Aufeb+sTiTts8GMQ&>#a E0N0-XAOHXW diff --git a/facelib/mtcnn_pnet.h5 b/facelib/mtcnn_pnet.h5 new file mode 100644 index 0000000000000000000000000000000000000000..e13f81b077558680c1cfe66fbbf24e80488caccf GIT binary patch literal 63448 zcmeFa2|!KT_c&hBq*+M=l}bvXNgB>xHzg{il8B;NMUp0&8uW(}we4TwzLv-dGi zMVVEGj2S~Hv;S?l?>*l0@%w&;|95yU?!J3kd#!cWT4$}j*Ey1{CrlI@pfG@^`y(X8 z)1ODO=O_2;Yv1R3S8ofq&h7W?`@!2+CiRuu`Felw@(A#B_sR9OOZLH6>DSvJ*Eez6 zR4XeU&PXnNZrL}Dhexybm(Xtwzkd9kIACox-m#lr*FK`T@^rdS&bYi-u+Y~p$YZfz zpnpJk7^jgdm*(67|HV?OPhP*5Yf&C+j;?gW7w3`T5#pKe7wQ$}5$P8i7Pv5&(=N|h zxLcFD|MlX~E4f^J_&M#IX+59rHc=k)K6pYs@Vvcz=KBTv{uEA+;`YJmX2tJ#iS@wg z<%iG0;JJbR-HZJUueyPpL4ELgRrFhX&<&&e^CP{s*6Quw-A8DzUi1A>ujNMdj_0nD ze!aKu*<1Yj{y)wEu3qo%qcfzhOh4cIgO?*|-J-~E_b}==-wgGeIvQ}aqUi@YVeo~{syY&N1wR!IDXswPuG5gUzA=i zNG?4;;*Xp`?>OVJ{}_cymJW73xU2MzS%pEynWqxDA{a!Rh5?R6A*c*6aH!xo0l z4O+OkcV6G^oTtZraGI8>`UH7}g?R*fE$~w{RZ+F?w#%wIDyqGvh|{!8mD|^Aqx8Ca zgS?h-Oem+ib3|Mm(__X-X43ik1H_TdcS;K)Jk5AqSl zY3*KJZ*(`s-BQxM=v=R053k6cvA%)b^m<2hquSjS5ggb}R5zF~KR;hiQ}^Xw(S5bL zk>g0jKSE}fU+BWHZ&B!(+DA#KpMOLU2j5;wdgZ7W>|gfua4Il-iN{~|_QB*3%b|dC zt-CM7A3^@9<1Z!7&5a|4T;<^Ki1T;ZirKTj6-39Wes(T@A&EWh-;(>Aoc})OFC@_` zC?vq^pX6ol+`nW=Z`Yp)@4l7+UZH-z9$rzsdd87O{caNftbrVUaOU@@-H*7jz$?ll zWZ}YYY18-pE2CoX73J91|06a13mAXFrtVdK(@*_woH;1}K^eokEn_!zzpE^F)IU_u zA5s3>HTx0i-K^;`Z{O;m<u5XYKfrK=w%NUr6Ln zEz^%A@_)Q#>PGxW+~`(MgC2YQjpOuF_3WAR57pCvV^@BuXE$qqsi*31O7|l<>>2PE zg7{Od{E;C3kJrj>#DBz%Zml%@3-9TtYS}yKAL`{_s3q6W{)1D%ZTf@j@XL??Kj+8) zrgi^N5dY$4`Ex=1(cSWQ|IP5v_Xn=j{Sw5#SwQ(yK@3|sH@w@&`wOG!+}qXT#PsYq zbE4YccSOFK_!oxJD=M&i|6r8fk0PAkZ1Ep;q&uqW4T68)JNiB>p4~lYVMKU$sQkxK zJpj2r8E1@}>K}3JUWF4&ujtv}>fY<`{`_eFi@RUOExG$e7{VIQP;1>se zao`sResSOz2Yzwj7YBZE;1>seap1p)1I|+%CUoZzaPu0B`vM(q9)M?GJzC(u2fzO< zt8(*FzUP-HPitRZmnetFay&iZXHQl{cXk>Nk2HtT5}cBkx0mB0J*_-E+_LX{x386_ zJ7Vq20O@Y(3D$qFe|PlKJy@SRvv2V4_3qxjB$3}h{a)`0jem#SQ~iDJZ(;Ip(ElCz zYu%#ud;Z$Lo>$i!i%b57;~#(Y=B4rcFfZ+oV}Jer`*EPxF82Q|@62(0FAunOF{-bg zYuVdw+{dDjh2M_L?W-SL)Z1>|H?Fbn+i~yu>T-Q~Z(OO(?aMRcuG`v|pT_gMZuUX^ zpILD2;=7T(_^I^K^ShpR=c&r^^k(e-ZWyIhSbg^-a~?wx9>cGF31(b7`Dbdm9Q(HC zXL;qj`|{(s_rGsv|Em38=fL;*om?qW`A)yOZ-CuRTs`FK@juHSu3mhv&!U_R_wM^3 zFOOJ{zwo_Ybo&e9-Ti(30yi&{TlU~NrZ*SIRJXU*o%?30%f*ySA-CRJa`U;l1oCq7 z+q*x!{d+0q=68Nya`(f(Z|By3@0aTVEv2H|Lmv;13eR96p24enm*?TzBN}9xioThu}8=`jaoh4}$*jguhAeCZlaw>HK@xFwv0ltmZo$ckLk4o>N2)EE&L5*e0_=#zrs_ zd6LYDnMp*%_%m^PsLrZ>xB_>Xr!#M7(u8k>I~0ldli^-EM8RQ@`6(Sy(!3~#Oba*z z*;xPy%Lg-A;giXD=M5yf@*HIP){v`@%b9fxBk6mKx5D59o>)k@ieZ|3nAb;&884Mj zX!XNItdVXXSi(t5iG@op^a~XrD~1cOgfpwjg%lgy^(mP_QEiaE;396H!(x&2U{;hu zBPx74AMUNWkEN0hkeg?2!#-sxMsD5)cI=Z4Fxym`b<`@H$x``>UVO1;T`v)0oM)$i z?3HPtk~)T2ZfD2p%$&gr^^RmNPdZ{Yvg;x`A{zT{)H=WIlu+=eqRycaNb ziMt5g7hqo4cOq0ejx2d6&%C`pmL=p;hugJIV!g;MP$F|329-s^hJ(Y&y+d|*-4_>@ 
zY?lX9^~D4x+!bU722L`!c$~`8{5*zv+GV{)HR7*)`*;WTj1IBp)3{UGT1f!A{p>}4P4c)$2IO*ESM7qo|5Ot@@aQi%ZCkO*-naM z5)ZE=Q$@7e9hZl+_wMLGmoc-tp_LqNhz4U3ZRWXZ0ueuV38$xc?fS*XcoN>pIf9eK#1S7PIsZ%O6e{c8EyH3tV?S$SQk5YK-JMftT#Csj7O~iOk*uX^@OQiSUxpQ}dj-#4KbsuP(_xKb7nAb2TS0g1DR^O;1$#bw5CLW+Q+{I&G#!6LmQ~3x zr|e^a4hSQSx9ef+Gb`M_;tfcS%w}D6#@wp&`$S)Q#DJ>__Y&!B!&}@zikSaxaomb-$0)%4V_38F#4ij)a2DS)?r@mzd~} z#Y@ciSe{#V;F`$!tmzuLtW&3|QP`IvTD*k^M<)xynM*F1pZ;Jr?aD~jW(5`I&QVu# zePs+Qt@#dAg~l^03QJ*lLN0OWiUyvKUQBCo3G;eWENi`?vU#rIU~|{}YY<%#%hItp z&eFC~Ft5pwH@Evhu~?}V5H_v^j+{6Mp_iIjK2#@8kCbO=JQ-!4YMhLs3!LDtTnq$d zXrU3KCqW@^K77n-f_bvH;J`)$P}wTLw7=>P%NAPzexF!7lpaP@gl3Z6d<*fAvj?!q zmYb}TJIYuN%kDAH#OvVeF(DS8K_zQXo)s%oxPk1)2nx}m9+eJjJ0p1K)LLyt`@avVApmxUqnDTZd(K@&oD+w$jDWe|~hpA5B=(PzF z^svOexP>jW!xdPHFcaVok@zAvF3Q-k|r}~BabOhH%S+8~So-HUFzTEB|Gz6gdTvv4LN6xKeF*(5zVz%3zx$%x|G%mD zwcdX>2fmLlxw|$8Ex*N#djiCbC;Ptj&HQX%ii;1o?8S3T z&$9r%dr@49zD4fO_M|sL=Iz%Di3^Wga_tJY&Mmp^|Gd4~^#5pYT)!pY-(36A^WCSd zdKY>$ski)@J=t#4JB}O2{+rSK1@3=12YzHvguXKvTm|Lo(RbbdWh%eluiDfnw_Nz& z%kh7CfxpRw@A;VC=TH1i$p5#I`6K^qw|(yt(S0lR+S6b80sl6}{~rVNy*=gLS6%z$ zh)XWl&N*@${>75(pUF+`1!UZ}-XHz5|Kxf4kM!qdAA#w8^!>~qadhb&$mQF=$d+Hz z|Ce##d;Q@?1G(QRO7{tN_mK-%kN#DEgsVTUQ+gQ|)wkZia)7*BvEP4qUnQ;MM*pvVdtdofU^ru)w(HDv{ z#uB+?eOmKAc%UphUg7TWTX;S6D{RTB71#4n*WK7gAd6u&!-G$ zoy|HzHVK#F)(6?Z9wx!MeY}8JDl8)PW9~udwM*2PC;drtdJN-mk)L&2P6#!Dgxf`a=qetSERV%0>H z=6nHl(JbmvqXJBwc99xt98FH>-v*;y%H&)~7PEAp3oMF1ipy?i6NObBNG>djC>P6< z)4`*Voalu4^Ej2dzRQ8I~PCbI834 z-5VAM6L+PP4?*RqRAB(Cs7sODKYEGGmCJ*zUuyBx5m}V=CQ*n`Rw4s+c^N~Sc*MJJ zG8`R#64lmx?O;qXLw9J1lI$bZl)?f#YSkfzoZR9@?ekECS%o*y!wt&dpPS9NX+FbcDwE0H zh-WC&`#71!0_@(;{Lq{u^I)&uN?5wjf+#wNU`ZuMbU{rS4rIlV3bDbg!&hFwx#}aN zB_WGA1!oY+>uLCYT{bvBQKd3>-=S}wpoo=gJeeR@MU>O6m}EwRv<*3egy;4r1}6}S zear_hc^pXSa~;?->n2#Ak0UE}W8kFjB$no@$Jph`2Na+^fH8P`js)mlA#XQF;k)92 zWPkZ8!q+Oq5^!>X?d5|RKK1d;lFtOBhE|%X!d+ZCCW@2~R-nq`HzV5Ej%c?9kogxp zp<{+RYGA1n`V9|+MZbVic_!r19yhWpUK!e6=HW43#q`h(hEQg49*=Ag2KhsUj5UME z&_pd{|N07Pm}x`ZEs-RT9Y3M4{dMT<)f6J8gwf^~PtcYG38=X-n!X}r1&gYW!Ekb2h|umfcx2NLKG+%&mlJ$==n4lSf8B+w_T^!m9Q4U!m0YNQh4HkmqH6(gqBxsNSh)?9C zL*?as2x*%@_9%HErl642ET{(K>v<^U?n_es+8ezT>_Cz}f^a!|0&z5Nh4JE6jOoLR z5I^%Jd!eQxYh}v_=IhvVtPhIDq}YdtiK?Q^=HAL6{YgJi*`q>?WgAgUT{}t|Ac#?KuNk{MHzaL^1Ml6Gz#_S*Z3tPC-v zp7!s6UAy+$l}fhQcU1;*j=qX6+`NDcJ(KBCv5HtS zP7@5vCxakLhLDhJSZeZW+TX*6yc;BjHhdV0`TfdiX)}yt1^2Uk6iaE7AQ?6%_XcbO zW~#ZQ5$4oeFm^6l#4YR?lKxOcX?89Lt85MODn65Wdrb{_%!q*8v$M%P=d*Z9 z1)-m8S^?QF0w7O7h;@=@E6tnR1=}cpgB`J5ez-9}22l7K>DFL;-37o7u7=ait$ z3BoKs>tkqfRS?8aC;?%64C7o%h~nEtq z&p%^}R|V{j@E7y|-)hpe*aRK$H)LL}u>_ATQs7C|k(Ep$Ow2omTK9V*9zA2$d(J(T zM>ZK07Ow)2yD=n_xq`Fa)k9>RD$8y{I?)YS&3IPiz_883m?duw$n_nG$oTP4W@BX| z)^su;&wM%fKPVytk~2u9R539SIzZe@*1&9RNStPsOCu|1NA7g-7+qsIFzKji8-(a^3m?39`hItOWDsq%rYlaXdd zX-Sdw=6qz!8U^M-`w?6`Y8ZJHHk4!zT0?|a4}pL^j^v?J43r0+rp~WVgTXrSq(o~x zxGNVCe&ek$=S~QGcJzQVb6T*e%MR4lZVcszIQQK+7kZogEqZ50DyU9eOVZCwfN9g7 zP(>m+s4EzVEzf>5t)m|^O3WJbUTnjGipLo1lt46WK?b_cI!Ly3&ciPCo6!BFUC8=D z0g6!BL#P}#68^~&I;%FJmm6F0Y^|5{)%7V5;RfV+b_^DENoFrAd`cCqNPxALZTOmZ z4skCYga(@2p(16sAnDB`K|^~8oH1>4EHvPz1{Ze@3Jb{=L567rP6in8c z66cbQIPZKtN+$-5I|P_Pn=y1SVz6|fz7-`!7~4%tA&0!8AuWjn022T&;5 zLE5IN;D$|?pgo|De0sM6U=#B8 ztOR8lxZW5_j2?+%<)Jw;e_pG-GaN9WYEX9 zW?ZOSM5c-O!p;+b$85Pr?h7rXmFFaxRoyuW<)fuZOuPl_kkE6iX5&ZJeomuYHfvMj zhxwSPR|C+tsqHwZsuJak970pdDAw^Yr*PPe0njxj8BfqO0sYWnkh9N=l!xh&%j`#V ze)1EzP>A7hYcxzsR>M~W6Y=?cYo;b*30yj9NsUn0N54rNNDf?%BtcQPpylNpw4vD- z6{_Telwu|F&*(zJD|z71{X|lKnFWG*!^o}KvteE(kyQWCy&O$hQb0o zd36r5OFxN5DJYW|r+a3US_8goISa`@cO%X-zT$@~*F*j8Agmz091UnXh5atblIXil z$X&h#AG&fMIievMY;Pav~` 
zO_|4$G0-ni7(ySnk?Dq&pfhhgxfFbZuovf% zc%yO8jA5JwMTRDo!2O0dNMgig*gPg0tzoTXT2AgKj<5ScRncqq%RL1!Wm76M`S5If z^J^X`HNFC?O%0&2BZn;H_{5*XQ)vEKhoEF>9LHbT3{N!_nRUw*nZ?bYalk5nR4|?e zYBNWZRY79lxNihWwitj62lF!hj*62#7x%*crX-lFb&3iSNG8P>t4T+G89F|_6Fa>> zMIwD?ftB7RoE}g_s$QQ&i?^@FU9O?%hNus-n}^1ublsroa6jT&rvk&2B8b9`yOg|U zG`i)Nz<%Gspn)FJXq~qw$Ql|Go9k?}IpYHD^5qUPG;~49V}J~Ft|FZs7qQ{{X=H7< z4P4CPhwV-4@LYu)(s?@`>AY~k?>P3lUOt8BXO%(?y>j|6C2x<{gzcd0rd#FPy>Vna&%MHXZO%cx3NYgB%UHH1?Q!@DO z0c@Hxn#@ZHB{Tc$vFCsFg$tb6BW>k&Yzb>&#L6VJqf(ptnz|i_mdL`A6}vE16^DY3 z)Z%FO0=Ol@3qsM4nV=U#%?7-2HdRR5Lm$>ifb%*t(1Xuw$m&fVXnctbKKgDQMsdYN zzC009uc*US(@yAGE`@Bx{mA}@4AFhpA8Rbw04seGh`O^T`7%9(Ie%g&IVY$9F&pwh z^k4w_^dKC&X~%$uw4SMdp*-^-ubq9rxf(v6m&00C60Ge47WnwA<-mK}AAK6@jPew- zN&3_nW~oyq^Kf|t=~52?&7(W9(c64nM7_Y#Pxk_vJOz$5n4@K*HxnBHDVF=y>3Erm zCz&L!jzYC^+2XP4O!(fHWQ9t9*2L8@&}b>f8huQZHCg#SmGEgdtmRvP^Sv`*Z$BM! zz|RaqIC+(P9TZcW+6oODBw1}5v*G54H7GG=DKQ;<3M;M7gJf8XzN~#mOqblCR*i4R z&ZsiL z@az~!4Ra=Zk2taZWe4_1V@E2sdIKtVae{G;b8wP(84^3V4RtBsAagd}fYlR|(eSlL z@kh%6@Xj{{PAS!3<*h=*pl~>4Z#|uyy1awDRvbtAJrn^}@H$-CIsvT-(qyLQts%Vk zPEyPDQ&C#vK{Q=#1Cp0k0U17L&^h8pHU=nx+lG;h`}(WcV`Bu`G|il#*$+{PUnr8a zDFS;NS4LAI2ir1<><=UxNzWLHl#Y!B^)PYFp7Yu4e494ZMahtPQL=RP`;GWzt^|CE z--1$!GO@g(K=zE^2n|JB*$ZY(MK1zu$)YwsGF|NfJ*ra%wfUw3_Q*v28iAQ5DSdRMyKkdNP($4^ZJ4tqa!as62k<*;Dad&8oUU%9?&II7VQ8LffUsI;Tq** zc>~9NP(hO`tU%bZ6WNPBHoFU1q_k`ulnSVVqlrH`c{LgHOdbjulMcY))Ah)0!XOZ@ zQwOVo@9~3eljxFn@j7+598E;#1VU$x8Z7M;Wj)LjhR>n-mO$PvMK^1K6+mivLYkBGUh+T8Il#m+%yg^t4!nEF9o4lJcx>1(~ntS*N@PY2q1EX9YiO=^_h`4 z?(V>KI_rXuS(HJ7&=mTn&oO-1DTtisna*n3 zl>tL%%%P-@4W({gC^UP3yYLPRZ*ZKbNIV zOFrFcI@aO_<+#BfM$a%H7a~g0e6<~rHF+(GkWGh9->+nQc7J9de+%aK*G5sT1IUT? zTv(-^L{2&AK&Q)kV)y1bS-HFmc{wdXhO_oy)x~$<)F^jGcRnBMF;63ze)a$zs+J6g zq@`IaABr&5$BNK~%6vGmBoFMYt`Ns+UKamzWf)m-2al?0ryWi1d^Ly(2HXd z=tYjNkou%8bhAY!Gx9|W-e0v5?DO5ogniA(7_rGTqpK8txS3k@vJ*e4lP5F3wzG}1 zh2XrJ4|y!84q+R7ky%{^HDq=j@xJQ>DNiDyt!Xv+V6~Qv?6i&R^b+UvN@65_GL5D z?w^Uy2r;awozI?pHXh}lzE9gKPrzpvJwtDUYp`B=C-Uq}0<*#W;UQ;lRyV1QXw_Jg z`{~})xIuMz_3Cn5=Me+J^L)^uSILz5)^z62=bPW=hjY&{x#d5fuX(yJU(@`X4gWu% zSN_k;57!OtMLW6=`=6a}Mo0Dza!C+vnQwf{vWSj_T-z(`M0V3$ew8S$q$zi|LOej!+mng)zk0gn2X1+@>>pkpC7)v z&!6~q)UVplIPjzV*2ZPMGt>J3{LG%}M)wZl*ZDqJU#kZ=jcY zW*~qLBSoIo$lxtM`rM*{8ooMHGoLq7dX1?#qh%$2?ed!1)y@xx?xy0)7CZ>=5F!gV z$m2JAvyl4$OT6ulB$jm&gXDJxSYccX-3rAd?xhUbp|JvMBm_{O_C})Zr#ZQvQ4N%S zg(z&i?vAo9ZD;42$C0R^Ld@0S`%w5G8L(H0#uwDv>BOO#uJXLmp=jv(L zR&*+)de+b>SBH^O!5Mh=_7-yUy#O)LGa!|Zwoq?We9)3Xr_slj%V<-_N4lsZ9Nn!H zfGbjMG~db!bbO;RJQNc`nM(&EvApqcXTBs++n9*fXLr!E28WP(VIe%dd@8iOa3#U& z<5Bd736ub94{Fk?Ms7PWHG7vZWSY1m>9S$8w0{ddTiF8*Q*=SC+e7iFt3fDfMGXq4 zn$Uo2bBQgMLIG=>G}puaU);)2i`AV_9OHRf>&a zH!Wzig$`+%L8lK-BdYNXnPDPHc-!W5k8OKl}$pmf%nj>A^c>>fM)9O zvqU^ARfkA87|{)H3ecVEAy6{Z6`nY4$1>poNN12Kt#0B65*=!!IH($*3=l{BI~|Ej z%{(w`jDZ`I+t`I&c2tGPHj34(-qfzQ?Jk`coUWO^QZFthGpMsw=8j6(O^5C0gcs8@<^mN+{zY zM9nPO6pxRAs0=ZIPgupM%6%4H#;? 
z>9=omuzbB9&dW@uUhdmVeYqkC>+h+NtHTFi)n(t)tc1ZxhgPyP6-j7 zT+Eg+aiWG~DT4SS8CJwxaU?SkfRm#E4h2Bx6(UbUPn!b|uRX{4Qe)xx#!95QU@tx?We-PP`JwTd5HUX- z2L;<@@TXS?vFjjSOc^DBtIBA~(J>a4O^b&(ze3Dc#yR)=l85)t5@oiH7X*P1*)+4t z*>qG@0g61@AMI=b^y1|*uv&VHZ1F#hpBJwq0*Wd4bK@-hXjlfa<$Z+h`W4`QXA@A^ zb!B|0{62Lf_XyhiMi);QW{ad3=;JI|UVJd&6lHIgW41TwJZ5V@rdADMqZ7}qsQXWf z&Gg!Lqg#gga3>%Wq;Cx)WiI7d#&kA*wYS{t-srhb^YsmsV0%M;fX3 z(FI7=Yc6t*pWn@6Jl({!_RtP{?5CBDb_*+_Q8zB(VKY9VEh)Rv{!>z9)KNYpJMb`y z+h2nxsfplme$n{ovgKGhECFo`Jjb>am`Cg!HsJoF9ifBY2Vbb>fTi;AF_sU^TJ8zF zJI}F2-wdNH^0kQSUTqR>?vHfuNYDmbqS5?fTU>kh75+GWGu>5x4Z9XpqTEq6xVF6z z2eyUKX6iO*n%+9ZHjto#F7Cruy;Japk!mCLut{1pK+MR0pX|n>joGe3e{O$C?RSRME^;R4j_5`&mo~P&T7>Si$ ztinoBLeO&X04^^RAoTk+v}*Q5)KS}kws?#|ulP6MA#7=EyHu0J-4G%IwwM+pO)GX^C%F6ADwZ#ssT)Jxz=8C585 z!yqg^zn;1iz1N$Bjy3U1@^C>v9pHD^C1og)Gow~}iu)5a2d z`yjSa+dvrgHVs?4C$Xn@EHqoqS3}2@iV&WnT=YIxht$+)lJfeSqX zL-R+WCDLDUee6;sapM|VGjt=peBd+~dUX(~xW@--w?5;!RWphGR0Fbq2OA5}9Z3H8 zU^0=o;MUby$o%OGoPQ-ysVQ+3vyiWsl5u zx0NEdz@@N6@*2&$_YuFWk;5hx$y8LS7rxQLCfC+4fpa!$#N9<09#+1fH5~Zq13P$7 zhvQtNuPKJ7?Ux}{Clsm8oOh9_A4x--*X%@#CM)8!;X{e}mrK||w}pLSEDKqF+>4IP z)FO3V$LOp(D{!U8E3}n3(U_AHEF-GM$=|q+-uM&gy+SuXgccqcS zI}iGf^f|LEJ9m7nQHBaSeG1D;4A^d)v#ZQ8zVsP%XmJgCUR zy4#=PIfqW;mA?6yIXT|6#*(JaO&dZxxDAEO78U$OqmjDpGywJ`dw{(3Gdy#}aF~NH zP|e<3DIwNF5iqx@VR~S@%#}|B(ezMmHbs$MS2uDa%Lg&&yykj z@3tcGHw??GbH?*naSLNP4N!$Z)`)D1v|C8O2Dtf4Bs1=qiAMp_$R;iA{PWVe3}jyZRc znzqau$Hp~KXodsMW7p&8L;SR??3#3YkA^r=k6j|d)3~Xkg;SwX^R*VDK4BCKvFJ8ln{LQo%1;{BY z4c9%Fg;_RF@RUGNlGDn`F>JLbO#^2kt^Q;2%@LSm2c)pJ#v!yYNfna(pWsqw57gXg z1M0J<cwPxtH z2DE<8#`o^_hjfo9v{B?SJvdtsW_TGQ+T}VL<|u(~E%m`w1CCI~c2?pShH2nuq=5?- z9ic?OUZ(Y(tl1QfMS5+wkgD|#Sa8f54OYu%pTj#~#+MMd9MX(t%DZDFMRE4UWCQTn zx0_P+R3&)*5xiDZ5H@s;q}n)mudx1#L>>->$<8x~pT9U(8TZaip<#(x>S#H1ah?u% zUwey`mMg%W=|$i_(YDt7wK*jC8-j*~IyiNfqTN9XIM{GFwA&_OFN+-PnchV2Iy8pN z8u9=QX*o$tE%RrKESp1(9g~e8mh6QYo0agn(~@v>eF5&0eu@uiC!;T07vQ-2(n#vr zHXOg=3udi~MO!W3u(zp};REzir1td)zWibodOxBR`=q#`>FSk~?q*dyM<5QbJjYL4 zg`}YI8Yd8?IS^tF$Rm5BW^}W~8kR*S;Zu_XaP#f?)Gdn(c*)+I*iL8{`TW^}(ra}g zGq1eCT{p{c)S&#@+=si+rK<(#;pP$4t9b{_RL9$Ku3al~ST__Tiua({^S903-S?qh zrY94T%`%+a>mtNoHJcshyaadDwqncNM%+2k6pdTbjq|Z~H{U(5&R46%qMjiP%%F5I5Z^#s$I&RNek6l-?{$0>e$Itz!JxZ*e49S2l~( zjf+P9*PYSb0d3T{<%39u_DfvivY9^E=|nY5dP<9UnUhaxS=7q4r|}xA)7Udem!t{z zN9nJI;yrr}P}sY<D)y{Ts?*NuCBy|hL+G!TxjO%7|n?r zD`0GwFlksXh}+vP;FQy2D1(Rbq`$yB1Va4Dz`(XSzRS?jL!6zdh zn|V$h$j}+#NW6d{C6mN!RXz(q+<^f|cB~u@(JjEGhNtk=7njhWr+UQZ&N>LYUV=Jt z5jv`8j-$UmAPo!Kk+OmUti5Oj=dab`sct zJ)$)(N`t2GIP5vY1`f)5C za<~kxwQizUOYOi?IV$k+(O_g=w20k2Gl&?3Z$ySdq9EXqhRPr3ld#7R&~Ter*funY zGL}=q1JX;qz$oY(C`n-b;Guy@*c&0z`fzG{y>Hc5lo7iqQ@pB@~{-d&r(ib6B+ z13Wo+1(|6Z0}ZcPaO8Foa$tGmD{Es>jo&lVM}_h9-Sk`}QZxVtItGHbUH~Fjl!^U_ zOfvrqMK%Um;tDk@_LKFs&@#{&m4ua{=ps$5Ub)rm;WR7av?v5zumUrkv(d>Mza8?+r-F}AKKH1`>ZiC44 zgk9KwrvwNUC!y@hkSoWKb~4?6@sqZJw>;_UWcc3#ZtubF+EcG z5qg}Ya%e}CDe80qWD*jMmHcO*mVJxRNn1^FAmS)Go|uP&nhel$`MKCIU^AL@$&tPE zi5OPheFyXD(zx+`A}#$e1zmY(0A35T%&ZQdrgwi<1f>KIEN#=DAlda)M*b7L=)DUy z(S9@MecL9;c@c-dcLmIB&a5TN%159)CnZo^=?cS306Ey&VVm4^JUz<`#!W|%IKUY0 z)XXK5?$x4o^H<{lJ$_uJW(1iLg}DE*n>bE68PAqHh-=OS64!@C2W|=jvq`B1?Wjf?zO$%vq{}v0oj}XLHvN@hI;-V~xB8*aQw|64CfCm1rX8@!64`{^*eI0(xPJ8r`gygS>#!kiSz@f>69+=-u zr3UH|-?#5^#3V!9ASh2CRZONXE(kzJ;snXqE@yJuQ67!d&Oj-xVK_<23d<|C;M=7N zn#HmD>G#n1ON`}m*`O`oHI-=l$erClfXt#-qM4$|oPgDfJol<8t7ff#Q(KB;bXG75wJvq<8YmrrCF+IihE>8H+M!)O|1j*7e`Y}BTZ&7r_^r%Nv@E44h zUCBm*1)fB0l{JVY?4Xj@B+(mZe4=AEr{NKc(vVMW5WUA#9YQCWLEY^ZG^Q#V9ai1R z;eRetPRpZZp1h?MJRei%#TKE;Ik8y!eGD3NHJU{89i?<51|c(^rR+viPby93B-)+4 
zpYx6sKFW>H5P20h;qq-Nw5R(yEIm9DD-Mi;Q6rDj3ofSOjR%zJCz2N^Rf#D`qf?N& zYExpS<6#6oUzA~We_v`cJCTY@=ts-zzd|{uIKQq=T# zJ6;H(sDHq5e69E-x*H>bo7!nJzV?39QPE+j`PCrQ*t`;D-^>8^hCC$P=7(fAcbI*u zFu+(l0rC&sp-at5>39EMd+!}pWs~d+0}>S^h+-m(qNpfAk=@mpzyv0;B1#ZNR4{`H z$p|VK02L7sL@{7MQP^D#ijq_i1LlAcOqfss!}rVpcYUvOX3e=XXV$&z&b$BE_4L!d zJJjy3uCA{Bb<*2{F;=B8ufPji-X@9s=xGRjIvQL@jKS%thgt7;-O0RduJnG43wS!+ z!^qKPqUCRNNGeeTrFac)>C=dNACG~hJyM9|)M7U7?K2Sy=cM$zjRDb~tE~H-Ec9?! zf<1z6(>p{Bx=~xwxM`r}yr7}b!7~6Z-FXOZYkTnBfx7tPF%qlhLb%?z1n%t~z(O>v z*yrqd;J7XjN>gs4+YL9-TG>Q=6feblu5Ta})~{iY$_#oqDj$9-_Q8q>GZ^3JJJuAa z^X_yJjG7~0G)Pq9_75o>Y8qkJWoP{88HlF}?_;U%RJd-e1;!yy;EED}&8|V@V)zX- zlIg}R%g^FwGgofdP>Zr#Z<8KxWDvGwV81;@xN!an+%Yd2Ps*HN%PxH@P_XKT?D_+e zvHAtxcF-Wx9lG;7ey1@0xeCm(R^l zq)|h-_cblrbVZ8~+cye6wj`6m{-bco`180e_7uxK_(i;UQ#NVr&6usOB+q+jPQHY3 zc5Y6l=#Y7)IJe|W0qcB*rC$wz7D+ptIQJOiR);XrF@XfeWTVfFp8*rY@4A9R@v|Q0tDvDTNE?zUP zJ2a$h!ak|)cz$L+zCEVS<>+UUnK2HGdM+Tw-KOBQAwx0#*eKkQc|u$xQ_Q}kcINw} zEcmOQ$NA{>2{^ZD5;UCNgvZSe=f65H;ws$|LFq!FWkc6qd{9{qh6?tQtYPN-i_8=% zYy6dMc%?||s!rpTaw)EUehr@Xt-!VCC()LjXIZk(S~$B)B)<7`Axt`9O#4VFVad`o zcC)`SSIQM(ZmqEHQV66&8aMF|0uICAtaIRfQ-#zSZiR%XNE~TY4{DP`*iKO7(X)PH z%t|q3&f%75$-#n$v-)| zBMZ|cb9ZlI#Zi5rHuwSlyl%o`lH@>chj6yZN|Q$ipNIAC-RN4s!L;e~C~n(hAB%|H zjMp;tpv-6@9y@=Q9XXPLlLE#==ZXdvGzj>-u_pN9%1kETYbidCy-j8h7zzh?H*VN- z0q?Tm94gDi)7^oa!GAzAW^|~-_xf2h^kO%1vC9tJwtpUEpH72=<(054Cl`u?SK|<= z4Zz;%iykIsK$AlRZaO!IZFlX*l)t=T2YhY$Pm2WH|5c0=mW*N7?L6_^RddpFm<%+? z=i`h1l6b?{9zU5%(fd~OfM>;!KBp}q?9mjMT-G12nI1*GIKfU`9Kt0+?~xMg`RI3l zI-Ch!F3R{agTzg%fQcK6QA1jo=d+%RGaeigt&s1B)w#CdEG0*q7cYR2Do#ez{F1(v;`eZHRllq?HfofZ@BhYCdw{sh0y4tbo$t!Khq5l`QR;Fcz%C1})qfa&HYyCt*|Vr>=Z^hXjVtyg_WUJ!!3Z5@dOV&|LewIP#H$_)ZsH+D+{t zzUsdpZF!;irT${BI{g^yp_gn~Gw3J|3lR3v&JTk#4m+Xin|x|z?uGh>1NfOIU*Y7R*rLB1iU7V z9(30?5l((x1R62X&=k>;2EKd=`MithLA43Ibz&g9Apv{(r@^8(44Z>;V3hh?^qesl z29M0aNqLg|VNE#g9^DJqt@kErp8)+tTD+wm>1eW%>=}HQj51kGb-Fzu{bmqh{&y02 zzIz9`e%|~=r!ZQ0@g&fb6;P?th4(6P=liQtVOg;iOwVy6+2tqj>(e0Yn)ic!d}9o~ zrEarh^>KV#))C^g`wb&c4XOPlH@LY>m)iV_7JO%dNWckY))g(Xke zrYl|eol*;wkNXZP2hNd_8Dq(N^-O54&|;o7o#0YaJu^Ov=+r!tuekgPLYJ?_p68`m z(pV(l@NT|BA2F^9)z*72BG~>22c0Mjer}fPsG6SP{L0|&rxCTRc zg#y3%G8z4G8~ISADQez*2bUjR#d<#uuD_mWooF}H z`Y?jtE$c~(Eqb6%ZC|YIB8#6lN}^$i2eZt3h`Ys==s)QkFh z&%Q!Vd>g}GE+50re4L2g=C~2xUh@21@iasmGg{P3 z%eJw(Z|kr-#uHm%3@*CvN_V}u!JawHW3vM~lU)N<@Xp&@%+Y^9{2r=vTaPHbAax#H zjiqsQjvXc{?!YClEO@`o4~UUL9`8(#vA#j(-2dKc{(g)Ubba{(t0o^rwO#H!SMLD$ zJ~05TcVFSVehu_{`2e#%$U~XtL9{wxhSE9fh<+b6++&f^qZpeu#5 z_iRVE31Z8Eu``L~s2e!l@+(a7aly13aTtA7iq~#1;qhgG zOM1BY+WJ$XQL3HM@of_J>$sWSHuWVJ<(^_r!EM}_E+-mo*avINAF@(9n`>88VnUQY z&s{Eq>zvoX&|UJdv91_(35qXS8N%ucMLc`s9<1o~fo)Fn#(SUofZMoEe7eLGa6NuZ z9FQ3fTeo%OC%+@k)7GZp`eCHsx&VA|_asYBmE(cNTQJuqo{Z__!*~D6W!`pk;Bx#@ zGN{TBn>0t`oDNzTognHg{Li;I4 z1s{d6bor12Xqa`CPv7W{NoLX5Cs2-d7*!76hicGV-=R=8JQqJzq+lnn99T3)o-BFQ z8+&!-plT<^D~%awT|R_;T9ryWGXI9@6L_t`xU6L?j&#d-RRbUm=e) z>NQ~z-v`CoM@4dVgHSF?64Zx86*Spw6uW=e48!Nm#OUlZ7}~OsU;3F0{-bX|;*C0p z2~B|yPfbYvLUn$%@)gMRl3{i!OXy`SSKjcpBl8M44t6dzY*ZI-zVD+iddD4P?x(f+ z{Ju$OnQ#r)A9_oqE+pU+sYTJt6zD7&8=&|_*K4eSU7V(>`pjD8GI+Z`%mHKTlV9ofihg_u)w2ZBdEot z82ZJm5cQ;f5yK&x{Kx9GJgUQU-pCcvu&^4pDPAFi#@E7Fo6cm+1T~S0R5-23*T8zx zfo?swQrN3?fKeyC=(}7aR(VU7Cq4k=t3pWF3O8sPqRw*^j^eS?PN;0SAF}%?@<$^~ zcwa+x%*cL^yPvNUJ>P2%7LBjP4v+PDIIO|MI1R34_ZZapHn@~Lj_gS~#C-3`;JfB{ zxF8{cE-q5I|6CK=*{IPuS4{b}%v?CIb}D}sn82eZhVfj9$E4!XVEm?13$okFpnlD8 z`t3*!Xy|m}(y`98=D=2}bEySlcB(Sjkr6!ZVKehP^$~(Yeqe{aaf0uJ5|6LjO}8H3 z$s%l4ut4cTs5zDa-9jhf#22A3`S>`VHbxcQr+&bMJ681C?p{sEowafZ_R5~fm9Po@((i 
z4_lJdc(qO>Dw}NO7goxXEk<8N+Dn4?J|IFR-x6XXF_9{MwFl2fW2t-1V>TpJu-6^vN8^Uxgl9D`g>~yevia>5 zHccyA)RHv=3-r#g?Z#{PhqJom)hQXas-luqUfm`t(5@v*y>_6Avmv=QA)Z&NEJMvV zf=N*6!wq}U{dl)xY$vP>v`p{&&Rf*`D;1w9QBHw-QtMPH+ARte`JuLu@Pd~ zFn?AODa&8(59A&<9 z^#!Ol@|5^Y{8tw7D2f^8tI{=(wnJD(1X9f94U5RsLOZW(xkgno}!u2CjNeBNAWoi1XsrZ zC`jK1qfNdL;~5Ef`k*S4+p-Oxo&OH;LltuWPilzFW@zuHt9IH*yLwf|y8Qy`XO*qBIYRo~MuMP0|s|eG>$M6EZ1nhHj zG=zH(<7wIc#ACyERvs$&amdVNRsHL6Q72ja^>v{*vtSZ?_SglM)O3f{doHjEcbDO8 z*BbWd$8PfZ*;Z5wPGqW+ONIT9P-;0{n)FUrr7qXv@Vir@_NX(OI6 zjaW^-IOPF;S!+NCWykWBV;DW#4(1il>qN%SLv@xhLxP#s?i=AQ=A zct<^0q^L)YnmbbE#M$Jj2Jqt_o{IC9CBuu3GW_Y&+mKb;0Ua$i;(_KfPxg^$mE9{>i_v1O0vC#2eEo!)r;unYe3I6NZ z_`ZJ!EA6wA)NS!5me4htR(C#t3OOfn!lC_q+8GI+T9$z)!b8O!8$0pTnx(YiRsy}2 z)s^N=8^n_aWT50y!NxLf8~4t5i?fWCsM*qfbWL|yF+(wr%2#_@lh%lwpG9e z9Kx+ElR-wn5{>`4fXjDOCD#nTvx^H4f@c5q_;V5ESGKy*`D&X;o)ts4A1Cx&J@9D>hp`0(^m;)@SZ3Olk8?T>uD$z+(myKE6=4Fd;Hv$6v}HHU za~cHqq!ysK(uv!=PQxILXg*6~Co4P`f@c+Gvc7p7NBb*b!>o9evGoS2lM%Qzqyu04 zC569Begv`(_u#yVC9Ul}IGN;YBP@i%0^tR8esa98GbE=%mYrx4DH67lCkX&iS&1viD5i*#3ShF@DX z=#-(SLA~w-cGc=YgBtpfp$^+giFz1*+Wi*4zYYSUQH0gKm0>!Q^4X{>aa=Z9izYAD zzzKZ@vR-3`!Q_Bzc;TB2f3LX)6AoR*qsIsE_ak<|{2$T$q5Vs+w|N1VgOli5vo+$7 z>K%v+?Z{J2M&?b7$M5@I5YOGgbp4tL`sPWIc-}#KG}zq><-5cXZ=cSB|B5%l@Mzdl z5e3y_yU{nbE9mp(`S7rGKbs}qOk(%0<@HNC(Pg^2FzU)x5G_st)0dAS{#^{lKaQn3 z>Q})}5KvklZMk&08h*N)2dYVNH2u>Knzi2r55G#J&xfwTFDq@u5nlE%=|Cx(zbr*n zCuO)e{5HQ>Sb@7&&V?-J7@XT8Lw}fR(@tKi*on!JWZMK~el|3P$t<{!{#Hg2-=AH8A= zZXdXfwv4z8OI|6^PE~@=nP82P{3R;pOF$n@6>@48RuYXeJS~p zuEF<4%t29ED4IHXVaLWWy7e;`Fv4atg`bCDspcmZ;qL)%UY;~_{TR4Vc2&H!aTku> z8-erHj*urJUu+TlSl8}O=4%sP;GF;`Xm)=Ls$&Q6_=`56yW|#oIi?ft>SKUYrB$iq zsZTh%XSg^`b`4PpzjzuGKHDb_~}m4SW?sghiqGjVev(nMkiyJ z`K3_0tO5Lt3P{vIS$=oW0gTUB0eYFnJVryEMIRbS&!t^Po5C(E%Xbspb&M6}p6S$J z$w%norvZnYD_Hm35jcMIDVCb*hZ4Fyx%xqK7@9TMaM6jgkk%Tc?VfcWS`q zV~;?pS)O)(>%$6yREWyq{y1*dM$B8H0B62wz|R{e@wBrgB)Qe&7b8{lE$BwyryoOc z_6L|y?u8}cHu!z&M$3p>KA65kg|F7F#50yrXiWE@zFZ^pezc8ecQXaKeF}7Kb1c=J zwu4#NP+sS705#|J;ek_r;Gn({yxb!NLJmJ-HJA3ovwf!oeBKviVMTXtacmolS(-@_ z=IP=R>j$uIz7~GHe3BR%Kf&;OiLmLjE$^qQ#$$`8K(ggV=z4NcesHlYnjE#`-zQc= zPt6RJcK;!YH+jQu-BPATTI)dNQ4csgBa6=**_m&6u?o-c$$|3YVKnimBFwj|CR==b z!0+NB@UA_K@#C^Fvf(Rv7k8cg>>UoBd_IHS-j2NQ(DOK1Y9)L7GZW%mipZm+c{tu7 z9c2Z7%&#-;Q9S9R_)*DKNbNJ0?~aw`R!6&WwE`&?BwvFql@r*kS!cOgr{3JE%X_dk zb>u759%Fv88{IzRI?7#L1~2EVMyF2(w6RW&heobMJzE!SI-|p9IwY~A@-%vH_6t~7 zu7oBJH$$V15;u5~Ox(=U*{N|uY2Q;?yw_z_();vnws4sn&CH7B(-$_d$VA{UY*`MX$>Vc;exHpS$w!xJSfl#DDG>7b4{00 zLz)5?zNSLT;;B5Tb2yHvHAYi~aQaNJftRd^!ikgRQMvyaoO@o6Ysf0V;k*54TF5@; zU7JAENhm~Ltzo((91oov!#f|_i}&2+>2T%(dd-gnf0Mp2(>@ZH6|aHq75(6{TP(Tu z`40O!=7RV(lO=}3>hMdHD((>Odyti1hg%elLEA0_MhdnVnIFIv?GkuE!w^=g9*tE? 
zHVbLi@G-WOj<~v(&K&g|q=)4~@HsU|@%qO0hF^mDn+}VA4gCdgVt>LSs~t@FiXyH) zn!r8p%i>o72i|v3Kj>gv%|x9&c}~wX7|(U-w&lk_PlCe=*;?qYa}+&cG`Eedhn@o7 zr(@$nT$;3#&$_jUIjX)z@#f4Cdz}(RmA3@j{>t8*o}9#{REFY{!@EH7wFlT0#p0Fy z&f+6GyYp@9^x(XJODy^kPDW(xA;b6G70J!W!MpQLkOk7}yjj>kwy(ND9{CN29x7|m z!G8{(U8M=NuQEj`$9JG;(-I!@aUm>RYQ&|#&4RlJDtYP3Sy zl&)N;OncX5Lu0Hl-|Tt}u6$01uAw);Pp5?FDXrl@(@b$x^$l=H?N3(B%z>+4Z0Lyl z*$|$$6`OK*;B4y@c7N0ct}3}!I1gw=ySg7B(nBs_?1mXMswz;>#gg&jc>~_m62uoP z1wvq+4ZmKa$~6r=;f|CxG(hjgZTK^mx6;E3HMdkMaG zrWmktE>iEEINLXkY&^6PZuT&Rt@g%93;IJ)pfgqIca2Q%cN5>^Rn(j|3F98di2b+c zQ`cxpuTKi63+iR)$L37D#23Tdn@`Ycfev5uJRSOb^(Wiy#^X=h7-CaD1-jcD;cxO( zxasp?RC{rky)Y_+NX$Z`C}}oo`4PHGwI{6&$bxva8(8_O3mq9JNbPCrDB*Y$LcJaN zmGu`eSMQX_&-ymp9Qy|4`mCj{VoSDAJ&2A`eFNFwKEU(pKD6&`EByHN8>p_IN3^}i zG0BtabmeYye&*&fSYvLbE?uruQP=?DsU#24KPHtP5*~({YK^ zEN~5ofyDhG7;xnfEVln89!%}>Lh_cEBsmV`R`in%inwF4hZ_#<dKeQ@O^N-XNnb zg<9F2sb%p~@b)zzR%#vT>?5;^0UzS71(h#s(yc@K4<+4>(XR+*b0Gt=j zQ7RiA!27-mT%yzjd(YA3pOunfNu)NB$a5C$E)_7UyV#EyINt!HZ}#Av*WbyanZd%jm<%X=w*}Rm0x&8(5)-T?n4QZde0cdJ zbHCmh*Sp%`Z!<=N6{9TcJTkgf5jSysNstl=@4?v69Qrh^vhK$pi z1ik%F(6g_Fvsil_X!w!AIHm9#Q{EMi^Pah(d9*y|Hg)X723d3;lgO8LTF$%H`Jq$g zE?^G43qVir z5pKDA7nQ7RF@C%eH}>d>>KB~x;pFKo`#a%z8b)n9@xP4?zq6tNV>r3X}WZ-dI!EKIGqi(-OEday8&K~)Wxlq zTi~$Zt36?4BD*&yR$M&rC^ow|Q0;VSZuY(cEaZ^6O^p^VV0wabEeq(b(@S`=LKtTFB>LT{glUB)g71SKXsxNk!yQ-9*$v7(gNx|7Iwzjm>nSAnaD`8$QP|PQ zjH`Z*<0r0f#eE)K_?GPBaD7fA%91FSlo^Wk_cEBr_SI;fGzXI|7Lepl_JW_N9DQML zK@WVEn((u0 zEP>vXJ3iCl`z3`PR{K7vzAucr$~y7T$7)o+Sd-W7eI|Z<+?G%2>_fZ0T|{4+NzxBL z?8L`Z?O?{P?Fe&1A+%bJS8aR;wL0Nk@k~Cvs}bfUy>MRg{Vq}KDBS6>=_ahZBPsfr zsK_^ckmg?(aXjfi0G!q8!RuHT?EBfA9=m^mY02vG;D&QtJ1+?HJd5C=buVamLHNd| zboxBA9Ii~$=OaH4fG@}9Lu5e+c1{^!*%*UoH-wuLE-+iS??knK6)5f%aX5k4~HD z#0z`it=elGP$tPgS9|ddVOKHb-btF%;Lr6E*3h>DoVd!VS)|x3pWYjO5FWB{5gg9ZE~fT!(zei@-}uT#REK^ zUBR!nyJD}WHkcwS_|2bnXP%#RA=p}yA3OaVr;L>0g-<+~e_0Y+=@kt38x9xPUyn!E zm`uE~G75d&Iv}~L4rgDM;2EDu+?<(1+N?JP2CF}kG1EJ9!_S5I<#HVK>u60iO+VnK zAFIfGXH)8&Aj_`GPT{8OPQwcCNi@sw1dh6~6yDjZK+|%0jF(BF7nVFC26vrkrv+h{ zA%6>8M~;PDn>*n9umS4*D+?l=cH*53HJE=R3JvyF!33oM;u#qVKJ~-k&;kQAI_3va z*Y@G3J;(4xOE+HP+#BQSBY6C!G{~D+hW&df@dd|ZEUmLDnP$RH&^JCuczJ*^PRc67 zl68dzW$FGgjmc=;Kb$4@|Bg`=-9RbuEi3w}OUoQ9__|67 zS`c!ASv+;;^~*QF&2D*Oi<4u}uV*p2c=-^#+WsAr-32U%no!*6*MYvDr-480Z{cgl z^bvFG#^ISfRemGwI=dkFScm8}uqWfB;BxaRyx(^`EZ(6>^wiG5b<1qrcWWJv7mZ+w zTYB@9fpI*8$Z=`sO<>dMEKKk2h;No|;~UrvbQaE&E|KZY=I)jRUh=_m+XF|8iBurh z4{T-kN@U^M{x3LXhKL6nnLxzB7tp2aRL)XPvM+`v_-p%I=pf{xpwfZ6W>zEYI0;r_ zb1e0i;--oFP=BQ=Mx1|$djt&qcMhWSSBo~_|WS9;UwJ%)3~&u+rv@-TUhXEC*}9i;E;ORFjl za$9x>edJZK;*l3F&okh2BIc35+=bv--I=vannsM5R*47tc7fRSz34x}7uRV6L~5u1 zJl^m4HoV^rZ8^4*zr~CDzY|}4WL0Z@+iU-i@a5)TZ%yBhJJlNc^ZP%A1Ha?Twc{>Y zRQ{0zf5jIU04&?_-~Ol25`7m3tXMvKsKj6JF^398{;QlNg}#gDnRvIw z3-IQx@xntpo@RT!{t`A5e)m@@#4q#rS^k$i{*vckKJE1$`Pb0@{#QF5`nOB1zwO?} zpZ^g&bkqNNJanD5CbqY`owmRK=jCJBe^WmGd-2f!kz6}|qm==F%C)wR@E=D0o?KID zoA27$@$betPm^seseSxiz1kXnf33X+f71Vo1Ha>(7q#g%|5e&Q-~D|K{3AJO@v!x` zf6B?f&%^(hWB#6;w7C7pR=4X{?Q-m2)&E+@x17X~wqk}%ZhdIa-=!@aIJNct$hLGU zZFuMH>3+vQZ%Zyw_M6=OcC$5u?rrUA*G_KNw!UxAPvS{ySm#+QZ66oke$Q_}TROQm z+DatePi%c?&#(KW*06`me#iUDyAsJ-e-x|5eyukl-aGy0?Yf|F5)rtuKlmw97f6A9X^7lU_^t*h~X=`{|TmNb2!@pa9wP@?N?c?+Je%xM~s_5QrbMKhR8SM5a-)+LdPO2NWC^cf-}i!71y zbTyXoK!TZDhKTjZH}O-;KIB`RI!TtLBJ&P*mVU1yMFXDq5ZP{41i9z0EPwhx6QKv; zh73{ugkGfDNQ1pqNe06xZF1A{1Ejl36nxL0G-9i3j%BT6zU79!rXv39g(zHn!ZJrQ zN8~7{#Ps6J#VfOVlHz&gqA#x|Qq`+tDR0~gnQn^&Kg{7c?1Lnqv7!^z9WWkMuZ>0f zhCn9eyF(nd^(xW%*h{opCtphIB6b-bCvx39l;piJ6iqq%LzLXNnA~y6 zB_0>GaaDN{`tMcdeW%Ov&8Wv8ZxloF$=L$7yACz$6C!e26)wtJexe|7hJ#2h_@!ua 
zheVN6uW<3kgKx#=wg*IH*+fxf!tMfV+i0kznSR)OL6MfA$#ov-N*+ z;7<i7NI|G!_Ly0H7VYOe3R`O9Z7 o_EZ`uvrZCbzuGRoZntCqZMFV5j@#q^KV?wEX8-^I literal 0 HcmV?d00001 diff --git a/facelib/det2.npy b/facelib/mtcnn_rnet.h5 similarity index 88% rename from facelib/det2.npy rename to facelib/mtcnn_rnet.h5 index 85d5bf09c9e42053f5587195f55b1c82abe81fde..798a80767380ef6b8f887cc769b7b2b73e6745bb 100644 GIT binary patch delta 46554 zcmeHw3tUvi{`f8k;v2*iO^iq@U&zzYJ#zp-z&Da0hK9?VgoOYFH9Pn&)6%HKM?T7Y zWVf(V(LHm(SDJRsN1Eonn&$gv($#L7mA^A*W|rNvyX>;;>c9T%N11bGzVm&Z?|kPw z^PRK1WVhFm1&?yC#zsZAY1yTv0)5=v6wMUw(k1xk^>^p0-aU7fMpTLduO! z_(G|0RiHL6QtwXi`#4J#oWhIr91}lyutEf-1ZYY6DHKoRD>q$*JuWzVAZY}SCi)Fg zUhLqAI0TU1_EMJ&4uHt-G}%Rva;i zKwCuzg_~khMxnYWWlBb2QBJ-_tnVyRM&k+n$4p@Jr3|@9^odEM%GQc-g3wJun5s^j zl%Ywth)5d01P@Y19b;`IJXj-X`I^j}EU{UKk2A-aE7C1Nh=(eIH2#FjL|zG=DYqM{UyIxoX_ppWkeZ*r0Rb8Bxor3!nWvdGi3(1vA-y`I9y2 zDH>5OiQNfeZ5_)ZFH-^}rVO+Y$%*xWQ8I2ReqE>HLUoQtotlf7C@!3gc*xAjEyjuo z3>9z1iAN(&c||$XB+-KU_(l$A zDVh0&dFo=+ke031i0Fz^L}rVn_MBXGu{for6sn6Q0u-A%tvF|j)P(xw=4dk1lI|xL zWu&C$sF5ZJ!-T^eO-^x+I#=YgP;8sd$;={AU6`ZRq-8vtCc=nZioEGP#v-v6kq_|m z7aNe3AgY;aO^SMo1e>0NBu<@-ctuTm z6y_Hh@*wpkvJ_@yP0ke=#Il_|IksuYt6=e{gH+mBKIE@-i znq6|TXo_@rh*}c=O`kDgf}u4Q6lUa3mduee96)jL8N(9{1=d-6T_+RfsLn0OR*M77 z;Ne=KO%4fxJL_qsPIBC2B)Zs19L9wi=_%?`JlsTs3`9a24@6NTVt;A$8S8_?-Y8F9 zno^LTk4!Ew-IQk;p)QRh?dC?S2^=OAg{bHaQx1<)y+K)o93+yjZs-Og*~F@PN2xj4 zb&t(hc##l?MmzFB(nB}&O_p4f)W?K~b*?GcqHE$LUrD`*o*G2o^dMXlCplBozDZ`j z4slIfJ-(^Ig9({H16>nGJd7#dR5uFk=9-Mg2!|Y$c^_rMEG@byb7pDMMakzMa~|JO zE{en6i})zSx;cFqsxHWQyf)ZHAsn2qi2*G0QARFGH#QsZqeTCcEDA3C8tbFDM#fK#^--KW zY!{8uc%+#dlakq*FiVR*N@tdwEUY)_q+}1-o<53ea2$nLH>ZzWbzW+^I>p37XSmcN zIW1`qN_=k8;)s(Xc>R8+2J2*vI83C7KG`?8WN?J;Ga6l`iT?+Uk*=nqib4JnG5Lcs zBL+>1(2R~$XN^SngEJ!{qawzR9`sy9WOPLKq=>AN=)Cb7E+T(KWI{Uqk#9&$pk~9L0 z*NEE0vp4#S;=@d$rxdz~#d<|r&!1e376rPsZ^$$KQIC91uU^-!P+fxhs0*_u=VfS$ zQHVBOTK5<&a;fXC&nS@V4wjz0L3kM@ZWxI1r$|Qx5O*c1MMwsr>tn(i#5JA7Qh>DZ zZR7%!lkZD7B+4@%8Y#t1cMV{MT2CjtmD;WqIgF+WCd9~0W3gG3BueCrz`oHd&? 
zvPWdkA%fx$i<>l!4KnJ9F*y15%>!Bp)!DzXP2epF$s|mye=M4Wx%Ce<2^8+?OoEfP z$r`?IXD^fBY}~iAmr02BN4%f3A82|H3dQKMeLH)3O^$9C)0q^xH*+RYl%H9Qf>RS# z@honVrdi|Z)S#n*x8U$@vbr<}&GLN%>hoZdv(e`bMG$ziv2K@MzWq1>0g>oKABvc_ zIR7JzB2cogK9^GEgc&mwQZ^K&6wDW#ZW`A0<9?)>>4vmqH4wq=OacteZmNDeAhNSSpoR>F64L zXi4(JQXQg?LNRiT1l37Ple&Au-OxY;94K{_{z$-e_lCeh2$&o^(P6H;N81D(+AbvM z5HvbkLOytjMDHXnLntU_I;4)UE_T$xqYKNY=|uUllCZr}E7d(dW+4=1C=pJ4MDjxQ)&!E?m^7LUgb^t9Ru&Ts<1(l8kp*!x+xOsrOoP zUXMyH>)+_xrt#Nk6*dZiHTstyI;EyGv2He$c#!6CaWZMNu5V=EXEgrzf z0Z1Du0FjRl6o4Qm>1d#5MQKSG?I$K2IPsX_Kz~|+_TUEkBetoGh9c0z=2S$bc>ErH zl+Ccr^w6oi>`Ktc>z(QO>pB203AcyOV`SsWN72bm(TS?bMU)qja&mH#D5CHSeVmnL zq0)uoNGn|8zwL>aK){bL%Mtyo?4eM^`kfkbZRxF<&}T3{1Nj?| zuE7|rrH@tVqsOj zHqKJSN@qM;Qbzkp2Sv~fX>EXt=S-zD#zgROI!UR6p@e8j>uq;DHKrU7@jIga5g{Cr zCfTTL>$;hIAq_qMQ$sL>J!Sz?AZ*G#R z9fzeaBT6s_=QaTYxGyGvv{)LqOj)pWNLSZ_rIQvaYnEQ=>RR)D$HU4Er@UR28#;7x zb>fcA-33q1-34|RuVZGcJPXI2Z6!?Yk_ssb2bX`)=R(su6AjCK=kFBTtlDD1EOzMSARYF_Dv=I z_2f7EC~a!^d&)2Om^8SmAQLK8m6C=`akQq9C5_aR#$rUJgrI(;Zjg8p4@yhfZM$V& zEF@mC)tE7=k-z8jxBe;q|0uuy=#b>60Y&9_L&c%|MPLInFNPQl@95}28Cvua6SP?W z_Q<@5jH1koF7bly=}xctbuEX##)~doEoENl`qdu`ppa)?jN2`2uyJNHFL%-i4TDhJ zc!P~b5(K(|g^~T6T-b_8k35et*G|NxrFCs(Du$zu2HzR|HKK0>;n<2M`VQ2uzN0~8 zdROe<&d4}eW4^^V+4s^U%kUa_0P`cWO zt^aHtCl0>&Z1~qs&|-py&MUOEeV^2S1Qw`9U#`DUg62SXY-eSJe@pX?S5FL2?N489 z-Sxw^wkGW(s(3CxiWnJ?$E-g8hiM=pfy^h67dubZ~Rw~Lt zV7BUC?pWLje>KF0D7$o&($N%48%Wt;`tp6_Bx1ajfZUj6^yIe zP}6Z7+k+~xkwP_~`$Lpp;?h3!&7~o|$a(rQq7mbrdJcd4c>^?NUr_tVZ#z#-^o?{U zcdc}Ix|8olrXA>DjiVK|l&17gUmKGSYl}$nnHYt_@SU$f=|wGc>QHJIFAUp*q;~WY znA9#_&~J;Od?NfDdJjwj5HC;#dh<&vMTGKqy{N==vY|odk>0qawl6OAgG__mf^lkr zYusBMmU0XIF+=nbQpxK|Q~IxKb^KKmeMjq9KAfrXo|4|N($hb(`XC477UVB=8Kf4( zpXD%m=gFG4Iz&bdatr9*a&EymwIIn)-|C>RjnLAcJezP*(t29T+ih5)Vf{NR?}QTP zQbTA-g}rkN`nb{cn0T)K-$JQ;#*zb(qio>4p8Ykjr+vVi=v&mI+=2Lz9`0mvp$KeB z8PunL`BLkE>}5>jA6cNRSYOsh-+C;zLp~V#Mk#|k#M|ih-((WdfT4!`-i55$ezy-s ze)Ig2pWQGIVA%56?KVy|7IOySq!TS|m=`1r&dSP>1QmT===Lv^GjRVCGhRl-u(-A7 zgy&Xjf25mX@vv>CiN4|Uu=AaKzR~3j$nP}id=_5U*)L~+y`-CBA`z&z88${b_Pkv1 zJr~*>U-%HfJHzk=1(csP3ny?J$vTn3$=8ik(uw4)5(+1K%Y@UtAL&N28x&BU?VxX5 zxc!D9ba3Q8j;OShFj)Jh;nyN~mE-%+w{f{xf)nlEeYYWCD7jDCi|gsi*k49Se`So) zFE+g%!=YfGZtONL{X$O}Dd;1j8lokw*LUF@dL z%on!#A9Kfu4&2AGUR7eFw&bEWI&&DXULS3q`^LA}CV%7UQVdP>6>=XD1fp@A3TQtEha?2#0(ADXp~EXT>X#@ zCo?$oYq$3QiYjxXsFr-m7Y;tfO-oB(Wfj-Xu>nex{oBdpY(!&?{^i4n1}GK8m?&wK z^gt;mhmrg%>I||TW>!9J5hLWgG_eWS{rsuKC{B5Br^DkELw_xdXzyzt?yUHN?PL2=`bn1b=jC!3JAvZz=8R^TwV z5~^yA**(=3m*!SNI{Fpv7?ApCD`?vTL`UBq$ZWvTL|;#7a!2Ds+U^h|qM$ZqM`&lm z66@b6ekayTJ(=8^4RJK)?(g>eu_o)j^6L8~coWN$J!aEl19V&59TkFFR3IOtyB}{rXZp) zgTz zr?nU$~@-EV_t&qAusk`qi4sPZ5_xOl8RDFx}{qvU4k&Uh%nh+hfCU+6P=fG^{QUwIZGOFQ_ zm1~8`8`rWoMjlc1{`Y2WEB_dF;@2^pkn!piFiKMZZ+DEVez{{@SkM3d8)!Ct51)wI z2;Qz*4+Gb)0HZ6H!K+z2*q<*~3Eo9>*s~kw2rdD!;Aq!q=(gxQ+soy=FwbM6P@b%2 z6Qi7f-!NBr^guCl`Bx2J@X1wGQOd8{6{mc{Zk+R}UfBCQ802{l?#nC$zwF|Q;criQ z!|qRa25#4z3*MXE*tyehGYgw3;DVjQh1|4Z>@n>!xFTga=ymZv{K;Lx&M)^2dx`U` z&R&ox^m#jx9V!e5?YqRn-Dlnr1gE9!rf-w0_y3q2_Q`*zgZXo2z)!B<2LrwSfc-{f zz`e)QLFlNP|ni z&u^?1?!H^apPN+0Byamgcxvb`Y}H(Eq0bI)cJ80C)ys~?hTUt`t(q%4=p8otyF&2i zkRq7N`K!Y2zo2#X{+%7L;GW=@k|KDVoXED1xyfwy|B;^`JBjW6Xs+-=LOEF8xg2VD zy(oBYDP;Y9lh~5`V})zVQgFN9OK`_)x!SJB6snOgyaT|3a_H9M1{2rmD!+%nz&gKl zLHK1*0sJ#L7u<6Hg9CrO_B(v}Uv5zORZDQ-m-`_4hd*Ir>Km}#fHy#|ELZS|#sx<8 zj}My`9ABM2)g$a$sYmsCk65s}RWw|GYX*oO@iMG_oJ2BNY3u3~&wyXlvBUZz63lB5rTn_N(t`)G~w=5JspTJe{$A4PM+^hJ3@80q{ z)2S*o?E6)z)o;Ex9~}PAT$ngE8zcv&!6&}G4q}#Ehm+d1g{__1g4i}?Y)NpLFd*O> z6FTBLzpH#6m~de>JT<8|xUA?4yAL=CZ?rf8mM6r5B<(QhSN4|Baqe4e*6E*jF59M5 
zeayOor88WhH+M``?Q~5Qb~NrV+w+6N!s;x)>NlVB3lpAwmcKhAj+wFBvpVln&#>&a zy|wekyr_Ds#;zDW#OHupdyuLA`9DmU+&w&?`-kmG zo9OF3Xmk0hxD9xUT})sth1!*ytq!%D^slG?hj8gI{hJH5@2ocn*EroPYBUtK*BV~$ zmy#sCnod`7by;8eZ@(-K2cx(eyPOuDU0jLGjCvBqCT zl&znwGFant_=~Kw2@4OV&|A}E`_t{aY{LA85(i?*(cJmSYDZ&6GKbl}8G7>SCi-%X zH1uBXXnY8QBw|AppXqd!sDJ8sLMfz`rF>0ayAc~B#pfk6W|L@}oWTFq{xNCj-j-z58vvu^t?z;&-)Am4Bwm8*W(>c(vJ+X^4~#|hB!e=7FM zh-*9BtaTC!TkYqgn-39MoZ1fJ{w!4e(Uxlm7X6mZI`#bzIP>9J_Je<~heIdV$1CnKD?2B^U2irAo+DeqF6FD3GaFuK?kF#-#&PF% z!k81Sg^VnJq1%vGV2{F0Ow1Go81{ukP_OO}+Y_*w5UT;Pkx2F9OGh49kn%!{50%&sLj;S1U>!r=wZ?Dep& z!bb`1*qNW!fNA&ogFZh5g7#15YTvrnAFzS@nJryD1s89=2)f*eR_zKK1V_CVB)s|E zC%|P+DsyS=Hsgws*&%P04MbCHnw>S~hegT&VCiKU?+ zaO!zbx_TF!+tQ156MAUpR{jgL`=}f!ud=YsQ+wgu*I5-f@F_o~>l~&qXC2ewqC(g( z5`xcGyYsI*zX`_vu?W79#Vv&^Ctm|67HdG!x1YiZzdX%+k)8&*Bu`KilCHXt`VAAX z!y6n+RI>XDl7zU=JOuE=VtA1agEtP&f^m6%!m0Pa1#P^JfxR8qtEOH4N!9b+Shzeq z8IFwH2frA$0=g_}4&w(d*Uk&+2lhX<2d?OFh8;a~7F(2e4D94GAl$$Gd3MjjS#Uwj zUAQ~=17UyQd~hiK9iD%DEW5pCFKFwe1b;sH2!AU`3yWVIA?5>r2RGHPvi}A_I}hd0 zhGz@sGF6c)RGnLFg`ts=AfRUmEPag!rPbSoeXCFN)hFh_r?zwKxet~wFR##oofkQn zu>K#6=cVm{8`eAsDkJx4kDXy)W?C)t&v%^w+ijrgXYW9M-ngaEIpcfy=)Qhnq2J5e zGuQfps(uXL+S!laF{Y)^Zo4}>uBMl;?w`}yF3mcF$Ij-!TOXBaom=mO&OSfEkk5Vj zjTP^LZ}#08SXsUToZ2>CyXA%}T(kWb)pvUqsb07}o`Z{$V!%w_9Xo&Y?Z*D|&?e?k z?cN=$H<$6}J4eGuM;&6)=geig7XDfzFEqQ9{&LtKHiJ(UB!W)CvIWZo!QGgnLdmMJ+`W_*=IYLhIBa zjKb}C;mD0G@Qr~-;IVJwS@k#Wuw~Cz*!I793gdDG#&y*ku&jL!oUv>Md=icjg4TWw zHk5S(Cqn#%EZ9o;V&+WP;hP@psfp+K-RhM>T>@~oC%{j@dOOy=ay9~77PSFbGOJufG~lJysuK1-aLUPm6$ z`llb`{hp74H})*!m)}|f-iy8jm3LGej65+@2++0>GW(RknG4&)tDZoZGyg-u=}T|+ z=ABwFA?y>d?X*8={o^l8!YCg0ePRkcRs19D-D)p0ve*d*md{lAR_NfGGyLI-9N{DQeR_!aHkKEBL>q?w?1ubJ9S zi6h|rU)u`-d+)+8cV33>8%7Qk_QtX01NO7ftpt_fM72 zdqv=SK4jWVJ*Pdr`GXyEIG5L8?v}YgbGLw>?DQD$h@1_$@795%fjgO#?q0%@jZi4x z3z&`TKT>&*aDz`h-X6}0&w%NN?!g_;e+ovQy$GI|+zc?o7K63JFY;sWLH_umA@J1r z7=A_hF5dU`p6vNcPCKW)9m>D)g%bLvzs`G|zYPMHwE|-{H5a(%GusI@dph$MV_2rJ z`#nA}dniBjkutd3Em=6^;tR%|IKp?@_Zd8Nq&x7qGLPRRd<1%Zbd(=_`W|RD;3ZHN zz6%`NI-9+a@*JD;zYap^fsw+?zt!?<-wI}gwl6Y6xc5PAuY6(oQ$2;Dckjcu-|Y_K zlUj>HC_$%+p&YYz*=jK4zuQzXQ+jA~zP!l1T|RWcr!}+qE5ll;LXYidT3_&hw@=Pg zefM-5_|BfNI%#QslZ(V3=6{^NitKV3u7ZshoAvH;H8%b+-=DUa z+vPe!zP_T20OG}`LV;h?y?K1;Bm!bDlKCbo6okkkHHgKHC5j420)<#SL+< zf>v91Q8M2I1VyE_h=Qnpr7GZ3aVaiUs@`CG?_ceiXU=oZ%z58;=9_um^G*HA0fnb+ z)J0+P(1^)GL7?DE=LAJsyvo_n!Fg%8^DqbJne$ZXs@Rn2^Hd3n?&iSQq%?)DJuNXd zRiS%#aUJH6;qc*Qk|~N$nV+1xSR6l3r4T32PfDL1t5U@-Hkqzc#Lr7fOIOX0Pd8Bt z|N6>Aos#fZw^Ae%gt~467nz=1xHK#* zEaaW;dViBi)8kf5RT^d}ja;IQ1OkChYQabkZ>4dDvWJV@XrW^Eti<#*rAeYN@xMZ* zE;6-HmMPQEOVkO>^!Z!B?7aZ_P(Y9#_tiv!u|ObwXUu!`u;(K!XKU^pHzs#O?^pC_ zE(ejvZ?JXBK(uO($MZ{?z&_E8l(ZT6ZLyW(jk485G0T&OUpL?{R|EOEEP%Y~p(bOM z>X{&4E5*XXA_=w~BNtcd(Is7<0`;yXRnoE8w(|tZ>hH%J%RYlTpZn}xKs;~pN8-Ks z7UUlK59F?vXMNUahPZyUqm*KLqvrQJ62PIpKorHwpU^A14RJmjamMA`l|i?;mr1HY*~p0WJV+Kr>!E;Q~Zi2hZ8or(v5c> z$kgoJv=t6_7~-nvAgFs%b1ESxm;A@kiF;k%0~4m~f!#N1h-T97aB##b`sbLbIJ|rv z%g^Y$#e3*<0r0pLUZ~U66(5;$kwW% zc}fslvfh_@7n?$B5GALLV|ZP?6MS~93vN&Ug6>@BLpD3R@|;K^1T7e%xhpf3TzBb- z68|h}v13gI9V~;s-#3EFw3fZ;)f@KwQi|SO1O~1xuvS_LuP?mOY*}l^tinuja!Umc zQSBp4UWkvLII@Ko)teYuB|}rsqmXGfj(RH_{Hh8c;{kbZYB@AX)B0Em-&{Wkwcekw zjxSc?g=s4!xd!{NQFKYOC(4ZGn%Gd`J&FtLa>&u`=4{LS`*1cP9=%T41Ke4Q&EH-H z*RA8wV3iS;^tnN6%cs#=?|kSb5`s1V6K<+ixx~wLBx)OTu%cCsLjN-wI?zut;LLC^ z(-X7ZKYdB{%qsB8nLqq?ls}hbIi9N6zCV*WrFyZpf-%&uZZRJ}#fA+{kg*=+Ubw2x4uy`m2<=J+$u+me7Le_FxU9cSMA~&$NL;JGF>evEk<*3^yct~EnmPQ7+lEXD|^hrN=7}9YS>pKpkiR)KX zU2HiU%Q6Q-pDRP?OZjNtF|ioDPSvqwY#b%CFrqOJ2J)GtsbXR2Z6cl)fWki{m{nkj zkN=^{+=HvplnKRj^xj7Jsc@LfwhXu!fW$Q|ZL@?TnVUK2q#U5n%nyV4-X 
 1: print ("You have multi GPUs in a system: ")
            for idx, name in idxs_names_list:
diff --git a/models/Model_DF/Model.py b/models/Model_DF/Model.py
index 9d7ac1b..531b3b0 100644
--- a/models/Model_DF/Model.py
+++ b/models/Model_DF/Model.py
@@ -16,14 +16,14 @@ class Model(ModelBase):
     def onInitializeOptions(self, is_first_run, ask_override):
         if is_first_run or ask_override:
             def_pixel_loss = self.options.get('pixel_loss', False)
-            self.options['pixel_loss'] = input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. Use pixel loss after 20k epochs to enhance fine details and remove face jitter.")
+            self.options['pixel_loss'] = input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. Use pixel loss after 20k epochs to enhance fine details and decrease face jitter.")
         else:
             self.options['pixel_loss'] = self.options.get('pixel_loss', False)

     #override
     def onInitialize(self, **in_options):
         exec(nnlib.import_all(), locals(), globals())
-        self.set_vram_batch_requirements( {4.5:4,5:6,6:8,7:16,8:24,9:24,10:32,11:32,12:32,13:48} )
+        self.set_vram_batch_requirements( {4.5:4} )

         ae_input_layer = Input(shape=(128, 128, 3))
         mask_layer = Input(shape=(128, 128, 1)) #same as output
diff --git a/models/Model_H128/Model.py b/models/Model_H128/Model.py
index 98dcf6b..28e0016 100644
--- a/models/Model_H128/Model.py
+++ b/models/Model_H128/Model.py
@@ -24,14 +24,14 @@ class Model(ModelBase):
         if is_first_run or ask_override:
             def_pixel_loss = self.options.get('pixel_loss', False)
-            self.options['pixel_loss'] = input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. Use pixel loss after 20k epochs to enhance fine details and remove face jitter.")
+            self.options['pixel_loss'] = input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. 
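For reference, the set_vram_batch_requirements tables map available VRAM in GB to a batch size, and this change collapses them to a single entry so every model starts at batch size 4. A minimal sketch of how such a table can be resolved against the detected VRAM; resolve_batch_size and its selection rule are illustrative only, not ModelBase's actual code:

import operator

def resolve_batch_size(vram_gb_to_batch, available_vram_gb, fallback=1):
    # Keep the entry with the largest VRAM requirement that is still satisfied.
    best = fallback
    for req_gb in sorted(vram_gb_to_batch):
        if available_vram_gb >= req_gb:
            best = vram_gb_to_batch[req_gb]
    return best

old_table = {4.5: 4, 5: 6, 6: 8, 7: 16, 8: 24}   # pre-change style of table
new_table = {4.5: 4}                              # new default: always 4
print(resolve_batch_size(old_table, 6.0))         # -> 8
print(resolve_batch_size(new_table, 11.0))        # -> 4, adjust manually if desired
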
Use pixel loss after 20k epochs to enhance fine details and decrease face jitter.") else: self.options['pixel_loss'] = self.options.get('pixel_loss', False) #override def onInitialize(self, **in_options): exec(nnlib.import_all(), locals(), globals()) - self.set_vram_batch_requirements( {2.5:2,3:2,4:2,4:4,5:8,6:12,7:16,8:16,9:24,10:24,11:32,12:32,13:48} ) + self.set_vram_batch_requirements( {2.5:4} ) bgr_shape, mask_shape, self.encoder, self.decoder_src, self.decoder_dst = self.Build( self.options['lighter_ae'] ) if not self.is_first_run(): diff --git a/models/Model_H64/Model.py b/models/Model_H64/Model.py index c2bb60b..39c7d25 100644 --- a/models/Model_H64/Model.py +++ b/models/Model_H64/Model.py @@ -24,15 +24,15 @@ class Model(ModelBase): if is_first_run or ask_override: def_pixel_loss = self.options.get('pixel_loss', False) - self.options['pixel_loss'] = input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. Use pixel loss after 20k epochs to enhance fine details and remove face jitter.") + self.options['pixel_loss'] = input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. Use pixel loss after 20k epochs to enhance fine details and decrease face jitter.") else: self.options['pixel_loss'] = self.options.get('pixel_loss', False) #override def onInitialize(self, **in_options): exec(nnlib.import_all(), locals(), globals()) - self.set_vram_batch_requirements( {1.5:2,2:2,3:8,4:16,5:24,6:32,7:40,8:48} ) - + self.set_vram_batch_requirements( {1.5:4} ) + bgr_shape, mask_shape, self.encoder, self.decoder_src, self.decoder_dst = self.Build(self.options['lighter_ae']) diff --git a/models/Model_LIAEF128/Model.py b/models/Model_LIAEF128/Model.py index f885f9b..e5674ea 100644 --- a/models/Model_LIAEF128/Model.py +++ b/models/Model_LIAEF128/Model.py @@ -17,14 +17,14 @@ class Model(ModelBase): def onInitializeOptions(self, is_first_run, ask_override): if is_first_run or ask_override: def_pixel_loss = self.options.get('pixel_loss', False) - self.options['pixel_loss'] = input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. Use pixel loss after 20k epochs to enhance fine details and remove face jitter.") + self.options['pixel_loss'] = input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. 
Use pixel loss after 20k epochs to enhance fine details and decrease face jitter.") else: self.options['pixel_loss'] = self.options.get('pixel_loss', False) #override def onInitialize(self, **in_options): exec(nnlib.import_all(), locals(), globals()) - self.set_vram_batch_requirements( {4.5:4,5:4,6:8,7:12,8:16,9:20,10:24,11:24,12:32,13:48} ) + self.set_vram_batch_requirements( {4.5:4} ) ae_input_layer = Input(shape=(128, 128, 3)) mask_layer = Input(shape=(128, 128, 1)) #same as output diff --git a/models/Model_SAE/Model.py b/models/Model_SAE/Model.py index 73569e6..26e8a64 100644 --- a/models/Model_SAE/Model.py +++ b/models/Model_SAE/Model.py @@ -29,30 +29,14 @@ class SAEModel(ModelBase): if is_first_run: self.options['resolution'] = input_int("Resolution (64,128 ?:help skip:128) : ", default_resolution, [64,128], help_message="More resolution requires more VRAM.") self.options['face_type'] = input_str ("Half or Full face? (h/f, ?:help skip:f) : ", default_face_type, ['h','f'], help_message="Half face has better resolution, but covers less area of cheeks.").lower() + self.options['learn_mask'] = input_bool ("Learn mask? (y/n, ?:help skip:y) : ", True, help_message="Learning mask can help model to recognize face directions. Learn without mask can reduce model size, in this case converter forced to use 'not predicted mask' that is not smooth as predicted. Model with style values can be learned without mask and produce same quality result.") self.options['archi'] = input_str ("AE architecture (df, liae, ?:help skip:%s) : " % (default_archi) , default_archi, ['df','liae'], help_message="DF keeps faces more natural, while LIAE can fix overly different face shapes.").lower() - self.options['lighter_encoder'] = input_bool ("Use lightweight encoder? (y/n, ?:help skip:n) : ", False, help_message="Lightweight encoder is 35% faster, requires less VRAM, sacrificing overall quality.") - self.options['learn_mask'] = input_bool ("Learn mask? (y/n, ?:help skip:y) : ", True, help_message="Choose NO to reduce model size. In this case converter forced to use 'not predicted mask' that is not smooth as predicted. Styled SAE can learn without mask and produce same quality fake.") else: self.options['resolution'] = self.options.get('resolution', default_resolution) self.options['face_type'] = self.options.get('face_type', default_face_type) + self.options['learn_mask'] = self.options.get('learn_mask', True) self.options['archi'] = self.options.get('archi', default_archi) - self.options['lighter_encoder'] = self.options.get('lighter_encoder', False) - self.options['learn_mask'] = self.options.get('learn_mask', True) - - default_face_style_power = 10.0 - if is_first_run or ask_override: - default_face_style_power = default_face_style_power if is_first_run else self.options.get('face_style_power', default_face_style_power) - self.options['face_style_power'] = np.clip ( input_number("Face style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " % (default_face_style_power), default_face_style_power, help_message="How fast NN will learn dst face style during generalization of src and dst faces. 
If style is learned good enough, set this value to 0.01 to prevent artifacts appearing."), 0.0, 100.0 ) - else: - self.options['face_style_power'] = self.options.get('face_style_power', default_face_style_power) - default_bg_style_power = 10.0 - if is_first_run or ask_override: - default_bg_style_power = default_bg_style_power if is_first_run else self.options.get('bg_style_power', default_bg_style_power) - self.options['bg_style_power'] = np.clip ( input_number("Background style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " % (default_bg_style_power), default_bg_style_power, help_message="How fast NN will learn dst background style during generalization of src and dst faces. If style is learned good enough, set this value to 0.1-0.3 to prevent artifacts appearing."), 0.0, 100.0 ) - else: - self.options['bg_style_power'] = self.options.get('bg_style_power', default_bg_style_power) - default_ae_dims = 256 if self.options['archi'] == 'liae' else 512 default_ed_ch_dims = 42 if is_first_run: @@ -62,13 +46,36 @@ class SAEModel(ModelBase): self.options['ae_dims'] = self.options.get('ae_dims', default_ae_dims) self.options['ed_ch_dims'] = self.options.get('ed_ch_dims', default_ed_ch_dims) + if is_first_run: + self.options['lighter_encoder'] = input_bool ("Use lightweight encoder? (y/n, ?:help skip:n) : ", False, help_message="Lightweight encoder is 35% faster, requires less VRAM, but sacrificing overall quality.") + self.options['multiscale_decoder'] = input_bool ("Use multiscale decoder? (y/n, ?:help skip:y) : ", True, help_message="Multiscale decoder helps to get better details.") + else: + self.options['lighter_encoder'] = self.options.get('lighter_encoder', False) + self.options['multiscale_decoder'] = self.options.get('multiscale_decoder', True) + + default_face_style_power = 0.0 + default_bg_style_power = 0.0 + if is_first_run or ask_override: + def_pixel_loss = self.options.get('pixel_loss', False) + self.options['pixel_loss'] = input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. Use pixel loss after 15-25k epochs to enhance fine details and decrease face jitter.") + default_face_style_power = default_face_style_power if is_first_run else self.options.get('face_style_power', default_face_style_power) + self.options['face_style_power'] = np.clip ( input_number("Face style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " % (default_face_style_power), default_face_style_power, + help_message="Learn to transfer face style details such as light and color conditions. Warning: Enable it only after 10k epochs, when predicted face is clear enough to start learn style. Start from 0.1 value and check history changes."), 0.0, 100.0 ) + + default_bg_style_power = default_bg_style_power if is_first_run else self.options.get('bg_style_power', default_bg_style_power) + self.options['bg_style_power'] = np.clip ( input_number("Background style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " % (default_bg_style_power), default_bg_style_power, + help_message="Learn to transfer image around face. 
This can make face more like dst."), 0.0, 100.0 ) + else: + self.options['pixel_loss'] = self.options.get('pixel_loss', False) + self.options['face_style_power'] = self.options.get('face_style_power', default_face_style_power) + self.options['bg_style_power'] = self.options.get('bg_style_power', default_bg_style_power) #override def onInitialize(self, **in_options): exec(nnlib.import_all(), locals(), globals()) - self.set_vram_batch_requirements({2:1,3:2,4:3,5:6,6:8,7:12,8:16}) + self.set_vram_batch_requirements({1.5:4}) resolution = self.options['resolution'] ae_dims = self.options['ae_dims'] @@ -76,8 +83,10 @@ class SAEModel(ModelBase): adapt_k_size = False bgr_shape = (resolution, resolution, 3) mask_shape = (resolution, resolution, 1) - - dssim_pixel_alpha = Input( (1,) ) + + self.ms_count = ms_count = 3 if self.options['multiscale_decoder'] else 1 + + epoch_alpha = Input( (1,) ) warped_src = Input(bgr_shape) target_src = Input(bgr_shape) target_srcm = Input(mask_shape) @@ -85,7 +94,12 @@ class SAEModel(ModelBase): warped_dst = Input(bgr_shape) target_dst = Input(bgr_shape) target_dstm = Input(mask_shape) - + + target_src_ar = [ Input ( ( bgr_shape[0] // (2**i) ,)*2 + (bgr_shape[-1],) ) for i in range(ms_count-1, -1, -1)] + target_srcm_ar = [ Input ( ( mask_shape[0] // (2**i) ,)*2 + (mask_shape[-1],) ) for i in range(ms_count-1, -1, -1)] + target_dst_ar = [ Input ( ( bgr_shape[0] // (2**i) ,)*2 + (bgr_shape[-1],) ) for i in range(ms_count-1, -1, -1)] + target_dstm_ar = [ Input ( ( mask_shape[0] // (2**i) ,)*2 + (mask_shape[-1],) ) for i in range(ms_count-1, -1, -1)] + if self.options['archi'] == 'liae': self.encoder = modelify(SAEModel.LIAEEncFlow(resolution, adapt_k_size, self.options['lighter_encoder'], ed_ch_dims=ed_ch_dims) ) (Input(bgr_shape)) @@ -96,10 +110,10 @@ class SAEModel(ModelBase): inter_output_Inputs = [ Input( np.array(K.int_shape(x)[1:])*(1,1,2) ) for x in self.inter_B.outputs ] - self.decoder = modelify(SAEModel.LIAEDecFlow (bgr_shape[2],ed_ch_dims=ed_ch_dims//2, multiscale_decoder=True)) (inter_output_Inputs) + self.decoder = modelify(SAEModel.LIAEDecFlow (bgr_shape[2],ed_ch_dims=ed_ch_dims//2, multiscale_count=self.ms_count )) (inter_output_Inputs) if self.options['learn_mask']: - self.decoderm = modelify(SAEModel.LIAEDecFlow (mask_shape[2],ed_ch_dims=int(ed_ch_dims/1.5), multiscale_decoder=False )) (inter_output_Inputs) + self.decoderm = modelify(SAEModel.LIAEDecFlow (mask_shape[2],ed_ch_dims=int(ed_ch_dims/1.5) )) (inter_output_Inputs) if not self.is_first_run(): self.encoder.load_weights (self.get_strpath_storage_for_file(self.encoderH5)) @@ -128,20 +142,18 @@ class SAEModel(ModelBase): pred_src_srcm = self.decoderm(warped_src_inter_code) pred_dst_dstm = self.decoderm(warped_dst_inter_code) pred_src_dstm = self.decoderm(warped_src_dst_inter_code) - - else: self.encoder = modelify(SAEModel.DFEncFlow(resolution, adapt_k_size, self.options['lighter_encoder'], ae_dims=ae_dims, ed_ch_dims=ed_ch_dims) ) (Input(bgr_shape)) dec_Inputs = [ Input(K.int_shape(x)[1:]) for x in self.encoder.outputs ] - self.decoder_src = modelify(SAEModel.DFDecFlow (bgr_shape[2],ed_ch_dims=ed_ch_dims//2, multiscale_decoder=True)) (dec_Inputs) - self.decoder_dst = modelify(SAEModel.DFDecFlow (bgr_shape[2],ed_ch_dims=ed_ch_dims//2, multiscale_decoder=True)) (dec_Inputs) + self.decoder_src = modelify(SAEModel.DFDecFlow (bgr_shape[2],ed_ch_dims=ed_ch_dims//2, multiscale_count=self.ms_count )) (dec_Inputs) + self.decoder_dst = modelify(SAEModel.DFDecFlow (bgr_shape[2],ed_ch_dims=ed_ch_dims//2, 
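With multiscale_decoder enabled, ms_count is 3 and the target_*_ar inputs above are ordered coarsest to finest. A quick standalone check of the shape arithmetic, assuming the default resolution of 128:

# Reproduces the shape list built by the target_*_ar comprehensions above.
resolution = 128          # assumed example value (SAE default)
ms_count = 3              # multiscale_decoder enabled
bgr_shape = (resolution, resolution, 3)

target_shapes = [(bgr_shape[0] // (2 ** i),) * 2 + (bgr_shape[-1],)
                 for i in range(ms_count - 1, -1, -1)]
print(target_shapes)      # [(32, 32, 3), (64, 64, 3), (128, 128, 3)]
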
multiscale_count=self.ms_count )) (dec_Inputs) if self.options['learn_mask']: - self.decoder_srcm = modelify(SAEModel.DFDecFlow (mask_shape[2],ed_ch_dims=int(ed_ch_dims/1.5), multiscale_decoder=False)) (dec_Inputs) - self.decoder_dstm = modelify(SAEModel.DFDecFlow (mask_shape[2],ed_ch_dims=int(ed_ch_dims/1.5), multiscale_decoder=False)) (dec_Inputs) + self.decoder_srcm = modelify(SAEModel.DFDecFlow (mask_shape[2],ed_ch_dims=int(ed_ch_dims/1.5) )) (dec_Inputs) + self.decoder_dstm = modelify(SAEModel.DFDecFlow (mask_shape[2],ed_ch_dims=int(ed_ch_dims/1.5) )) (dec_Inputs) if not self.is_first_run(): self.encoder.load_weights (self.get_strpath_storage_for_file(self.encoderH5)) @@ -166,19 +178,12 @@ class SAEModel(ModelBase): if self.options['learn_mask']: pred_src_srcm, pred_dst_dstm, pred_src_dstm = [ [x] if type(x) != list else x for x in [pred_src_srcm, pred_dst_dstm, pred_src_dstm] ] - - ms_count = len(pred_src_src) - - target_src_ar = [ target_src if i == 0 else tf.image.resize_bicubic( target_src, (resolution // (2**i) ,)*2 ) for i in range(ms_count-1, -1, -1)] - target_srcm_ar = [ target_srcm if i == 0 else tf.image.resize_bicubic( target_srcm, (resolution // (2**i) ,)*2 ) for i in range(ms_count-1, -1, -1)] - target_dst_ar = [ target_dst if i == 0 else tf.image.resize_bicubic( target_dst, (resolution // (2**i) ,)*2 ) for i in range(ms_count-1, -1, -1)] - target_dstm_ar = [ target_dstm if i == 0 else tf.image.resize_bicubic( target_dstm, (resolution // (2**i) ,)*2 ) for i in range(ms_count-1, -1, -1)] - target_srcm_blurred_ar = [ tf_gaussian_blur( max(1, x.get_shape().as_list()[1] // 32) )(x) for x in target_srcm_ar] + target_srcm_blurred_ar = [ gaussian_blur( max(1, K.int_shape(x)[1] // 32) )(x) for x in target_srcm_ar] target_srcm_sigm_ar = [ x / 2.0 + 0.5 for x in target_srcm_blurred_ar] target_srcm_anti_sigm_ar = [ 1.0 - x for x in target_srcm_sigm_ar] - target_dstm_blurred_ar = [ tf_gaussian_blur( max(1, x.get_shape().as_list()[1] // 32) )(x) for x in target_dstm_ar] + target_dstm_blurred_ar = [ gaussian_blur( max(1, K.int_shape(x)[1] // 32) )(x) for x in target_dstm_ar] target_dstm_sigm_ar = [ x / 2.0 + 0.5 for x in target_dstm_blurred_ar] target_dstm_anti_sigm_ar = [ 1.0 - x for x in target_dstm_sigm_ar] @@ -199,9 +204,7 @@ class SAEModel(ModelBase): if self.is_training_mode: def optimizer(): return Adam(lr=5e-5, beta_1=0.5, beta_2=0.999) - - dssim_pixel_alpha_value = dssim_pixel_alpha[0][0] - + if self.options['archi'] == 'liae': src_dst_loss_train_weights = self.encoder.trainable_weights + self.inter_B.trainable_weights + self.inter_AB.trainable_weights + self.decoder.trainable_weights if self.options['learn_mask']: @@ -210,35 +213,51 @@ class SAEModel(ModelBase): src_dst_loss_train_weights = self.encoder.trainable_weights + self.decoder_src.trainable_weights + self.decoder_dst.trainable_weights if self.options['learn_mask']: src_dst_mask_loss_train_weights = self.encoder.trainable_weights + self.decoder_srcm.trainable_weights + self.decoder_dstm.trainable_weights - - src_dssim_loss_batch = sum([ ( 100*K.square(tf_dssim(2.0)( target_src_masked_ar[i], pred_src_src_sigm_ar[i] * target_srcm_sigm_ar[i] ) )) for i in range(len(target_src_masked_ar)) ]) - src_pixel_loss_batch = sum([ tf_reduce_mean ( 100*K.square( target_src_masked_ar[i] - pred_src_src_sigm_ar[i] * target_srcm_sigm_ar[i] ), axis=[1,2,3]) for i in range(len(target_src_masked_ar)) ]) - src_loss_batch = src_dssim_loss_batch*(1.0-dssim_pixel_alpha_value) + src_pixel_loss_batch*dssim_pixel_alpha_value + if not 
self.options['pixel_loss']: + src_loss_batch = sum([ ( 100*K.square( dssim(max_value=2.0)( target_src_masked_ar[i], pred_src_src_sigm_ar[i] * target_srcm_sigm_ar[i] ) )) for i in range(len(target_src_masked_ar)) ]) + else: + src_loss_batch = sum([ K.mean ( 100*K.square( target_src_masked_ar[i] - pred_src_src_sigm_ar[i] * target_srcm_sigm_ar[i] ), axis=[1,2,3]) for i in range(len(target_src_masked_ar)) ]) + src_loss = K.mean(src_loss_batch) - if self.options['face_style_power'] != 0: - face_style_power = self.options['face_style_power'] / 100.0 - src_loss += tf_style_loss(gaussian_blur_radius=resolution // 8, loss_weight=0.2*face_style_power)( psd_target_dst_masked_ar[-1], target_dst_masked_ar[-1] ) - - if self.options['bg_style_power'] != 0: - bg_style_power = self.options['bg_style_power'] / 100.0 - bg_dssim_loss = K.mean( (100*bg_style_power)*K.square(tf_dssim(2.0)( psd_target_dst_anti_masked_ar[-1], target_dst_anti_masked_ar[-1] ))) - bg_pixel_loss = K.mean( (100*bg_style_power)*K.square( psd_target_dst_anti_masked_ar[-1] - target_dst_anti_masked_ar[-1] )) - src_loss += bg_dssim_loss*(1.0-dssim_pixel_alpha_value) + bg_pixel_loss*dssim_pixel_alpha_value + face_style_power = self.options['face_style_power'] / 100.0 + + if face_style_power != 0: + src_loss += style_loss(gaussian_blur_radius=resolution//16, loss_weight=face_style_power, wnd_size=0)( psd_target_dst_masked_ar[-1], target_dst_masked_ar[-1] ) - dst_dssim_loss_batch = sum([ ( 100*K.square(tf_dssim(2.0)( target_dst_masked_ar[i], pred_dst_dst_sigm_ar[i] * target_dstm_sigm_ar[i] ) )) for i in range(len(target_dst_masked_ar)) ]) - dst_pixel_loss_batch = sum([ tf_reduce_mean ( 100*K.square( target_dst_masked_ar[i] - pred_dst_dst_sigm_ar[i] * target_dstm_sigm_ar[i] ), axis=[1,2,3]) for i in range(len(target_dst_masked_ar)) ]) - dst_loss_batch = dst_dssim_loss_batch*(1.0-dssim_pixel_alpha_value) + dst_pixel_loss_batch*dssim_pixel_alpha_value + bg_style_power = self.options['bg_style_power'] / 100.0 + if bg_style_power != 0: + if not self.options['pixel_loss']: + bg_loss = K.mean( (100*bg_style_power)*K.square(dssim(max_value=2.0)( psd_target_dst_anti_masked_ar[-1], target_dst_anti_masked_ar[-1] ))) + else: + bg_loss = K.mean( (100*bg_style_power)*K.square( psd_target_dst_anti_masked_ar[-1] - target_dst_anti_masked_ar[-1] )) + src_loss += bg_loss + + if not self.options['pixel_loss']: + dst_loss_batch = sum([ ( 100*K.square(dssim(max_value=2.0)( target_dst_masked_ar[i], pred_dst_dst_sigm_ar[i] * target_dstm_sigm_ar[i] ) )) for i in range(len(target_dst_masked_ar)) ]) + else: + dst_loss_batch = sum([ K.mean ( 100*K.square( target_dst_masked_ar[i] - pred_dst_dst_sigm_ar[i] * target_dstm_sigm_ar[i] ), axis=[1,2,3]) for i in range(len(target_dst_masked_ar)) ]) + dst_loss = K.mean(dst_loss_batch) - self.src_dst_train = K.function ([dssim_pixel_alpha, warped_src, target_src, target_srcm, warped_dst, target_dst, target_dstm ],[src_loss,dst_loss,src_loss_batch,dst_loss_batch], optimizer().get_updates(src_loss+dst_loss, src_dst_loss_train_weights) ) - - + feed = [warped_src, warped_dst] + feed += target_src_ar[::-1] + feed += target_srcm_ar[::-1] + feed += target_dst_ar[::-1] + feed += target_dstm_ar[::-1] + + self.src_dst_train = K.function (feed,[src_loss,dst_loss], optimizer().get_updates(src_loss+dst_loss, src_dst_loss_train_weights) ) + if self.options['learn_mask']: src_mask_loss = sum([ K.mean(K.square(target_srcm_ar[-1]-pred_src_srcm[-1])) for i in range(len(target_srcm_ar)) ]) dst_mask_loss = sum([ 
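The style terms are now opt-in: face_style_power and bg_style_power default to 0.0 and are divided by 100 before being used as loss_weight. The quantity they penalize is a per-channel spatial mean and standard deviation mismatch (see the style_loss added to nnlib further down in this patch). A minimal numpy sketch of that distance, assuming NHWC arrays and skipping the optional Gaussian blur:

import numpy as np

def style_stat_distance(content, style, loss_weight=1.0, eps=1e-5):
    # content/style: float arrays of shape (N, H, W, C)
    axes = (1, 2)
    c_mean, c_var = content.mean(axis=axes, keepdims=True), content.var(axis=axes, keepdims=True)
    s_mean, s_var = style.mean(axis=axes, keepdims=True), style.var(axis=axes, keepdims=True)
    c_std, s_std = np.sqrt(c_var + eps), np.sqrt(s_var + eps)
    mean_loss = np.sum(np.square(c_mean - s_mean))
    std_loss = np.sum(np.square(c_std - s_std))
    return (mean_loss + std_loss) * (loss_weight / float(content.shape[-1]))

rng = np.random.RandomState(0)
a, b = rng.rand(2, 64, 64, 3), rng.rand(2, 64, 64, 3)
print(style_stat_distance(a, b, loss_weight=0.1))  # small non-negative scalar
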
K.mean(K.square(target_dstm_ar[-1]-pred_dst_dstm[-1])) for i in range(len(target_dstm_ar)) ]) - self.src_dst_mask_train = K.function ([warped_src, target_srcm, warped_dst, target_dstm],[src_mask_loss, dst_mask_loss], optimizer().get_updates(src_mask_loss+dst_mask_loss, src_dst_mask_loss_train_weights) ) + + feed = [ warped_src, warped_dst] + feed += target_srcm_ar[::-1] + feed += target_dstm_ar[::-1] + + self.src_dst_mask_train = K.function (feed,[src_mask_loss, dst_mask_loss], optimizer().get_updates(src_mask_loss+dst_mask_loss, src_dst_mask_loss_train_weights) ) if self.options['learn_mask']: self.AE_view = K.function ([warped_src, warped_dst], [pred_src_src[-1], pred_dst_dst[-1], pred_src_dst[-1], pred_src_dstm[-1]]) @@ -257,21 +276,20 @@ class SAEModel(ModelBase): f = SampleProcessor.TypeFlags face_type = f.FACE_ALIGN_FULL if self.options['face_type'] == 'f' else f.FACE_ALIGN_HALF + + output_sample_types=[ [f.WARPED_TRANSFORMED | face_type | f.MODE_BGR, resolution] ] + output_sample_types += [ [f.TRANSFORMED | face_type | f.MODE_BGR, resolution // (2**i) ] for i in range(ms_count)] + output_sample_types += [ [f.TRANSFORMED | face_type | f.MODE_M | f.FACE_MASK_FULL, resolution // (2**i) ] for i in range(ms_count)] + self.set_training_data_generators ([ SampleGeneratorFace(self.training_data_src_path, sort_by_yaw_target_samples_path=self.training_data_dst_path if self.sort_by_yaw else None, debug=self.is_debug(), batch_size=self.batch_size, sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, normalize_tanh = True, scale_range=np.array([-0.05, 0.05])+self.src_scale_mod / 100.0 ), - output_sample_types=[ [f.WARPED_TRANSFORMED | face_type | f.MODE_BGR, resolution], - [f.TRANSFORMED | face_type | f.MODE_BGR, resolution], - [f.TRANSFORMED | face_type | f.MODE_M | f.FACE_MASK_FULL, resolution] - ], add_sample_idx=True ), + output_sample_types=output_sample_types ), SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size, sample_process_options=SampleProcessor.Options(random_flip=self.random_flip, normalize_tanh = True), - output_sample_types=[ [f.WARPED_TRANSFORMED | face_type | f.MODE_BGR, resolution], - [f.TRANSFORMED | face_type | f.MODE_BGR, resolution], - [f.TRANSFORMED | face_type | f.MODE_M | f.FACE_MASK_FULL, resolution] - ], add_sample_idx=True ) + output_sample_types=output_sample_types ) ]) #override def onSave(self): @@ -297,17 +315,20 @@ class SAEModel(ModelBase): #override def onTrainOneEpoch(self, generators_samples, generators_list): - warped_src, target_src, target_src_mask, src_sample_idxs = generators_samples[0] - warped_dst, target_dst, target_dst_mask, dst_sample_idxs = generators_samples[1] + src_samples = generators_samples[0] + dst_samples = generators_samples[1] - dssim_pixel_alpha = np.clip ( (self.epoch - 5000) / 15000.0, 0.0, 1.0 ) #smooth transition between DSSIM and MSE in 5-20k epochs - dssim_pixel_alpha = np.repeat( dssim_pixel_alpha, (self.batch_size,) ) - dssim_pixel_alpha = np.expand_dims(dssim_pixel_alpha,-1) - - src_loss, dst_loss, src_sample_losses, dst_sample_losses = self.src_dst_train ([dssim_pixel_alpha, warped_src, target_src, target_src_mask, warped_dst, target_dst, target_dst_mask]) + feed = [src_samples[0], dst_samples[0] ] + \ + src_samples[1:1+self.ms_count*2] + \ + dst_samples[1:1+self.ms_count*2] + + src_loss, dst_loss, = self.src_dst_train (feed) if self.options['learn_mask']: - src_mask_loss, dst_mask_loss, = self.src_dst_mask_train ([warped_src, target_src_mask, warped_dst, 
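Each generator now yields 1 + 2*ms_count arrays per side: the warped input, then ms_count BGR targets from full resolution down to resolution//4, then ms_count full-face masks at the same scales. The slices in onTrainOneEpoch pick those groups back out; a small sketch of the index bookkeeping, with placeholder strings standing in for the real sample arrays:

ms_count = 3
# Layout produced by output_sample_types for one side (src or dst):
src_samples = (["warped_bgr"]
               + ["target_bgr_1/%d" % (2 ** i) for i in range(ms_count)]
               + ["target_mask_1/%d" % (2 ** i) for i in range(ms_count)])

# Slices used when feeding the K.function trainers above:
bgr_and_masks = src_samples[1:1 + ms_count * 2]            # all multiscale targets
masks_only    = src_samples[1 + ms_count:1 + ms_count * 2]  # mask targets only
print(bgr_and_masks)  # ['target_bgr_1/1', 'target_bgr_1/2', 'target_bgr_1/4', 'target_mask_1/1', ...]
print(masks_only)     # ['target_mask_1/1', 'target_mask_1/2', 'target_mask_1/4']
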
target_dst_mask]) + feed = [ src_samples[0], dst_samples[0] ] + \ + src_samples[1+self.ms_count:1+self.ms_count*2] + \ + dst_samples[1+self.ms_count:1+self.ms_count*2] + src_mask_loss, dst_mask_loss, = self.src_dst_mask_train (feed) return ( ('src_loss', src_loss), ('dst_loss', dst_loss) ) @@ -430,7 +451,7 @@ class SAEModel(ModelBase): return func @staticmethod - def LIAEDecFlow(output_nc,ed_ch_dims=21, multiscale_decoder=True): + def LIAEDecFlow(output_nc,ed_ch_dims=21, multiscale_count=1): exec (nnlib.import_all(), locals(), globals()) ed_dims = output_nc * ed_ch_dims @@ -449,12 +470,12 @@ class SAEModel(ModelBase): outputs = [] x1 = upscale(ed_dims*8)( x ) - if multiscale_decoder: + if multiscale_count >= 3: outputs += [ to_bgr() ( x1 ) ] x2 = upscale(ed_dims*4)( x1 ) - if multiscale_decoder: + if multiscale_count >= 2: outputs += [ to_bgr() ( x2 ) ] x3 = upscale(ed_dims*2)( x2 ) @@ -513,7 +534,7 @@ class SAEModel(ModelBase): return func @staticmethod - def DFDecFlow(output_nc, ed_ch_dims=21, multiscale_decoder=True): + def DFDecFlow(output_nc, ed_ch_dims=21, multiscale_count=1): exec (nnlib.import_all(), locals(), globals()) ed_dims = output_nc * ed_ch_dims @@ -535,12 +556,12 @@ class SAEModel(ModelBase): outputs = [] x1 = upscale(ed_dims*8)( x ) - if multiscale_decoder: + if multiscale_count >= 3: outputs += [ to_bgr() ( x1 ) ] x2 = upscale(ed_dims*4)( x1 ) - if multiscale_decoder: + if multiscale_count >= 2: outputs += [ to_bgr() ( x2 ) ] x3 = upscale(ed_dims*2)( x2 ) diff --git a/nnlib/device.py b/nnlib/device.py new file mode 100644 index 0000000..fb25278 --- /dev/null +++ b/nnlib/device.py @@ -0,0 +1,333 @@ +import os +import json +import numpy as np +from .pynvml import * + +tf_min_req_cap = 37 #min req compute capability for tensorflow-gpu==1.11.0 + +class device: + backend = None + class Config(): + force_gpu_idx = -1 + multi_gpu = False + force_gpu_idxs = None + choose_worst_gpu = False + gpu_idxs = [] + gpu_names = [] + gpu_compute_caps = [] + gpu_vram_gb = [] + allow_growth = True + use_fp16 = False + cpu_only = False + backend = None + def __init__ (self, force_gpu_idx = -1, + multi_gpu = False, + force_gpu_idxs = None, + choose_worst_gpu = False, + allow_growth = True, + use_fp16 = False, + cpu_only = False, + **in_options): + + self.backend = device.backend + self.use_fp16 = use_fp16 + self.cpu_only = cpu_only + + if not self.cpu_only: + self.cpu_only = (self.backend == "tensorflow-cpu") + + if not self.cpu_only: + self.force_gpu_idx = force_gpu_idx + self.multi_gpu = multi_gpu + self.force_gpu_idxs = force_gpu_idxs + self.choose_worst_gpu = choose_worst_gpu + self.allow_growth = allow_growth + + self.gpu_idxs = [] + + if force_gpu_idxs is not None: + for idx in force_gpu_idxs.split(','): + idx = int(idx) + if device.isValidDeviceIdx(idx): + self.gpu_idxs.append(idx) + else: + gpu_idx = force_gpu_idx if (force_gpu_idx >= 0 and device.isValidDeviceIdx(force_gpu_idx)) else device.getBestValidDeviceIdx() if not choose_worst_gpu else device.getWorstValidDeviceIdx() + if gpu_idx != -1: + if self.multi_gpu: + self.gpu_idxs = device.getDeviceIdxsEqualModel( gpu_idx ) + if len(self.gpu_idxs) <= 1: + self.multi_gpu = False + else: + self.gpu_idxs = [gpu_idx] + + self.cpu_only = (len(self.gpu_idxs) == 0) + + if not self.cpu_only: + self.gpu_names = [] + self.gpu_compute_caps = [] + self.gpu_vram_gb = [] + for gpu_idx in self.gpu_idxs: + self.gpu_names += [device.getDeviceName(gpu_idx)] + self.gpu_compute_caps += [ device.getDeviceComputeCapability(gpu_idx) ] + self.gpu_vram_gb += [ 
device.getDeviceVRAMTotalGb(gpu_idx) ] + self.cpu_only = (len(self.gpu_idxs) == 0) + + if self.cpu_only: + self.backend = "tensorflow-cpu" + + @staticmethod + def getValidDeviceIdxsEnumerator(): + if device.backend == "plaidML": + for i in range(plaidML_devices_count): + yield i + elif device.backend == "tensorflow": + for gpu_idx in range(nvmlDeviceGetCount()): + cap = device.getDeviceComputeCapability (gpu_idx) + if cap >= tf_min_req_cap: + yield gpu_idx + elif device.backend == "tensorflow-generic": + yield 0 + + + @staticmethod + def getValidDevicesWithAtLeastTotalMemoryGB(totalmemsize_gb): + result = [] + if device.backend == "plaidML": + for i in device.getValidDeviceIdxsEnumerator(): + if plaidML_devices[i]['globalMemSize'] >= totalmemsize_gb*1024*1024*1024: + result.append (i) + elif device.backend == "tensorflow": + for i in device.getValidDeviceIdxsEnumerator(): + handle = nvmlDeviceGetHandleByIndex(i) + memInfo = nvmlDeviceGetMemoryInfo( handle ) + if (memInfo.total) >= totalmemsize_gb*1024*1024*1024: + result.append (i) + elif device.backend == "tensorflow-generic": + return [0] + + return result + + @staticmethod + def getAllDevicesIdxsList(): + if device.backend == "plaidML": + return [ *range(plaidML_devices_count) ] + elif device.backend == "tensorflow": + return [ *range(nvmlDeviceGetCount() ) ] + elif device.backend == "tensorflow-generic": + return [0] + + @staticmethod + def getValidDevicesIdxsWithNamesList(): + if device.backend == "plaidML": + return [ (i, plaidML_devices[i]['description'] ) for i in device.getValidDeviceIdxsEnumerator() ] + elif device.backend == "tensorflow": + return [ (i, nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(i)).decode() ) for i in device.getValidDeviceIdxsEnumerator() ] + elif device.backend == "tensorflow-cpu": + return [ (0, 'CPU') ] + elif device.backend == "tensorflow-generic": + return [ (0, device.getDeviceName(0) ) ] + + @staticmethod + def getDeviceVRAMTotalGb (idx): + if device.backend == "plaidML": + if idx < plaidML_devices_count: + return plaidML_devices[idx]['globalMemSize'] / (1024*1024*1024) + elif device.backend == "tensorflow": + if idx < nvmlDeviceGetCount(): + memInfo = nvmlDeviceGetMemoryInfo( nvmlDeviceGetHandleByIndex(idx) ) + return round ( memInfo.total / (1024*1024*1024) ) + + return 0 + elif device.backend == "tensorflow-generic": + return 2 + + @staticmethod + def getBestValidDeviceIdx(): + if device.backend == "plaidML": + idx = -1 + idx_mem = 0 + for i in device.getValidDeviceIdxsEnumerator(): + total = plaidML_devices[i]['globalMemSize'] + if total > idx_mem: + idx = i + idx_mem = total + + return idx + elif device.backend == "tensorflow": + idx = -1 + idx_mem = 0 + for i in device.getValidDeviceIdxsEnumerator(): + memInfo = nvmlDeviceGetMemoryInfo( nvmlDeviceGetHandleByIndex(i) ) + if memInfo.total > idx_mem: + idx = i + idx_mem = memInfo.total + + return idx + elif device.backend == "tensorflow-generic": + return 0 + + @staticmethod + def getWorstValidDeviceIdx(): + if device.backend == "plaidML": + idx = -1 + idx_mem = sys.maxsize + for i in device.getValidDeviceIdxsEnumerator(): + total = plaidML_devices[i]['globalMemSize'] + if total < idx_mem: + idx = i + idx_mem = total + + return idx + elif device.backend == "tensorflow": + idx = -1 + idx_mem = sys.maxsize + for i in device.getValidDeviceIdxsEnumerator(): + memInfo = nvmlDeviceGetMemoryInfo( nvmlDeviceGetHandleByIndex(i) ) + if memInfo.total < idx_mem: + idx = i + idx_mem = memInfo.total + + return idx + elif device.backend == "tensorflow-generic": + 
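getBestValidDeviceIdx and getWorstValidDeviceIdx simply rank CUDA devices by the total memory NVML reports. A standalone sketch using the same NVML calls; the helper below is illustrative and not the module's exact code (device.py keeps NVML initialized rather than shutting it down per call):

from nnlib.pynvml import *   # same NVML bindings this patch imports with "from .pynvml import *"

def best_gpu_by_total_memory():
    # Pick the CUDA device index with the largest total memory.
    nvmlInit()
    try:
        best_idx, best_total = -1, 0
        for i in range(nvmlDeviceGetCount()):
            mem = nvmlDeviceGetMemoryInfo(nvmlDeviceGetHandleByIndex(i))
            if mem.total > best_total:
                best_idx, best_total = i, mem.total
        return best_idx
    finally:
        nvmlShutdown()
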
return 0 + + @staticmethod + def isValidDeviceIdx(idx): + if device.backend == "plaidML": + return idx in [*device.getValidDeviceIdxsEnumerator()] + elif device.backend == "tensorflow": + return idx in [*device.getValidDeviceIdxsEnumerator()] + elif device.backend == "tensorflow-generic": + return (idx == 0) + + @staticmethod + def getDeviceIdxsEqualModel(idx): + if device.backend == "plaidML": + result = [] + idx_name = plaidML_devices[idx]['description'] + for i in device.getValidDeviceIdxsEnumerator(): + if plaidML_devices[i]['description'] == idx_name: + result.append (i) + + return result + elif device.backend == "tensorflow": + result = [] + idx_name = nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(idx)).decode() + for i in device.getValidDeviceIdxsEnumerator(): + if nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(i)).decode() == idx_name: + result.append (i) + + return result + elif device.backend == "tensorflow-generic": + return [0] if idx == 0 else [] + + @staticmethod + def getDeviceName (idx): + if device.backend == "plaidML": + if idx < plaidML_devices_count: + return plaidML_devices[idx]['description'] + elif device.backend == "tensorflow": + if idx < nvmlDeviceGetCount(): + return nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(idx)).decode() + elif device.backend == "tensorflow-generic": + if idx == 0: + return "Generic GeForce GPU" + + return None + + @staticmethod + def getDeviceID (idx): + if device.backend == "plaidML": + if idx < plaidML_devices_count: + return plaidML_devices[idx]['id'].decode() + + return None + + @staticmethod + def getDeviceComputeCapability(idx): + result = 0 + if device.backend == "plaidML": + return 99 + elif device.backend == "tensorflow": + if idx < nvmlDeviceGetCount(): + result = nvmlDeviceGetCudaComputeCapability(nvmlDeviceGetHandleByIndex(idx)) + elif device.backend == "tensorflow-generic": + return 99 if idx == 0 else 0 + + return result[0] * 10 + result[1] + + +force_plaidML = os.environ.get("force_plaidML", "0") == "1" +has_nvml = False +has_nvml_cap = False +has_nvidia_device = False +plaidML_devices = [] + +# Using plaidML OpenCL backend to determine system devices and has_nvidia_device +try: + os.environ['PLAIDML_EXPERIMENTAL'] = 'false' #this enables work plaidML without run 'plaidml-setup' + import plaidml + ctx = plaidml.Context() + for d in plaidml.devices(ctx, return_all=True)[0]: + details = json.loads(d.details) + if 'nvidia' in details['vendor'].lower(): + has_nvidia_device = True + plaidML_devices += [ {'id':d.id, + 'globalMemSize' : int(details['globalMemSize']), + 'description' : d.description.decode() + }] + ctx.shutdown() +except: + pass + +plaidML_devices_count = len(plaidML_devices) + +#choosing backend + +if device.backend is None: + #first trying to load NVSMI and detect CUDA devices for tensorflow backend, + #even force_plaidML is choosed, because if plaidML will fail, we can choose tensorflow + try: + nvmlInit() + has_nvml = True + device.backend = "tensorflow" #set tensorflow backend in order to use device.*device() functions + + gpu_idxs = device.getAllDevicesIdxsList() + gpu_caps = np.array ( [ device.getDeviceComputeCapability(gpu_idx) for gpu_idx in gpu_idxs ] ) + + if len ( np.ndarray.flatten ( np.argwhere (gpu_caps >= tf_min_req_cap) ) ) == 0: + if not force_plaidML: + print ("No CUDA devices found with minimum required compute capability: %d.%d. Falling back to OpenCL mode." 
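Backend choice happens once, at import time of nnlib.device: NVML/CUDA is probed first, then plaidML/OpenCL, then the tensorflow-generic and CPU fallbacks. To force the OpenCL path, force_plaidML has to be in the environment before that first import. A usage sketch; the "from nnlib import nnlib" entry point is assumed here, matching how the model files use it:

import os
os.environ["force_plaidML"] = "1"   # must be set before nnlib is imported for the first time

from nnlib import nnlib             # assumed import, as used by the model modules
exec(nnlib.import_all(), locals(), globals())   # same pattern the models use; keras now runs on plaidML
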
% (tf_min_req_cap // 10, tf_min_req_cap % 10) ) + device.backend = None + nvmlShutdown() + else: + has_nvml_cap = True + except: + #if no NVSMI installed exception will occur + device.backend = None + has_nvml = False + +if device.backend is None or force_plaidML: + #tensorflow backend was failed or forcing plaidML, trying to use plaidML backend + if plaidML_devices_count == 0: + print ("plaidML: No capable OpenCL devices found. Falling back to tensorflow backend.") + device.backend = None + else: + device.backend = "plaidML" + +if device.backend is None: + if not has_nvml: + if has_nvidia_device: + #some notebook systems have NVIDIA card without NVSMI in official drivers + #in that case considering we have system with one capable GPU and let tensorflow to choose best GPU + device.backend = "tensorflow-generic" + else: + #no NVSMI and no NVIDIA cards, also plaidML was failed, then CPU only + device.backend = "tensorflow-cpu" + else: + if has_nvml_cap: + #has NVSMI and capable CUDA-devices, but force_plaidML was failed, then we choosing tensorflow + device.backend = "tensorflow" + else: + #has NVSMI, no capable CUDA-devices, also plaidML was failed, then CPU only + device.backend = "tensorflow-cpu" diff --git a/nnlib/devicelib.py b/nnlib/devicelib.py deleted file mode 100644 index 1d516fe..0000000 --- a/nnlib/devicelib.py +++ /dev/null @@ -1,186 +0,0 @@ -from .pynvml import * - -try: - nvmlInit() - hasNVML = True -except: - hasNVML = False - -class devicelib: - class Config(): - force_gpu_idx = -1 - multi_gpu = False - force_gpu_idxs = None - choose_worst_gpu = False - gpu_idxs = [] - gpu_names = [] - gpu_compute_caps = [] - gpu_vram_gb = [] - allow_growth = True - use_fp16 = False - cpu_only = False - - def __init__ (self, force_gpu_idx = -1, - multi_gpu = False, - force_gpu_idxs = None, - choose_worst_gpu = False, - allow_growth = True, - use_fp16 = False, - cpu_only = False, - **in_options): - - self.use_fp16 = use_fp16 - if cpu_only: - self.cpu_only = True - else: - self.force_gpu_idx = force_gpu_idx - self.multi_gpu = multi_gpu - self.force_gpu_idxs = force_gpu_idxs - self.choose_worst_gpu = choose_worst_gpu - self.allow_growth = allow_growth - - self.gpu_idxs = [] - - if force_gpu_idxs is not None: - for idx in force_gpu_idxs.split(','): - idx = int(idx) - if devicelib.isValidDeviceIdx(idx): - self.gpu_idxs.append(idx) - else: - gpu_idx = force_gpu_idx if (force_gpu_idx >= 0 and devicelib.isValidDeviceIdx(force_gpu_idx)) else devicelib.getBestDeviceIdx() if not choose_worst_gpu else devicelib.getWorstDeviceIdx() - if gpu_idx != -1: - if self.multi_gpu: - self.gpu_idxs = devicelib.getDeviceIdxsEqualModel( gpu_idx ) - if len(self.gpu_idxs) <= 1: - self.multi_gpu = False - else: - self.gpu_idxs = [gpu_idx] - - self.cpu_only = (len(self.gpu_idxs) == 0) - - if not self.cpu_only: - self.gpu_names = [] - self.gpu_compute_caps = [] - for gpu_idx in self.gpu_idxs: - self.gpu_names += [devicelib.getDeviceName(gpu_idx)] - self.gpu_compute_caps += [ devicelib.getDeviceComputeCapability ( gpu_idx ) ] - self.gpu_vram_gb += [ devicelib.getDeviceVRAMTotalGb ( gpu_idx ) ] - - @staticmethod - def getDevicesWithAtLeastTotalMemoryGB(totalmemsize_gb): - if not hasNVML: - return [0] - - result = [] - for i in range(nvmlDeviceGetCount()): - handle = nvmlDeviceGetHandleByIndex(i) - memInfo = nvmlDeviceGetMemoryInfo( handle ) - if (memInfo.total) >= totalmemsize_gb*1024*1024*1024: - result.append (i) - return result - - @staticmethod - def getAllDevicesIdxsList(): - if not hasNVML: - return [0] - - return [ i 
for i in range(0, nvmlDeviceGetCount() ) ] - - @staticmethod - def getAllDevicesIdxsWithNamesList(): - if not hasNVML: - return [ (0, devicelib.getDeviceName(0) ) ] - - return [ (i, nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(i)).decode() ) for i in range(nvmlDeviceGetCount() ) ] - - @staticmethod - def getDeviceVRAMFree (idx): - if not hasNVML: - return 2 - - if idx < nvmlDeviceGetCount(): - memInfo = nvmlDeviceGetMemoryInfo( nvmlDeviceGetHandleByIndex(idx) ) - return memInfo.total - memInfo.used - - return 0 - - @staticmethod - def getDeviceVRAMTotalGb (idx): - if not hasNVML: - return 2 - - if idx < nvmlDeviceGetCount(): - memInfo = nvmlDeviceGetMemoryInfo( nvmlDeviceGetHandleByIndex(idx) ) - return round ( memInfo.total / (1024*1024*1024) ) - - return 0 - - @staticmethod - def getBestDeviceIdx(): - if not hasNVML: - return 0 - - idx = -1 - idx_mem = 0 - for i in range( nvmlDeviceGetCount() ): - memInfo = nvmlDeviceGetMemoryInfo( nvmlDeviceGetHandleByIndex(i) ) - if memInfo.total > idx_mem: - idx = i - idx_mem = memInfo.total - - return idx - - @staticmethod - def getWorstDeviceIdx(): - if not hasNVML: - return 0 - - idx = -1 - idx_mem = sys.maxsize - for i in range( nvmlDeviceGetCount() ): - memInfo = nvmlDeviceGetMemoryInfo( nvmlDeviceGetHandleByIndex(i) ) - if memInfo.total < idx_mem: - idx = i - idx_mem = memInfo.total - - return idx - - @staticmethod - def isValidDeviceIdx(idx): - if not hasNVML: - return (idx == 0) - - return (idx < nvmlDeviceGetCount()) - - @staticmethod - def getDeviceIdxsEqualModel(idx): - if not hasNVML: - return [0] if idx == 0 else [] - - result = [] - idx_name = nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(idx)).decode() - for i in range( nvmlDeviceGetCount() ): - if nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(i)).decode() == idx_name: - result.append (i) - - return result - - @staticmethod - def getDeviceName (idx): - if not hasNVML: - return 'Generic GeForce GPU' - - if idx < nvmlDeviceGetCount(): - return nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(idx)).decode() - - return None - - @staticmethod - def getDeviceComputeCapability(idx): - if not hasNVML: - return 99 if idx == 0 else 0 - - result = 0 - if idx < nvmlDeviceGetCount(): - result = nvmlDeviceGetCudaComputeCapability(nvmlDeviceGetHandleByIndex(idx)) - return result[0] * 10 + result[1] diff --git a/nnlib/nnlib.py b/nnlib/nnlib.py index 7a92169..b6f10e9 100644 --- a/nnlib/nnlib.py +++ b/nnlib/nnlib.py @@ -4,66 +4,37 @@ import contextlib import numpy as np from utils import std_utils -from .devicelib import devicelib +from .device import device class nnlib(object): - device = devicelib #forwards nnlib.devicelib to device in order to use nnlib as standalone lib - DeviceConfig = devicelib.Config + device = device #forwards nnlib.devicelib to device in order to use nnlib as standalone lib + DeviceConfig = device.Config active_DeviceConfig = DeviceConfig() #default is one best GPU dlib = None + keras = None keras_contrib = None + tf = None tf_sess = None - code_import_tf = None + PML = None + PMLK = None + PMLTile= None + code_import_keras = None code_import_keras_contrib = None code_import_all = None code_import_dlib = None - tf_dssim = None - tf_ssim = None - tf_resize_like = None - tf_image_histogram = None - tf_rgb_to_lab = None - tf_lab_to_rgb = None - tf_adain = None - tf_gaussian_blur = None - tf_style_loss = None - - modelify = None - ReflectionPadding2D = None - DSSIMLoss = None - DSSIMMSEMaskLoss = None - PixelShuffler = None - SubpixelUpscaler = None - AddUniformNoise = None ResNet = None 
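The code_import_*_string blocks are the whole import mechanism: nnlib compiles them once and each model injects the resulting names (keras layers, gaussian_blur, dssim, and so on) into its own namespace with exec. A self-contained sketch of that compile/exec pattern with made-up names, only to show the mechanics:

# Minimal reproduction of the compile/exec import pattern (illustrative names only).
class minilib(object):
    answer = 42
    code_import_all_string = "answer = minilib.answer\n"
    code_import_all = None

    @staticmethod
    def import_all():
        if minilib.code_import_all is None:
            minilib.code_import_all = compile(minilib.code_import_all_string, '', 'exec')
        return minilib.code_import_all

exec(minilib.import_all(), locals(), globals())
print(answer)   # 42: the name now exists in the caller's namespace, as in the models
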
UNet = None UNetTemporalPredictor = None NLayerDiscriminator = None - - code_import_tf_string = \ -""" -tf = nnlib.tf -tf_sess = nnlib.tf_sess -tf_reduce_mean = tf.reduce_mean # todo tf 12+ = tf.math.reduce_mean -tf_total_variation = tf.image.total_variation -tf_dssim = nnlib.tf_dssim -tf_ssim = nnlib.tf_ssim -tf_resize_like = nnlib.tf_resize_like -tf_image_histogram = nnlib.tf_image_histogram -tf_rgb_to_lab = nnlib.tf_rgb_to_lab -tf_lab_to_rgb = nnlib.tf_lab_to_rgb -tf_adain = nnlib.tf_adain -tf_gaussian_blur = nnlib.tf_gaussian_blur -tf_style_loss = nnlib.tf_style_loss -""" code_import_keras_string = \ """ keras = nnlib.keras @@ -81,9 +52,11 @@ BatchNormalization = keras.layers.BatchNormalization LeakyReLU = keras.layers.LeakyReLU ReLU = keras.layers.ReLU +PReLU = keras.layers.PReLU tanh = keras.layers.Activation('tanh') sigmoid = keras.layers.Activation('sigmoid') Dropout = keras.layers.Dropout +Softmax = keras.layers.Softmax Lambda = keras.layers.Lambda Add = keras.layers.Add @@ -100,12 +73,14 @@ Model = keras.models.Model Adam = keras.optimizers.Adam modelify = nnlib.modelify -ReflectionPadding2D = nnlib.ReflectionPadding2D -DSSIMLoss = nnlib.DSSIMLoss -DSSIMMSEMaskLoss = nnlib.DSSIMMSEMaskLoss +gaussian_blur = nnlib.gaussian_blur +style_loss = nnlib.style_loss +dssim = nnlib.dssim + +#ReflectionPadding2D = nnlib.ReflectionPadding2D PixelShuffler = nnlib.PixelShuffler SubpixelUpscaler = nnlib.SubpixelUpscaler -AddUniformNoise = nnlib.AddUniformNoise +#AddUniformNoise = nnlib.AddUniformNoise """ code_import_keras_contrib_string = \ """ @@ -113,7 +88,6 @@ keras_contrib = nnlib.keras_contrib GroupNormalization = keras_contrib.layers.GroupNormalization InstanceNormalization = keras_contrib.layers.InstanceNormalization Padam = keras_contrib.optimizers.Padam -PELU = keras_contrib.layers.advanced_activations.PELU """ code_import_dlib_string = \ """ @@ -122,6 +96,7 @@ dlib = nnlib.dlib code_import_all_string = \ """ +DSSIMMSEMaskLoss = nnlib.DSSIMMSEMaskLoss ResNet = nnlib.ResNet UNet = nnlib.UNet UNetTemporalPredictor = nnlib.UNetTemporalPredictor @@ -130,7 +105,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator @staticmethod - def import_tf(device_config = None): + def _import_tf(device_config): if nnlib.tf is not None: return nnlib.code_import_tf @@ -147,263 +122,63 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator import tensorflow as tf nnlib.tf = tf - if device_config is None: - device_config = nnlib.active_DeviceConfig - - tf_ver = [int(x) for x in tf.VERSION.split('.')] - req_cap = 35 - if tf_ver[0] > 1 or (tf_ver[0] == 1 and tf_ver[1] >= 11): - req_cap = 37 - - if not device_config.cpu_only and device_config.gpu_compute_caps[0] < req_cap: - if suppressor is not None: - suppressor.__exit__() - - print ("%s does not meet minimum required compute capability: %d.%d. Falling back to CPU mode." 
% ( device_config.gpu_names[0], req_cap // 10, req_cap % 10 ) ) - device_config = nnlib.DeviceConfig(cpu_only=True) - - if suppressor is not None: - suppressor.__enter__() - - nnlib.active_DeviceConfig = device_config - if device_config.cpu_only: - config = tf.ConfigProto( device_count = {'GPU': 0} ) - else: + config = tf.ConfigProto(device_count={'GPU': 0}) + else: config = tf.ConfigProto() - visible_device_list = '' - for idx in device_config.gpu_idxs: - visible_device_list += str(idx) + ',' - config.gpu_options.visible_device_list=visible_device_list[:-1] + + if device_config.backend != "tensorflow-generic": + #tensorflow-generic is system with NVIDIA card, but w/o NVSMI + #so dont hide devices and let tensorflow to choose best card + visible_device_list = '' + for idx in device_config.gpu_idxs: + visible_device_list += str(idx) + ',' + config.gpu_options.visible_device_list=visible_device_list[:-1] config.gpu_options.force_gpu_compatible = True config.gpu_options.allow_growth = device_config.allow_growth - + nnlib.tf_sess = tf.Session(config=config) if suppressor is not None: suppressor.__exit__() - - nnlib.__initialize_tf_functions() - nnlib.code_import_tf = compile (nnlib.code_import_tf_string,'','exec') - return nnlib.code_import_tf - @staticmethod - def __initialize_tf_functions(): - tf = nnlib.tf - - def tf_dssim_(max_value=1.0): - def func(t1,t2): - return (1.0 - tf.image.ssim (t1, t2, max_value)) / 2.0 - return func - nnlib.tf_dssim = tf_dssim_ - - def tf_ssim_(max_value=1.0): - def func(t1,t2): - return tf.image.ssim (t1, t2, max_value) - return func - nnlib.tf_ssim = tf_ssim_ - - def tf_resize_like_(ref_tensor): - def func(input_tensor): - H, W = ref_tensor.get_shape()[1], ref_tensor.get_shape()[2] - return tf.image.resize_bilinear(input_tensor, [H.value, W.value]) - return func - nnlib.tf_resize_like = tf_resize_like_ - - def tf_rgb_to_lab(): - def func(rgb_input): - with tf.name_scope("rgb_to_lab"): - srgb_pixels = tf.reshape(rgb_input, [-1, 3]) - - with tf.name_scope("srgb_to_xyz"): - linear_mask = tf.cast(srgb_pixels <= 0.04045, dtype=tf.float32) - exponential_mask = tf.cast(srgb_pixels > 0.04045, dtype=tf.float32) - rgb_pixels = (srgb_pixels / 12.92 * linear_mask) + (((srgb_pixels + 0.055) / 1.055) ** 2.4) * exponential_mask - rgb_to_xyz = tf.constant([ - # X Y Z - [0.412453, 0.212671, 0.019334], # R - [0.357580, 0.715160, 0.119193], # G - [0.180423, 0.072169, 0.950227], # B - ]) - xyz_pixels = tf.matmul(rgb_pixels, rgb_to_xyz) - - # https://en.wikipedia.org/wiki/Lab_color_space#CIELAB-CIEXYZ_conversions - with tf.name_scope("xyz_to_cielab"): - # convert to fx = f(X/Xn), fy = f(Y/Yn), fz = f(Z/Zn) - - # normalize for D65 white point - xyz_normalized_pixels = tf.multiply(xyz_pixels, [1/0.950456, 1.0, 1/1.088754]) - - epsilon = 6/29 - linear_mask = tf.cast(xyz_normalized_pixels <= (epsilon**3), dtype=tf.float32) - exponential_mask = tf.cast(xyz_normalized_pixels > (epsilon**3), dtype=tf.float32) - fxfyfz_pixels = (xyz_normalized_pixels / (3 * epsilon**2) + 4/29) * linear_mask + (xyz_normalized_pixels ** (1/3)) * exponential_mask - - # convert to lab - fxfyfz_to_lab = tf.constant([ - # l a b - [ 0.0, 500.0, 0.0], # fx - [116.0, -500.0, 200.0], # fy - [ 0.0, 0.0, -200.0], # fz - ]) - lab_pixels = tf.matmul(fxfyfz_pixels, fxfyfz_to_lab) + tf.constant([-16.0, 0.0, 0.0]) - return tf.reshape(lab_pixels, tf.shape(rgb_input)) - return func - nnlib.tf_rgb_to_lab = tf_rgb_to_lab - - def tf_lab_to_rgb(): - def func(lab): - with tf.name_scope("lab_to_rgb"): - lab_pixels = 
tf.reshape(lab, [-1, 3]) - - # https://en.wikipedia.org/wiki/Lab_color_space#CIELAB-CIEXYZ_conversions - with tf.name_scope("cielab_to_xyz"): - # convert to fxfyfz - lab_to_fxfyfz = tf.constant([ - # fx fy fz - [1/116.0, 1/116.0, 1/116.0], # l - [1/500.0, 0.0, 0.0], # a - [ 0.0, 0.0, -1/200.0], # b - ]) - fxfyfz_pixels = tf.matmul(lab_pixels + tf.constant([16.0, 0.0, 0.0]), lab_to_fxfyfz) - - # convert to xyz - epsilon = 6/29 - linear_mask = tf.cast(fxfyfz_pixels <= epsilon, dtype=tf.float32) - exponential_mask = tf.cast(fxfyfz_pixels > epsilon, dtype=tf.float32) - xyz_pixels = (3 * epsilon**2 * (fxfyfz_pixels - 4/29)) * linear_mask + (fxfyfz_pixels ** 3) * exponential_mask - - # denormalize for D65 white point - xyz_pixels = tf.multiply(xyz_pixels, [0.950456, 1.0, 1.088754]) - - with tf.name_scope("xyz_to_srgb"): - xyz_to_rgb = tf.constant([ - # r g b - [ 3.2404542, -0.9692660, 0.0556434], # x - [-1.5371385, 1.8760108, -0.2040259], # y - [-0.4985314, 0.0415560, 1.0572252], # z - ]) - rgb_pixels = tf.matmul(xyz_pixels, xyz_to_rgb) - # avoid a slightly negative number messing up the conversion - rgb_pixels = tf.clip_by_value(rgb_pixels, 0.0, 1.0) - linear_mask = tf.cast(rgb_pixels <= 0.0031308, dtype=tf.float32) - exponential_mask = tf.cast(rgb_pixels > 0.0031308, dtype=tf.float32) - srgb_pixels = (rgb_pixels * 12.92 * linear_mask) + ((rgb_pixels ** (1/2.4) * 1.055) - 0.055) * exponential_mask - - return tf.reshape(srgb_pixels, tf.shape(lab)) - return func - nnlib.tf_lab_to_rgb = tf_lab_to_rgb - - def tf_image_histogram(): - def func(input): - x = input - x += 1 / 255.0 - - output = [] - for i in range(256, 0, -1): - v = i / 255.0 - y = (x - v) * 1000 - - y = tf.clip_by_value (y, -1.0, 0.0) + 1 - - output.append ( tf.reduce_sum (y) ) - x -= y*v - - return tf.stack ( output[::-1] ) - return func - nnlib.tf_image_histogram = tf_image_histogram - - def tf_adain(epsilon=1e-5): - def func(content, style): - axes = [1,2] - c_mean, c_var = tf.nn.moments(content, axes=axes, keep_dims=True) - s_mean, s_var = tf.nn.moments(style, axes=axes, keep_dims=True) - c_std, s_std = tf.sqrt(c_var + epsilon), tf.sqrt(s_var + epsilon) - return s_std * (content - c_mean) / c_std + s_mean - return func - nnlib.tf_adain = tf_adain - - def tf_gaussian_blur(radius=2.0): - def gaussian_kernel(size,mean,std): - d = tf.distributions.Normal( float(mean), float(std) ) - - vals = d.prob(tf.range(start = -int(size), limit = int(size) + 1, dtype = tf.float32)) - - gauss_kernel = tf.einsum('i,j->ij', - vals, - vals) - - return gauss_kernel / tf.reduce_sum(gauss_kernel) - - gauss_kernel = gaussian_kernel(radius, 1.0, radius ) - gauss_kernel = gauss_kernel[:, :, tf.newaxis, tf.newaxis] - - def func(input): - input_nc = input.get_shape().as_list()[-1] - inputs = tf.split(input, input_nc, -1) - - outputs = [] - for i in range(len(inputs)): - outputs += [ tf.nn.conv2d( inputs[i] , gauss_kernel, strides=[1, 1, 1, 1], padding="SAME") ] - - return tf.concat (outputs, axis=-1) - return func - nnlib.tf_gaussian_blur = tf_gaussian_blur - - #any channel count style diff - #outputs 0.0 .. 
1.0 style difference*loss_weight , 0.0 - no diff - def tf_style_loss(gaussian_blur_radius=0.0, loss_weight=1.0, batch_normalize=False, epsilon=1e-5): - gblur = tf_gaussian_blur(gaussian_blur_radius) - - def sd(content, style): - content_nc = content.get_shape().as_list()[-1] - style_nc = style.get_shape().as_list()[-1] - if content_nc != style_nc: - raise Exception("tf_style_loss() content_nc != style_nc") - - axes = [1,2] - c_mean, c_var = tf.nn.moments(content, axes=axes, keep_dims=True) - s_mean, s_var = tf.nn.moments(style, axes=axes, keep_dims=True) - c_std, s_std = tf.sqrt(c_var + epsilon), tf.sqrt(s_var + epsilon) - - mean_loss = tf.reduce_sum(tf.squared_difference(c_mean, s_mean)) - std_loss = tf.reduce_sum(tf.squared_difference(c_std, s_std)) - - if batch_normalize: - #normalize w.r.t batch size - n = tf.cast(tf.shape(content)[0], dtype=tf.float32) - mean_loss /= n - std_loss /= n - - return (mean_loss + std_loss) * loss_weight - - def func(target, style): - if gaussian_blur_radius > 0.0: - return sd( gblur(target), gblur(style)) - else: - return sd( target, style ) - return func - - nnlib.tf_style_loss = tf_style_loss - @staticmethod def import_keras(device_config = None): if nnlib.keras is not None: return nnlib.code_import_keras - nnlib.import_tf(device_config) - device_config = nnlib.active_DeviceConfig + if device_config is None: + device_config = nnlib.active_DeviceConfig + + nnlib.active_DeviceConfig = device_config + + if "tensorflow" in device_config.backend: + nnlib._import_tf(device_config) + device_config = nnlib.active_DeviceConfig + elif device_config.backend == "plaidML": + os.environ["KERAS_BACKEND"] = "plaidml.keras.backend" + os.environ["PLAIDML_DEVICE_IDS"] = ",".join ( [ nnlib.device.getDeviceID(idx) for idx in device_config.gpu_idxs] ) + if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1': suppressor = std_utils.suppress_stdout_stderr().__enter__() - + import keras as keras_ nnlib.keras = keras_ + if device_config.backend == "plaidML": + import plaidml + import plaidml.tile + nnlib.PML = plaidml + nnlib.PMLK = plaidml.keras.backend + nnlib.PMLTile = plaidml.tile + if device_config.use_fp16: nnlib.keras.backend.set_floatx('float16') - nnlib.keras.backend.set_session(nnlib.tf_sess) + if "tensorflow" in device_config.backend: + nnlib.keras.backend.set_session(nnlib.tf_sess) + nnlib.keras.backend.set_image_data_format('channels_last') if 'TF_SUPPRESS_STD' in os.environ.keys() and os.environ['TF_SUPPRESS_STD'] == '1': @@ -411,14 +186,12 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator nnlib.__initialize_keras_functions() nnlib.code_import_keras = compile (nnlib.code_import_keras_string,'','exec') - + return nnlib.code_import_keras @staticmethod def __initialize_keras_functions(): - tf = nnlib.tf keras = nnlib.keras K = keras.backend - exec (nnlib.code_import_tf, locals(), globals()) def modelify(model_functor): def func(tensor): @@ -427,68 +200,172 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator nnlib.modelify = modelify - class ReflectionPadding2D(keras.layers.Layer): - def __init__(self, padding=(1, 1), **kwargs): - self.padding = tuple(padding) - self.input_spec = [keras.layers.InputSpec(ndim=4)] - super(ReflectionPadding2D, self).__init__(**kwargs) + def gaussian_blur(radius=2.0): + def gaussian(x, mu, sigma): + return np.exp(-(float(x) - float(mu)) ** 2 / (2 * sigma ** 2)) - def compute_output_shape(self, s): - """ If you are using "channels_last" configuration""" - return (s[0], s[1] + 2 * self.padding[0], s[2] + 2 * 
self.padding[1], s[3]) + def make_kernel(sigma): + kernel_size = max(3, int(2 * 2 * sigma + 1)) + mean = np.floor(0.5 * kernel_size) + kernel_1d = np.array([gaussian(x, mean, sigma) for x in range(kernel_size)]) + np_kernel = np.outer(kernel_1d, kernel_1d).astype(dtype=K.floatx()) + kernel = np_kernel / np.sum(np_kernel) + return kernel + + gauss_kernel = make_kernel(radius) + gauss_kernel = gauss_kernel[:, :,np.newaxis, np.newaxis] - def call(self, x, mask=None): - w_pad,h_pad = self.padding - return tf.pad(x, [[0,0], [h_pad,h_pad], [w_pad,w_pad], [0,0] ], 'REFLECT') - nnlib.ReflectionPadding2D = ReflectionPadding2D + def func(input): + inputs = [ input[:,:,:,i:i+1] for i in range( K.int_shape( input )[-1] ) ] - class DSSIMLoss(object): - def __init__(self, is_tanh=False): - self.is_tanh = is_tanh - - def __call__(self,y_true, y_pred): - if not self.is_tanh: - return (1.0 - tf.image.ssim (y_true, y_pred, 1.0)) / 2.0 - else: - return (1.0 - tf.image.ssim ((y_true/2+0.5), (y_pred/2+0.5), 1.0)) / 2.0 - nnlib.DSSIMLoss = DSSIMLoss + outputs = [] + for i in range(len(inputs)): + outputs += [ K.conv2d( inputs[i] , K.constant(gauss_kernel) , strides=(1,1), padding="same") ] + + return K.concatenate (outputs, axis=-1) + return func + nnlib.gaussian_blur = gaussian_blur - class DSSIMMSEMaskLoss(object): - def __init__(self, mask, is_mse=False): - self.mask = mask - self.is_mse = is_mse + def style_loss(gaussian_blur_radius=0.0, loss_weight=1.0, wnd_size=0, step_size=1): + if gaussian_blur_radius > 0.0: + gblur = gaussian_blur(gaussian_blur_radius) + + def sd(content, style, loss_weight): + content_nc = K.int_shape(content)[-1] + style_nc = K.int_shape(style)[-1] + if content_nc != style_nc: + raise Exception("style_loss() content_nc != style_nc") + + axes = [1,2] + c_mean, c_var = K.mean(content, axis=axes, keepdims=True), K.var(content, axis=axes, keepdims=True) + s_mean, s_var = K.mean(style, axis=axes, keepdims=True), K.var(style, axis=axes, keepdims=True) + c_std, s_std = K.sqrt(c_var + 1e-5), K.sqrt(s_var + 1e-5) + + mean_loss = K.sum(K.square(c_mean-s_mean)) + std_loss = K.sum(K.square(c_std-s_std)) - def __call__(self,y_true, y_pred): - total_loss = None - - mask = self.mask - if self.is_mse: - blur_mask = tf_gaussian_blur(max(1, mask.get_shape().as_list()[1] // 32))(mask) - return K.mean ( 100*K.square( y_true*blur_mask - y_pred*blur_mask ) ) + return (mean_loss + std_loss) * ( loss_weight / float(content_nc) ) + + def func(target, style): + if wnd_size == 0: + if gaussian_blur_radius > 0.0: + return sd( gblur(target), gblur(style), loss_weight=loss_weight) + else: + return sd( target, style, loss_weight=loss_weight ) else: - return (1.0 - (tf.image.ssim (y_true*mask, y_pred*mask, 1.0))) / 2.0 - nnlib.DSSIMMSEMaskLoss = DSSIMMSEMaskLoss + #currently unused + if nnlib.tf is not None: + sh = K.int_shape(target)[1] + k = (sh-wnd_size) // step_size + 1 + if gaussian_blur_radius > 0.0: + target, style = gblur(target), gblur(style) + target = nnlib.tf.image.extract_image_patches(target, [1,k,k,1], [1,1,1,1], [1,step_size,step_size,1], 'VALID') + style = nnlib.tf.image.extract_image_patches(style, [1,k,k,1], [1,1,1,1], [1,step_size,step_size,1], 'VALID') + return sd( target, style, loss_weight ) + if nnlib.PML is not None: + print ("Sorry, plaidML backend does not support style_loss") + return 0 + return func + nnlib.style_loss = style_loss + + + def dssim(k1=0.01, k2=0.03, max_value=1.0): + # port of tf.image.ssim to pure keras in order to work on plaidML backend. 
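+            # The math below mirrors tf.image.ssim: local statistics are taken over an
+            # 11x11 Gaussian window (sigma=1.5, tiled per channel), and SSIM is the product
+            # of a luminance term (2*mu_x*mu_y + c1) / (mu_x^2 + mu_y^2 + c1) and a
+            # contrast/structure term (2*cov_xy + c2) / (var_x + var_y + c2), with
+            # c1 = (k1*max_value)^2 and c2 = (k2*max_value)^2. The returned loss is the
+            # mean of (1 - SSIM) / 2, so identical inputs give 0.0.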
+ + def func(y_true, y_pred): + ch = K.int_shape(y_pred)[-1] + + def softmax(x, axis=-1): #from K numpy backend + y = np.exp(x - np.max(x, axis, keepdims=True)) + return y / np.sum(y, axis, keepdims=True) + + def gauss_kernel(size, sigma): + coords = np.arange(0,size, dtype=K.floatx() ) + coords -= (size - 1 ) / 2.0 + g = coords**2 + g *= ( -0.5 / (sigma**2) ) + g = np.reshape (g, (1,-1)) + np.reshape(g, (-1,1) ) + g = np.reshape (g, (1,-1)) + g = softmax(g) + g = np.reshape (g, (size, size, 1, 1)) + g = np.tile (g, (1,1,ch,1)) + return K.constant(g, dtype=K.floatx() ) + + kernel = gauss_kernel(11,1.5) + + def reducer(x): + shape = K.shape(x) + x = K.reshape(x, (-1, shape[-3] , shape[-2], shape[-1]) ) + y = K.depthwise_conv2d(x, kernel, strides=(1, 1), padding='valid') + y_shape = K.shape(y) + return K.reshape(y, (shape[0], y_shape[1], y_shape[2], y_shape[3] ) ) + + def _ssim_helper(x, y, reducer, compensation=1.0): + c1 = (k1 * max_value) ** 2 + c2 = (k2 * max_value) ** 2 + + mean0 = reducer(x) + mean1 = reducer(y) + num0 = mean0 * mean1 * 2.0 + den0 = K.square(mean0) + K.square(mean1) + luminance = (num0 + c1) / (den0 + c1) + + num1 = reducer(x * y) * 2.0 + den1 = reducer(K.square(x) + K.square(y)) + c2 *= compensation + cs = (num1 - num0 + c2) / (den1 - den0 + c2) + + return luminance, cs + + luminance, cs = _ssim_helper(y_true, y_pred, reducer) + ssim_val = K.mean(luminance * cs, axis=(-3, -2) ) + return K.mean( (1.0 - ssim_val ) / 2.0 ) + + return func + nnlib.dssim = dssim class PixelShuffler(keras.layers.Layer): def __init__(self, size=(2, 2), data_format=None, **kwargs): super(PixelShuffler, self).__init__(**kwargs) - self.data_format = keras.backend.common.normalize_data_format(data_format) + self.data_format = K.normalize_data_format(data_format) self.size = keras.utils.conv_utils.normalize_tuple(size, 2, 'size') def call(self, inputs): - input_shape = keras.backend.int_shape(inputs) + + input_shape = K.int_shape(inputs) if len(input_shape) != 4: raise ValueError('Inputs should have rank ' + str(4) + '; Received input shape:', str(input_shape)) if self.data_format == 'channels_first': - return tf.depth_to_space(inputs, self.size[0], 'NCHW') + batch_size, c, h, w = input_shape + if batch_size is None: + batch_size = -1 + rh, rw = self.size + oh, ow = h * rh, w * rw + oc = c // (rh * rw) + + out = K.reshape(inputs, (batch_size, rh, rw, oc, h, w)) + out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2)) + out = K.reshape(out, (batch_size, oc, oh, ow)) + return out elif self.data_format == 'channels_last': - return tf.depth_to_space(inputs, self.size[0], 'NHWC') + batch_size, h, w, c = input_shape + if batch_size is None: + batch_size = -1 + rh, rw = self.size + oh, ow = h * rh, w * rw + oc = c // (rh * rw) + + out = K.reshape(inputs, (batch_size, h, w, rh, rw, oc)) + out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5)) + out = K.reshape(out, (batch_size, oh, ow, oc)) + return out def compute_output_shape(self, input_shape): + if len(input_shape) != 4: raise ValueError('Inputs should have rank ' + str(4) + @@ -525,11 +402,28 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator 'data_format': self.data_format} base_config = super(PixelShuffler, self).get_config() - return dict(list(base_config.items()) + list(config.items())) - - nnlib.PixelShuffler = PixelShuffler - nnlib.SubpixelUpscaler = PixelShuffler + return dict(list(base_config.items()) + list(config.items())) + nnlib.PixelShuffler = PixelShuffler + nnlib.SubpixelUpscaler = PixelShuffler + ''' + + class 
ReflectionPadding2D(keras.layers.Layer): + def __init__(self, padding=(1, 1), **kwargs): + self.padding = tuple(padding) + self.input_spec = [keras.layers.InputSpec(ndim=4)] + super(ReflectionPadding2D, self).__init__(**kwargs) + + def compute_output_shape(self, s): + """ If you are using "channels_last" configuration""" + return (s[0], s[1] + 2 * self.padding[0], s[2] + 2 * self.padding[1], s[3]) + + def call(self, x, mask=None): + w_pad,h_pad = self.padding + return tf.pad(x, [[0,0], [h_pad,h_pad], [w_pad,w_pad], [0,0] ], 'REFLECT') + nnlib.ReflectionPadding2D = ReflectionPadding2D + + class AddUniformNoise(keras.layers.Layer): def __init__(self, power=1.0, minval=-1.0, maxval=1.0, **kwargs): super(AddUniformNoise, self).__init__(**kwargs) @@ -548,7 +442,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator base_config = super(AddUniformNoise, self).get_config() return dict(list(base_config.items()) + list(config.items())) nnlib.AddUniformNoise = AddUniformNoise - + ''' @staticmethod def import_keras_contrib(device_config = None): if nnlib.keras_contrib is not None: @@ -570,20 +464,17 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator import dlib as dlib_ nnlib.dlib = dlib_ - if not device_config.cpu_only and len(device_config.gpu_idxs) > 0: - nnlib.dlib.cuda.set_device(device_config.gpu_idxs[0]) - + if not device_config.cpu_only and "tensorflow" in device_config.backend and len(device_config.gpu_idxs) > 0: + nnlib.dlib.cuda.set_device(device_config.gpu_idxs[0]) nnlib.code_import_dlib = compile (nnlib.code_import_dlib_string,'','exec') @staticmethod def import_all(device_config = None): - if nnlib.code_import_all is None: - nnlib.import_tf(device_config) + if nnlib.code_import_all is None: nnlib.import_keras(device_config) - nnlib.import_keras_contrib(device_config) - nnlib.code_import_all = compile (nnlib.code_import_tf_string + '\n' - + nnlib.code_import_keras_string + '\n' + nnlib.import_keras_contrib(device_config) + nnlib.code_import_all = compile (nnlib.code_import_keras_string + '\n' + nnlib.code_import_keras_contrib_string + nnlib.code_import_all_string,'','exec') nnlib.__initialize_all_functions() @@ -592,6 +483,24 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator @staticmethod def __initialize_all_functions(): + exec (nnlib.import_keras(), locals(), globals()) + exec (nnlib.import_keras_contrib(), locals(), globals()) + + class DSSIMMSEMaskLoss(object): + def __init__(self, mask, is_mse=False): + self.mask = mask + self.is_mse = is_mse + def __call__(self,y_true, y_pred): + total_loss = None + mask = self.mask + if self.is_mse: + blur_mask = gaussian_blur(max(1, K.int_shape(mask)[1] // 64))(mask) + return K.mean ( 50*K.square( y_true*blur_mask - y_pred*blur_mask ) ) + else: + return 10*dssim() (y_true*mask, y_pred*mask) + nnlib.DSSIMMSEMaskLoss = DSSIMMSEMaskLoss + + ''' def ResNet(output_nc, use_batch_norm, ngf=64, n_blocks=6, use_dropout=False): exec (nnlib.import_all(), locals(), globals()) @@ -775,7 +684,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator return Conv2D( 1, 4, 1, 'valid')(x) return func nnlib.NLayerDiscriminator = NLayerDiscriminator - + ''' @staticmethod def finalize_all(): if nnlib.keras_contrib is not None: @@ -786,7 +695,6 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator nnlib.keras = None if nnlib.tf is not None: - nnlib.tf_sess.close() nnlib.tf_sess = None nnlib.tf = None diff --git a/requirements-gpu-cuda9-cudnn7.txt b/requirements-gpu-opencl-cuda9-cudnn7.txt similarity index 90% rename from requirements-gpu-cuda9-cudnn7.txt rename to 
requirements-gpu-opencl-cuda9-cudnn7.txt index 4d2d071..f2ee595 100644 --- a/requirements-gpu-cuda9-cudnn7.txt +++ b/requirements-gpu-opencl-cuda9-cudnn7.txt @@ -5,6 +5,7 @@ h5py==2.7.1 Keras==2.2.4 opencv-python==4.0.0.21 tensorflow-gpu==1.11.0 +plaidml-keras==0.5.0 scikit-image dlib==19.10.0 tqdm diff --git a/samples/SampleProcessor.py b/samples/SampleProcessor.py index 7f7ec49..93fe1c6 100644 --- a/samples/SampleProcessor.py +++ b/samples/SampleProcessor.py @@ -8,29 +8,28 @@ from facelib import FaceType class SampleProcessor(object): class TypeFlags(IntEnum): - SOURCE = 0x00000001, - WARPED = 0x00000002, - WARPED_TRANSFORMED = 0x00000004, - TRANSFORMED = 0x00000008, - LANDMARKS_ARRAY = 0x00000010, #currently unused - - RANDOM_CLOSE = 0x00000020, - MORPH_TO_RANDOM_CLOSE \ - = 0x00000040, - - FACE_ALIGN_HALF = 0x00000100, - FACE_ALIGN_FULL = 0x00000200, - FACE_ALIGN_HEAD = 0x00000400, - FACE_ALIGN_AVATAR = 0x00000800, - - FACE_MASK_FULL = 0x00001000, - FACE_MASK_EYES = 0x00002000, - - MODE_BGR = 0x01000000, #BGR - MODE_G = 0x02000000, #Grayscale - MODE_GGG = 0x04000000, #3xGrayscale - MODE_M = 0x08000000, #mask only - MODE_BGR_SHUFFLE = 0x10000000, #BGR shuffle + SOURCE = 0x00000001, + WARPED = 0x00000002, + WARPED_TRANSFORMED = 0x00000004, + TRANSFORMED = 0x00000008, + LANDMARKS_ARRAY = 0x00000010, #currently unused + + RANDOM_CLOSE = 0x00000020, + MORPH_TO_RANDOM_CLOSE = 0x00000040, + + FACE_ALIGN_HALF = 0x00000100, + FACE_ALIGN_FULL = 0x00000200, + FACE_ALIGN_HEAD = 0x00000400, + FACE_ALIGN_AVATAR = 0x00000800, + + FACE_MASK_FULL = 0x00001000, + FACE_MASK_EYES = 0x00002000, + + MODE_BGR = 0x01000000, #BGR + MODE_G = 0x02000000, #Grayscale + MODE_GGG = 0x04000000, #3xGrayscale + MODE_M = 0x08000000, #mask only + MODE_BGR_SHUFFLE = 0x10000000, #BGR shuffle class Options(object): def __init__(self, random_flip = True, normalize_tanh = False, rotation_range=[-10,10], scale_range=[-0.05, 0.05], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05]): diff --git a/utils/image_utils.py b/utils/image_utils.py index 0670a16..98e6e4a 100644 --- a/utils/image_utils.py +++ b/utils/image_utils.py @@ -5,7 +5,6 @@ import cv2 import localization from scipy.spatial import Delaunay from PIL import Image, ImageDraw, ImageFont -from nnlib import nnlib def reinhard_color_transfer(target, source, clip=False, preserve_paper=False, source_mask=None, target_mask=None): """ @@ -423,24 +422,4 @@ def reduce_colors (img_bgr, n_colors): img_bgr = cv2.cvtColor( np.array(img_rgb_p, dtype=np.float32) / 255.0, cv2.COLOR_RGB2BGR ) return img_bgr - - -class TFLabConverter(): - def __init__(self): - exec (nnlib.import_tf(), locals(), globals()) - self.tf_sess = tf_sess - - self.bgr_input_tensor = tf.placeholder("float", [None, None, 3]) - self.lab_input_tensor = tf.placeholder("float", [None, None, 3]) - - self.lab_output_tensor = tf_rgb_to_lab()(self.bgr_input_tensor) - self.bgr_output_tensor = tf_lab_to_rgb()(self.lab_input_tensor) - - - def bgr2lab(self, bgr): - return self.tf_sess.run(self.lab_output_tensor, feed_dict={self.bgr_input_tensor: bgr}) - - def lab2bgr(self, lab): - return self.tf_sess.run(self.bgr_output_tensor, feed_dict={self.lab_input_tensor: lab}) - - \ No newline at end of file + \ No newline at end of file
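
Minimal usage sketch for the keras-only helpers this patch installs on nnlib
(gaussian_blur, style_loss, dssim, DSSIMMSEMaskLoss). Illustrative only, not part of
the patch: it assumes nnlib.import_all() can resolve a default device config on the
machine, the random NHWC tensors stand in for real training batches, and the loss
weights 10/100 are arbitrary placeholders.

import numpy as np
from nnlib import nnlib

nnlib.import_all()                      # picks the backend (tensorflow or plaidML) and defines the helpers
K = nnlib.keras.backend

y_true = K.constant( np.random.rand(4, 64, 64, 3).astype(np.float32) )
y_pred = K.constant( np.random.rand(4, 64, 64, 3).astype(np.float32) )
mask   = K.constant( np.ones ( (4, 64, 64, 1), dtype=np.float32 ) )

# structural + style terms, combined the way a model loss might combine them
loss = 10 * nnlib.dssim()(y_true, y_pred) \
     + nnlib.style_loss(gaussian_blur_radius=2.0, loss_weight=100)(y_pred, y_true)

# masked DSSIM (or blurred MSE when is_mse=True), the kind of masked loss the models use
masked_loss = nnlib.DSSIMMSEMaskLoss(mask, is_mse=False)(y_true, y_pred)

print ( K.eval(loss), K.eval(masked_loss) )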