Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-08-14 10:46:59 -07:00)
update == 04.20.2019 == (#242)
* greatly improved FANSeg
* added FANSeg extractor for src and dst faces, to use it in training
* updated to 'partial' func
* trained FANSeg_256_full_face.h5; new experimental models: AVATAR, RecycleGAN
* fix for TCC-mode cards (Tesla): there was a conflict with plaidML initialization
* updated manuals
Commit 046649e6be (parent 7be2fd67f5): 32 changed files with 1152 additions and 329 deletions.
@@ -16,14 +16,23 @@ class Model(ModelBase):
                              ask_sort_by_yaw=False,
                              ask_random_flip=False,
                              ask_src_scale_mod=False)
 
+    #override
+    def onInitializeOptions(self, is_first_run, ask_override):
+        default_face_type = 'f'
+        if is_first_run:
+            self.options['face_type'] = io.input_str ("Half or Full face? (h/f, ?:help skip:f) : ", default_face_type, ['h','f'], help_message="Half face has better resolution, but covers less area of cheeks.").lower()
+        else:
+            self.options['face_type'] = self.options.get('face_type', default_face_type)
+
     #override
     def onInitialize(self):
         exec(nnlib.import_all(), locals(), globals())
         self.set_vram_batch_requirements( {1.5:4} )
 
         self.resolution = 256
-        self.face_type = FaceType.FULL
+        self.face_type = FaceType.FULL if self.options['face_type'] == 'f' else FaceType.HALF
 
         self.fan_seg = FANSegmentator(self.resolution,
                                       FaceType.toString(self.face_type),
@@ -33,18 +42,18 @@ class Model(ModelBase):
 
         if self.is_training_mode:
             f = SampleProcessor.TypeFlags
-            f_type = f.FACE_TYPE_FULL
+            face_type = f.FACE_TYPE_FULL if self.options['face_type'] == 'f' else f.FACE_TYPE_HALF
 
             self.set_training_data_generators ([
                     SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
-                        sample_process_options=SampleProcessor.Options(random_flip=True, motion_blur = [25, 1], normalize_tanh = True ),
-                        output_sample_types=[ [f.TRANSFORMED | f_type | f.MODE_BGR_SHUFFLE | f.OPT_APPLY_MOTION_BLUR, self.resolution],
-                                              [f.TRANSFORMED | f_type | f.MODE_M | f.FACE_MASK_FULL, self.resolution]
+                        sample_process_options=SampleProcessor.Options(random_flip=True, motion_blur = [25, 1] ),
+                        output_sample_types=[ [f.WARPED_TRANSFORMED | face_type | f.MODE_BGR_SHUFFLE | f.OPT_APPLY_MOTION_BLUR, self.resolution],
+                                              [f.WARPED_TRANSFORMED | face_type | f.MODE_M | f.FACE_MASK_FULL, self.resolution]
                         ]),
 
                     SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
-                        sample_process_options=SampleProcessor.Options(random_flip=True, normalize_tanh = True ),
-                        output_sample_types=[ [f.TRANSFORMED | f_type | f.MODE_BGR_SHUFFLE, self.resolution]
+                        sample_process_options=SampleProcessor.Options(random_flip=True ),
+                        output_sample_types=[ [f.TRANSFORMED | face_type | f.MODE_BGR_SHUFFLE, self.resolution]
                         ])
                 ])
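Editor's note: the src generator above requests motion-blur augmentation (motion_blur = [25, 1]) and applies it through f.OPT_APPLY_MOTION_BLUR; the exact meaning of the two numbers is defined inside SampleProcessor and is not shown in this diff. As a rough sketch of the underlying technique only (standard OpenCV, not DeepFaceLab's actual implementation):

    import cv2
    import numpy as np

    def apply_motion_blur(img, size=25, angle=0.0):
        # build a 1-D line kernel, rotate it, and convolve:
        # a common motion-blur augmentation
        k = np.zeros((size, size), np.float32)
        k[size // 2, :] = 1.0 / size
        rot = cv2.getRotationMatrix2D((size / 2 - 0.5, size / 2 - 0.5), angle, 1.0)
        k = cv2.warpAffine(k, rot, (size, size))
        k /= k.sum()  # renormalize after rotation
        return cv2.filter2D(img, -1, k)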
@@ -56,20 +65,18 @@ class Model(ModelBase):
     def onTrainOneIter(self, generators_samples, generators_list):
         target_src, target_src_mask = generators_samples[0]
 
-        loss = self.fan_seg.train_on_batch( [target_src], [target_src_mask] )
+        loss,acc = self.fan_seg.train_on_batch( [target_src], [target_src_mask] )
 
-        return ( ('loss', loss), )
+        return ( ('loss', loss), ('acc',acc))
 
     #override
     def onGetPreview(self, sample):
         test_A = sample[0][0][0:4] #first 4 samples
         test_B = sample[1][0][0:4] #first 4 samples
 
-        mAA = self.fan_seg.extract_from_bgr([test_A])
-        mBB = self.fan_seg.extract_from_bgr([test_B])
-
         test_A, test_B, = [ np.clip( (x + 1.0)/2.0, 0.0, 1.0) for x in [test_A, test_B] ]
 
+        mAA = self.fan_seg.extract(test_A)
+        mBB = self.fan_seg.extract(test_B)
+
         mAA = np.repeat ( mAA, (3,), -1)
         mBB = np.repeat ( mBB, (3,), -1)
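Editor's note: onTrainOneIter now unpacks two values from train_on_batch. In plain Keras that is what you get once the compiled model carries a metric; presumably FANSegmentator was recompiled with an accuracy metric in this commit. A minimal sketch, with a hypothetical toy segmenter rather than the FANSeg architecture:

    import numpy as np
    from keras.models import Sequential
    from keras.layers import Conv2D

    model = Sequential([Conv2D(1, 3, padding='same', activation='sigmoid',
                               input_shape=(256, 256, 3))])
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

    x = np.random.rand(4, 256, 256, 3).astype(np.float32)          # face batch
    y = (np.random.rand(4, 256, 256, 1) > 0.5).astype(np.float32)  # binary masks

    loss, acc = model.train_on_batch(x, y)  # a [loss, acc] pair once a metric is compiled in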
@@ -89,6 +96,6 @@ class Model(ModelBase):
                                       test_B[i,:,:,0:3]*mBB[i],
                                       ), axis=1) )
 
-        return [ ('FANSegmentator', np.concatenate ( st, axis=0 ) ),
-                 ('never seen', np.concatenate ( st2, axis=0 ) ),
+        return [ ('training data', np.concatenate ( st, axis=0 ) ),
+                 ('evaluating data', np.concatenate ( st2, axis=0 ) ),
                 ]
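Editor's note: both preview hunks rely on np.repeat(mask, (3,), -1) to broadcast a single-channel mask to three channels so it can be multiplied with, and tiled next to, the BGR frames. A quick illustration of that shape manipulation:

    import numpy as np

    mask = np.random.rand(4, 256, 256, 1)  # batch of one-channel masks, as from fan_seg.extract
    rgb_mask = np.repeat(mask, (3,), -1)   # shape (4, 256, 256, 3)
    face = np.random.rand(4, 256, 256, 3)
    masked_preview = face * rgb_mask       # elementwise, as in test_A[i,:,:,0:3]*mAA[i]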
models/Model_RecycleGAN/Model.py (new file, 477 lines)

@@ -0,0 +1,477 @@
from functools import partial

import cv2
import numpy as np

from facelib import FaceType
from interact import interact as io
from mathlib import get_power_of_two
from models import ModelBase
from nnlib import nnlib
from samplelib import *

class RecycleGANModel(ModelBase):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs,
                         ask_sort_by_yaw=False,
                         ask_random_flip=False,
                         ask_src_scale_mod=False)

    #override
    def onInitializeOptions(self, is_first_run, ask_override):
        if is_first_run:
            self.options['resolution'] = io.input_int("Resolution ( 128,256 ?:help skip:128) : ", 128, [128,256], help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16.")
        else:
            self.options['resolution'] = self.options.get('resolution', 128)

    #override
    def onInitialize(self, batch_size=-1, **in_options):
        exec(nnlib.code_import_all, locals(), globals())
        self.set_vram_batch_requirements({6:16})

        resolution = self.options['resolution']
        bgr_shape = (resolution, resolution, 3)
        ngf = 64
        npf = 32
        ndf = 64
        lambda_A = 10
        lambda_B = 10

        use_batch_norm = True #created_batch_size > 1
        self.GA = modelify(RecycleGANModel.ResNet (bgr_shape[2], use_batch_norm, n_blocks=6, ngf=ngf, use_dropout=True))(Input(bgr_shape))
        self.GB = modelify(RecycleGANModel.ResNet (bgr_shape[2], use_batch_norm, n_blocks=6, ngf=ngf, use_dropout=True))(Input(bgr_shape))

        #self.GA = modelify(UNet (bgr_shape[2], use_batch_norm, num_downs=get_power_of_two(resolution)-1, ngf=ngf, use_dropout=True))(Input(bgr_shape))
        #self.GB = modelify(UNet (bgr_shape[2], use_batch_norm, num_downs=get_power_of_two(resolution)-1, ngf=ngf, use_dropout=True))(Input(bgr_shape))

        self.PA = modelify(RecycleGANModel.UNetTemporalPredictor(bgr_shape[2], use_batch_norm, ngf=npf))([Input(bgr_shape), Input(bgr_shape)])
        self.PB = modelify(RecycleGANModel.UNetTemporalPredictor(bgr_shape[2], use_batch_norm, ngf=npf))([Input(bgr_shape), Input(bgr_shape)])

        self.DA = modelify(RecycleGANModel.PatchDiscriminator(ndf=ndf) ) (Input(bgr_shape))
        self.DB = modelify(RecycleGANModel.PatchDiscriminator(ndf=ndf) ) (Input(bgr_shape))

        if not self.is_first_run():
            weights_to_load = [
                (self.GA, 'GA.h5'),
                (self.DA, 'DA.h5'),
                (self.PA, 'PA.h5'),
                (self.GB, 'GB.h5'),
                (self.DB, 'DB.h5'),
                (self.PB, 'PB.h5'),
            ]
            self.load_weights_safe(weights_to_load)

        real_A0 = Input(bgr_shape, name="real_A0")
        real_A1 = Input(bgr_shape, name="real_A1")
        real_A2 = Input(bgr_shape, name="real_A2")

        real_B0 = Input(bgr_shape, name="real_B0")
        real_B1 = Input(bgr_shape, name="real_B1")
        real_B2 = Input(bgr_shape, name="real_B2")

        DA_ones  = K.ones_like ( K.shape(self.DA.outputs[0]) )
        DA_zeros = K.zeros_like ( K.shape(self.DA.outputs[0]) )
        DB_ones  = K.ones_like ( K.shape(self.DB.outputs[0]) )
        DB_zeros = K.zeros_like ( K.shape(self.DB.outputs[0]) )

        def DLoss(labels, logits):
            return K.mean(K.binary_crossentropy(labels, logits))

        def CycleLoss(t1, t2):
            return K.mean(K.abs(t1 - t2))

        def RecurrentLOSS(t1, t2):
            return K.mean(K.abs(t1 - t2))

        def RecycleLOSS(t1, t2):
            return K.mean(K.abs(t1 - t2))

        fake_B0 = self.GA(real_A0)
        fake_B1 = self.GA(real_A1)

        fake_A0 = self.GB(real_B0)
        fake_A1 = self.GB(real_B1)

        real_A0_d = self.DA(real_A0)
        real_A0_d_ones = K.ones_like(real_A0_d)
        real_A1_d = self.DA(real_A1)
        real_A1_d_ones = K.ones_like(real_A1_d)

        fake_A0_d = self.DA(fake_A0)
        fake_A0_d_ones = K.ones_like(fake_A0_d)
        fake_A0_d_zeros = K.zeros_like(fake_A0_d)

        fake_A1_d = self.DA(fake_A1)
        fake_A1_d_ones = K.ones_like(fake_A1_d)
        fake_A1_d_zeros = K.zeros_like(fake_A1_d)

        real_B0_d = self.DB(real_B0)
        real_B0_d_ones = K.ones_like(real_B0_d)

        real_B1_d = self.DB(real_B1)
        real_B1_d_ones = K.ones_like(real_B1_d)

        fake_B0_d = self.DB(fake_B0)
        fake_B0_d_ones = K.ones_like(fake_B0_d)
        fake_B0_d_zeros = K.zeros_like(fake_B0_d)

        fake_B1_d = self.DB(fake_B1)
        fake_B1_d_ones = K.ones_like(fake_B1_d)
        fake_B1_d_zeros = K.zeros_like(fake_B1_d)

        pred_A2 = self.PA ( [real_A0, real_A1] )
        pred_B2 = self.PB ( [real_B0, real_B1] )
        rec_A2 = self.GB ( self.PB ( [fake_B0, fake_B1] ) )
        rec_B2 = self.GA ( self.PA ( [fake_A0, fake_A1] ) )

        loss_GA = DLoss(fake_B0_d_ones, fake_B0_d ) + \
                  DLoss(fake_B1_d_ones, fake_B1_d ) + \
                  lambda_A * (RecurrentLOSS(pred_A2, real_A2) + \
                              RecycleLOSS(rec_B2, real_B2) )

        weights_GA = self.GA.trainable_weights + self.PA.trainable_weights

        loss_GB = DLoss(fake_A0_d_ones, fake_A0_d ) + \
                  DLoss(fake_A1_d_ones, fake_A1_d ) + \
                  lambda_B * (RecurrentLOSS(pred_B2, real_B2) + \
                              RecycleLOSS(rec_A2, real_A2) )

        weights_GB = self.GB.trainable_weights + self.PB.trainable_weights

        def opt():
            return Adam(lr=2e-4, beta_1=0.5, beta_2=0.999, tf_cpu_mode=2)#, clipnorm=1)

        self.GA_train = K.function ([real_A0, real_A1, real_A2, real_B0, real_B1, real_B2], [loss_GA],
                                    opt().get_updates(loss_GA, weights_GA) )

        self.GB_train = K.function ([real_A0, real_A1, real_A2, real_B0, real_B1, real_B2], [loss_GB],
                                    opt().get_updates(loss_GB, weights_GB) )

        ###########

        loss_D_A0 = ( DLoss(real_A0_d_ones, real_A0_d ) + \
                      DLoss(fake_A0_d_zeros, fake_A0_d ) ) * 0.5

        loss_D_A1 = ( DLoss(real_A1_d_ones, real_A1_d ) + \
                      DLoss(fake_A1_d_zeros, fake_A1_d ) ) * 0.5

        loss_D_A = loss_D_A0 + loss_D_A1

        self.DA_train = K.function ([real_A0, real_A1, real_A2, real_B0, real_B1, real_B2], [loss_D_A],
                                    opt().get_updates(loss_D_A, self.DA.trainable_weights) )

        ############

        loss_D_B0 = ( DLoss(real_B0_d_ones, real_B0_d ) + \
                      DLoss(fake_B0_d_zeros, fake_B0_d ) ) * 0.5

        loss_D_B1 = ( DLoss(real_B1_d_ones, real_B1_d ) + \
                      DLoss(fake_B1_d_zeros, fake_B1_d ) ) * 0.5

        loss_D_B = loss_D_B0 + loss_D_B1

        self.DB_train = K.function ([real_A0, real_A1, real_A2, real_B0, real_B1, real_B2], [loss_D_B],
                                    opt().get_updates(loss_D_B, self.DB.trainable_weights) )

        ############

        self.G_view = K.function([real_A0, real_A1, real_A2, real_B0, real_B1, real_B2], [fake_A0, fake_A1, pred_A2, rec_A2, fake_B0, fake_B1, pred_B2, rec_B2])

        if self.is_training_mode:
            f = SampleProcessor.TypeFlags
            self.set_training_data_generators ([
                    SampleGeneratorImageTemporal(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
                        temporal_image_count=3,
                        sample_process_options=SampleProcessor.Options(random_flip = False, normalize_tanh = True),
                        output_sample_types=[ [f.SOURCE | f.MODE_BGR, resolution] ] ),

                    SampleGeneratorImageTemporal(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                        temporal_image_count=3,
                        sample_process_options=SampleProcessor.Options(random_flip = False, normalize_tanh = True),
                        output_sample_types=[ [f.SOURCE | f.MODE_BGR, resolution] ] ),
                ])
        else:
            self.G_convert = K.function([real_B0], [fake_A0])

    #override
    def onSave(self):
        self.save_weights_safe( [[self.GA, 'GA.h5'],
                                 [self.GB, 'GB.h5'],
                                 [self.DA, 'DA.h5'],
                                 [self.DB, 'DB.h5'],
                                 [self.PA, 'PA.h5'],
                                 [self.PB, 'PB.h5'] ])

    #override
    def onTrainOneIter(self, generators_samples, generators_list):
        source_src_0, source_src_1, source_src_2 = generators_samples[0]
        source_dst_0, source_dst_1, source_dst_2 = generators_samples[1]

        feed = [source_src_0, source_src_1, source_src_2, source_dst_0, source_dst_1, source_dst_2]

        loss_GA, = self.GA_train(feed)
        loss_GB, = self.GB_train(feed)
        loss_DA, = self.DA_train(feed)
        loss_DB, = self.DB_train(feed)

        return ( ('GA', loss_GA), ('GB', loss_GB), ('DA', loss_DA), ('DB', loss_DB) )

    #override
    def onGetPreview(self, sample):
        test_A0 = sample[0][0]
        test_A1 = sample[0][1]
        test_A2 = sample[0][2]

        test_B0 = sample[1][0]
        test_B1 = sample[1][1]
        test_B2 = sample[1][2]

        G_view_result = self.G_view([test_A0, test_A1, test_A2, test_B0, test_B1, test_B2])

        fake_A0, fake_A1, pred_A2, rec_A2, fake_B0, fake_B1, pred_B2, rec_B2 = [ x[0] / 2 + 0.5 for x in G_view_result]
        test_A0, test_A1, test_A2, test_B0, test_B1, test_B2 = [ x[0] / 2 + 0.5 for x in [test_A0, test_A1, test_A2, test_B0, test_B1, test_B2] ]

        r = np.concatenate ((np.concatenate ( (test_A0, test_A1, test_A2, pred_A2, fake_B0, fake_B1, rec_A2), axis=1),
                             np.concatenate ( (test_B0, test_B1, test_B2, pred_B2, fake_A0, fake_A1, rec_B2), axis=1)
                            ), axis=0)

        return [ ('RecycleGAN', r) ]

    def predictor_func(self, face):
        x = self.G_convert ( [ face[np.newaxis,...]*2-1 ] )[0]
        return np.clip ( x[0] / 2 + 0.5, 0, 1)

    #override
    def get_converter(self, **in_options):
        from converters import ConverterImage
        return ConverterImage(self.predictor_func,
                              predictor_input_size=self.options['resolution'],
                              **in_options)

    @staticmethod
    def ResNet(output_nc, use_batch_norm, ngf=64, n_blocks=6, use_dropout=False):
        exec (nnlib.import_all(), locals(), globals())

        if not use_batch_norm:
            use_bias = True
            def XNormalization(x):
                return InstanceNormalization (axis=-1)(x)
        else:
            use_bias = False
            def XNormalization(x):
                return BatchNormalization (axis=-1)(x)

        XConv2D = partial(Conv2D, padding='same', use_bias=use_bias)
        XConv2DTranspose = partial(Conv2DTranspose, padding='same', use_bias=use_bias)

        def func(input):

            def ResnetBlock(dim, use_dropout=False):
                def func(input):
                    x = input

                    x = XConv2D(dim, 3, strides=1)(x)
                    x = XNormalization(x)
                    x = ReLU()(x)

                    if use_dropout:
                        x = Dropout(0.5)(x)

                    x = XConv2D(dim, 3, strides=1)(x)
                    x = XNormalization(x)
                    x = ReLU()(x)
                    return Add()([x, input])
                return func

            x = input

            x = ReLU()(XNormalization(XConv2D(ngf, 7, strides=1)(x)))

            x = ReLU()(XNormalization(XConv2D(ngf*2, 3, strides=2)(x)))
            x = ReLU()(XNormalization(XConv2D(ngf*4, 3, strides=2)(x)))

            for i in range(n_blocks):
                x = ResnetBlock(ngf*4, use_dropout=use_dropout)(x)

            x = ReLU()(XNormalization(XConv2DTranspose(ngf*2, 3, strides=2)(x)))
            x = ReLU()(XNormalization(XConv2DTranspose(ngf, 3, strides=2)(x)))

            x = XConv2D(output_nc, 7, strides=1, activation='tanh', use_bias=True)(x)

            return x

        return func

    @staticmethod
    def UNet(output_nc, use_batch_norm, ngf=64, use_dropout=False):
        exec (nnlib.import_all(), locals(), globals())

        if not use_batch_norm:
            use_bias = True
            def XNormalizationL():
                return InstanceNormalization (axis=-1)
        else:
            use_bias = False
            def XNormalizationL():
                return BatchNormalization (axis=-1)

        def XNormalization(x):
            return XNormalizationL()(x)

        XConv2D = partial(Conv2D, padding='same', use_bias=use_bias)
        XConv2DTranspose = partial(Conv2DTranspose, padding='same', use_bias=use_bias)

        def func(input):

            b,h,w,c = K.int_shape(input)

            n_downs = get_power_of_two(w) - 4

            Norm = XNormalizationL()
            Norm2 = XNormalizationL()
            Norm4 = XNormalizationL()
            Norm8 = XNormalizationL()

            x = input

            x = e1 = XConv2D( ngf, 4, strides=2, use_bias=True ) (x)

            x = e2 = Norm2( XConv2D( ngf*2, 4, strides=2 )( LeakyReLU(0.2)(x) ) )
            x = e3 = Norm4( XConv2D( ngf*4, 4, strides=2 )( LeakyReLU(0.2)(x) ) )

            l = []
            for i in range(n_downs):
                x = Norm8( XConv2D( ngf*8, 4, strides=2 )( LeakyReLU(0.2)(x) ) )
                l += [x]

            x = XConv2D( ngf*8, 4, strides=2, use_bias=True )( LeakyReLU(0.2)(x) )

            for i in range(n_downs):
                x = Norm8( XConv2DTranspose( ngf*8, 4, strides=2 )( ReLU()(x) ) )
                if i <= n_downs-2:
                    x = Dropout(0.5)(x)
                x = Concatenate(axis=-1)([x, l[-i-1] ])

            x = Norm4( XConv2DTranspose( ngf*4, 4, strides=2 )( ReLU()(x) ) )
            x = Concatenate(axis=-1)([x, e3])

            x = Norm2( XConv2DTranspose( ngf*2, 4, strides=2 )( ReLU()(x) ) )
            x = Concatenate(axis=-1)([x, e2])

            x = Norm( XConv2DTranspose( ngf, 4, strides=2 )( ReLU()(x) ) )
            x = Concatenate(axis=-1)([x, e1])

            x = XConv2DTranspose(output_nc, 4, strides=2, activation='tanh', use_bias=True)( ReLU()(x) )

            return x
        return func
    nnlib.UNet = UNet

    @staticmethod
    def UNetTemporalPredictor(output_nc, use_batch_norm, ngf=64, use_dropout=False):
        exec (nnlib.import_all(), locals(), globals())
        def func(inputs):
            past_2_image_tensor, past_1_image_tensor = inputs

            x = Concatenate(axis=-1)([ past_2_image_tensor, past_1_image_tensor ])
            x = UNet(3, use_batch_norm, ngf=ngf, use_dropout=use_dropout) (x)

            return x

        return func

    @staticmethod
    def PatchDiscriminator(ndf=64):
        exec (nnlib.import_all(), locals(), globals())

        #use_bias = True
        #def XNormalization(x):
        #    return InstanceNormalization (axis=-1)(x)
        use_bias = False
        def XNormalization(x):
            return BatchNormalization (axis=-1)(x)

        XConv2D = partial(Conv2D, use_bias=use_bias)

        def func(input):
            b,h,w,c = K.int_shape(input)

            x = input

            x = ZeroPadding2D((1,1))(x)
            x = XConv2D( ndf, 4, strides=2, padding='valid', use_bias=True)(x)
            x = LeakyReLU(0.2)(x)

            x = ZeroPadding2D((1,1))(x)
            x = XConv2D( ndf*2, 4, strides=2, padding='valid')(x)
            x = XNormalization(x)
            x = LeakyReLU(0.2)(x)

            x = ZeroPadding2D((1,1))(x)
            x = XConv2D( ndf*4, 4, strides=2, padding='valid')(x)
            x = XNormalization(x)
            x = LeakyReLU(0.2)(x)

            x = ZeroPadding2D((1,1))(x)
            x = XConv2D( ndf*8, 4, strides=2, padding='valid')(x)
            x = XNormalization(x)
            x = LeakyReLU(0.2)(x)

            x = ZeroPadding2D((1,1))(x)
            x = XConv2D( ndf*8, 4, strides=2, padding='valid')(x)
            x = XNormalization(x)
            x = LeakyReLU(0.2)(x)

            x = ZeroPadding2D((1,1))(x)
            return XConv2D( 1, 4, strides=1, padding='valid', use_bias=True, activation='sigmoid')(x)
        return func

    @staticmethod
    def NLayerDiscriminator(ndf=64, n_layers=3):
        exec (nnlib.import_all(), locals(), globals())

        #use_bias = True
        #def XNormalization(x):
        #    return InstanceNormalization (axis=-1)(x)
        use_bias = False
        def XNormalization(x):
            return BatchNormalization (axis=-1)(x)

        XConv2D = partial(Conv2D, use_bias=use_bias)

        def func(input):
            b,h,w,c = K.int_shape(input)

            x = input

            f = ndf

            x = ZeroPadding2D((1,1))(x)
            x = XConv2D( f, 4, strides=2, padding='valid', use_bias=True)(x)
            f = min( ndf*8, f*2 )
            x = LeakyReLU(0.2)(x)

            for i in range(n_layers):
                x = ZeroPadding2D((1,1))(x)
                x = XConv2D( f, 4, strides=2, padding='valid')(x)
                f = min( ndf*8, f*2 )
                x = XNormalization(x)
                x = LeakyReLU(0.2)(x)

            x = ZeroPadding2D((1,1))(x)
            x = XConv2D( f, 4, strides=1, padding='valid')(x)
            x = XNormalization(x)
            x = LeakyReLU(0.2)(x)

            x = ZeroPadding2D((1,1))(x)
            return XConv2D( 1, 4, strides=1, padding='valid', use_bias=True, activation='sigmoid')(x)
        return func

Model = RecycleGANModel
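Editor's note: the objective assembled in onInitialize pairs the usual adversarial term (DLoss against the patch discriminator) with two L1 temporal terms weighted by lambda_A = lambda_B = 10: a recurrent loss, where predictor P must extrapolate frame 2 from frames 0 and 1 within its own domain, and a recycle loss, where frames are mapped to the other domain, extrapolated there, and mapped back. A minimal numpy sketch of those two terms; the identity "generators" and linear "predictor" below are placeholders, not the real networks:

    import numpy as np

    def l1(a, b):
        return np.mean(np.abs(a - b))      # same form as RecurrentLOSS / RecycleLOSS

    GA = GB = lambda x: x                  # placeholder generators A->B, B->A
    PA = PB = lambda x0, x1: 2 * x1 - x0   # placeholder temporal predictor

    x0, x1, x2 = (np.random.rand(128, 128, 3) for _ in range(3))  # consecutive frames, domain A

    recurrent_A = l1(PA(x0, x1), x2)                   # predict within A
    recycle_A   = l1(GB(PB(GA(x0), GA(x1))), x2)       # A -> B, predict in B, map back to A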
models/Model_RecycleGAN/__init__.py (new file, 1 line)

@@ -0,0 +1 @@
from .Model import Model
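Editor's note: PatchDiscriminator above ends in a one-channel sigmoid map rather than a single scalar, so each output element judges one input patch. With five 4x4 stride-2 convolutions followed by a 4x4 stride-1 one, the patch size works out as below (the standard receptive-field recurrence, not DeepFaceLab code):

    def receptive_field(layers):
        # walk backwards through (kernel, stride) pairs: r_in = s * (r_out - 1) + k
        r = 1
        for k, s in reversed(layers):
            r = s * (r - 1) + k
        return r

    patch_d = [(4, 2)] * 5 + [(4, 1)]  # the conv stack of PatchDiscriminator(ndf=64)
    print(receptive_field(patch_d))    # 190: each sigmoid score covers a 190x190 patch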
@@ -1,3 +1,4 @@
+from functools import partial
 import numpy as np
 
 from nnlib import nnlib
@@ -385,20 +386,20 @@ class SAEModel(ModelBase):
 
     #override
     def onGetPreview(self, sample):
-        test_A = sample[0][1][0:4] #first 4 samples
-        test_A_m = sample[0][2][0:4] #first 4 samples
-        test_B = sample[1][1][0:4]
-        test_B_m = sample[1][2][0:4]
+        test_S = sample[0][1][0:4] #first 4 samples
+        test_S_m = sample[0][2][0:4] #first 4 samples
+        test_D = sample[1][1][0:4]
+        test_D_m = sample[1][2][0:4]
 
         if self.options['learn_mask']:
-            S, D, SS, DD, DDM, SD, SDM = [ np.clip(x, 0.0, 1.0) for x in ([test_A,test_B] + self.AE_view ([test_A, test_B]) ) ]
+            S, D, SS, DD, DDM, SD, SDM = [ np.clip(x, 0.0, 1.0) for x in ([test_S,test_D] + self.AE_view ([test_S, test_D]) ) ]
             DDM, SDM, = [ np.repeat (x, (3,), -1) for x in [DDM, SDM] ]
         else:
-            S, D, SS, DD, SD, = [ np.clip(x, 0.0, 1.0) for x in ([test_A,test_B] + self.AE_view ([test_A, test_B]) ) ]
+            S, D, SS, DD, SD, = [ np.clip(x, 0.0, 1.0) for x in ([test_S,test_D] + self.AE_view ([test_S, test_D]) ) ]
 
         result = []
         st = []
-        for i in range(0, len(test_A)):
+        for i in range(0, len(test_S)):
             ar = S[i], SS[i], D[i], DD[i], SD[i]
             st.append ( np.concatenate ( ar, axis=1) )
 
@@ -406,12 +407,12 @@ class SAEModel(ModelBase):
 
         if self.options['learn_mask']:
             st_m = []
-            for i in range(0, len(test_A)):
-                ar = S[i], SS[i], D[i], DD[i]*DDM[i], SD[i]*(DDM[i]*SDM[i])
+            for i in range(0, len(test_S)):
+                ar = S[i]*test_S_m[i], SS[i], D[i]*test_D_m[i], DD[i]*DDM[i], SD[i]*(DDM[i]*SDM[i])
                 st_m.append ( np.concatenate ( ar, axis=1) )
 
             result += [ ('SAE masked', np.concatenate (st_m, axis=0 )), ]
 
         return result
 
     def predictor_func (self, face):
@@ -485,57 +486,29 @@ class SAEModel(ModelBase):
                 return x
         SAEModel.ResidualBlock = ResidualBlock
 
-        def ResidualBlock_pre (**base_kwargs):
-            def func(*args, **kwargs):
-                kwargs.update(base_kwargs)
-                return ResidualBlock(*args, **kwargs)
-            return func
-        SAEModel.ResidualBlock_pre = ResidualBlock_pre
-
         def downscale (dim, padding='zero', norm='', act='', **kwargs):
             def func(x):
                 return Norm(norm)( Act(act) (Conv2D(dim, kernel_size=5, strides=2, padding=padding)(x)) )
             return func
         SAEModel.downscale = downscale
 
-        def downscale_pre (**base_kwargs):
-            def func(*args, **kwargs):
-                kwargs.update(base_kwargs)
-                return downscale(*args, **kwargs)
-            return func
-        SAEModel.downscale_pre = downscale_pre
-
         def upscale (dim, padding='zero', norm='', act='', **kwargs):
             def func(x):
                 return SubpixelUpscaler()(Norm(norm)(Act(act)(Conv2D(dim * 4, kernel_size=3, strides=1, padding=padding)(x))))
             return func
         SAEModel.upscale = upscale
 
-        def upscale_pre (**base_kwargs):
-            def func(*args, **kwargs):
-                kwargs.update(base_kwargs)
-                return upscale(*args, **kwargs)
-            return func
-        SAEModel.upscale_pre = upscale_pre
-
         def to_bgr (output_nc, padding='zero', **kwargs):
             def func(x):
                 return Conv2D(output_nc, kernel_size=5, padding=padding, activation='sigmoid')(x)
             return func
         SAEModel.to_bgr = to_bgr
 
-        def to_bgr_pre (**base_kwargs):
-            def func(*args, **kwargs):
-                kwargs.update(base_kwargs)
-                return to_bgr(*args, **kwargs)
-            return func
-        SAEModel.to_bgr_pre = to_bgr_pre
-
     @staticmethod
     def LIAEEncFlow(resolution, ch_dims, **kwargs):
         exec (nnlib.import_all(), locals(), globals())
-        upscale = SAEModel.upscale_pre(**kwargs)
-        downscale = SAEModel.downscale_pre(**kwargs)
+        upscale = partial(SAEModel.upscale, **kwargs)
+        downscale = partial(SAEModel.downscale, **kwargs)
 
         def func(input):
             dims = K.int_shape(input)[-1]*ch_dims
@@ -553,7 +526,7 @@ class SAEModel(ModelBase):
     @staticmethod
     def LIAEInterFlow(resolution, ae_dims=256, **kwargs):
         exec (nnlib.import_all(), locals(), globals())
-        upscale = SAEModel.upscale_pre(**kwargs)
+        upscale = partial(SAEModel.upscale, **kwargs)
         lowest_dense_res=resolution // 16
 
         def func(input):
@@ -568,10 +541,10 @@ class SAEModel(ModelBase):
     @staticmethod
     def LIAEDecFlow(output_nc, ch_dims, multiscale_count=1, add_residual_blocks=False, padding='zero', norm='', **kwargs):
         exec (nnlib.import_all(), locals(), globals())
-        upscale = SAEModel.upscale_pre(**kwargs)
-        to_bgr = SAEModel.to_bgr_pre(**kwargs)
+        upscale = partial(SAEModel.upscale, **kwargs)
+        to_bgr = partial(SAEModel.to_bgr, **kwargs)
         dims = output_nc * ch_dims
-        ResidualBlock = SAEModel.ResidualBlock_pre(**kwargs)
+        ResidualBlock = partial(SAEModel.ResidualBlock, **kwargs)
 
         def func(input):
             x = input[0]
@@ -609,8 +582,8 @@ class SAEModel(ModelBase):
     @staticmethod
     def DFEncFlow(resolution, ae_dims, ch_dims, padding='zero', **kwargs):
         exec (nnlib.import_all(), locals(), globals())
-        upscale = SAEModel.upscale_pre(padding=padding)
-        downscale = SAEModel.downscale_pre(padding=padding)
+        upscale = partial(SAEModel.upscale, padding=padding)
+        downscale = partial(SAEModel.downscale, padding=padding)
         lowest_dense_res = resolution // 16
 
         def func(input):
@@ -634,10 +607,10 @@ class SAEModel(ModelBase):
     @staticmethod
     def DFDecFlow(output_nc, ch_dims, multiscale_count=1, add_residual_blocks=False, padding='zero', **kwargs):
         exec (nnlib.import_all(), locals(), globals())
-        upscale = SAEModel.upscale_pre(padding=padding)
-        to_bgr = SAEModel.to_bgr_pre(padding=padding)
+        upscale = partial(SAEModel.upscale, padding=padding)
+        to_bgr = partial(SAEModel.to_bgr, padding=padding)
         dims = output_nc * ch_dims
-        ResidualBlock = SAEModel.ResidualBlock_pre(padding=padding)
+        ResidualBlock = partial(SAEModel.ResidualBlock, padding=padding)
 
         def func(input):
             x = input[0]
Binary file not shown.
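Editor's note: the commit-message item "update to 'partial' func" is the SAE refactor above: every hand-rolled *_pre binder is replaced by functools.partial. A minimal sketch of the equivalence; upscale is stubbed, and the names mirror the diff:

    from functools import partial

    def upscale(dim, padding='zero', norm='', act='', **kwargs):
        ...  # stub standing in for SAEModel.upscale

    def upscale_pre(**base_kwargs):          # the old pattern, now deleted
        def func(*args, **kwargs):
            kwargs.update(base_kwargs)
            return upscale(*args, **kwargs)
        return func

    up_old = upscale_pre(padding='zero')       # old: upscale_pre(padding=padding)
    up_new = partial(upscale, padding='zero')  # new: partial(SAEModel.upscale, padding=padding)
    up_old(64); up_new(64)                     # identical calls for these usages

One subtle difference: the old binder's kwargs.update(base_kwargs) let the bound arguments win over call-site keywords, while functools.partial lets the call site override the bound defaults; that only matters if a caller repeats a bound keyword.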