Merge branch 'master' into preview_filenames

commit 611447bf6f
Ognjen, 2021-12-06 22:42:03 +01:00, committed by GitHub
22 changed files with 984 additions and 273 deletions


@ -18,7 +18,7 @@ from .common import random_crop, normalize_channels, cut_odd_image, overlay_alph
from .SegIEPolys import *
from .blursharpen import LinearMotionBlur, blursharpen
from .blursharpen import LinearMotionBlur, blursharpen, gaussian_sharpen, unsharpen_mask
from .filters import apply_random_rgb_levels, \
apply_random_overlay_triangle, \


@ -22,6 +22,8 @@ def blursharpen (img, sharpen_mode=0, kernel_size=3, amount=100):
blur = cv2.GaussianBlur(img, (kernel_size, kernel_size) , 0)
img = cv2.addWeighted(img, 1.0 + (0.5 * amount), blur, -(0.5 * amount), 0)
return img
elif sharpen_mode == 3: #unsharpen_mask
img = unsharpen_mask(img, amount=amount)
elif amount < 0:
n = -amount
while n > 0:
@ -35,4 +37,18 @@ def blursharpen (img, sharpen_mode=0, kernel_size=3, amount=100):
n = max(n-10,0)
return img
return img
def gaussian_sharpen(img, amount=100, sigma=1.0):
img = cv2.addWeighted(img, 1.0 + (0.05 * amount), cv2.GaussianBlur(img, (0, 0), sigma), -(0.05 * amount), 0)
return img
def unsharpen_mask(img, amount=100, sigma=0.0, threshold = (5.0 / 255.0)):
radius = max(1, round(img.shape[0] * (amount / 100)))
kernel_size = int((radius * 2) + 1)
kernel_size = (kernel_size, kernel_size)
blur = cv2.GaussianBlur(img, kernel_size, sigma)
low_contrast_mask = (abs(img - blur) < threshold).astype("float32")
sharpened = (img * (1.0 + (0.05 * amount))) + (blur * -(0.05 * amount))
img = (img * (1.0 - low_contrast_mask)) + (sharpened * low_contrast_mask)
return img
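For reference, a minimal self-contained sketch of the unsharp-masking idea these helpers implement (the demo image and sigma are hypothetical). Note that the textbook formulation sharpens only where local contrast exceeds the threshold, whereas unsharpen_mask above applies the sharpened image where contrast is below it:

import cv2
import numpy as np

img = np.random.rand(128, 128, 3).astype(np.float32)   # hypothetical float image in [0, 1]
blur = cv2.GaussianBlur(img, (0, 0), 2.0)               # low-pass copy; sigma chosen for the demo
sharpened = cv2.addWeighted(img, 1.5, blur, -0.5, 0)    # img + 0.5 * (img - blur)
mask = (np.abs(img - blur) >= 5.0 / 255.0).astype(np.float32)  # textbook: sharpen only high-contrast areas
out = np.clip(img * (1.0 - mask) + sharpened * mask, 0.0, 1.0)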


@ -131,6 +131,8 @@ if __name__ == "__main__":
'start_tensorboard' : arguments.start_tensorboard,
'dump_ckpt' : arguments.dump_ckpt,
'flask_preview' : arguments.flask_preview,
'config_training_file' : arguments.config_training_file,
'auto_gen_config' : arguments.auto_gen_config
}
from mainscripts import Trainer
Trainer.main(**kwargs)
@ -150,6 +152,8 @@ if __name__ == "__main__":
p.add_argument('--silent-start', action="store_true", dest="silent_start", default=False, help="Silent start. Automatically chooses Best GPU and last used model.")
p.add_argument('--tensorboard-logdir', action=fixPathAction, dest="tensorboard_dir", help="Directory of the tensorboard output files")
p.add_argument('--start-tensorboard', action="store_true", dest="start_tensorboard", default=False, help="Automatically start the tensorboard server preconfigured to the tensorboard-logdir")
p.add_argument('--config-training-file', action=fixPathAction, dest="config_training_file", help="Path to custom yaml configuration file")
p.add_argument('--auto-gen-config', action="store_true", dest="auto_gen_config", default=False, help="Saves a configuration file for each model used in the trainer; the file shares the model's name.")
p.add_argument('--dump-ckpt', action="store_true", dest="dump_ckpt", default=False, help="Dump the model to ckpt format.")
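A hypothetical end-to-end invocation of the two new flags, showing how they travel from argparse into the Trainer kwargs (the path and the parse_args list are invented for the demo; the real code additionally normalizes the path via fixPathAction):

import argparse

p = argparse.ArgumentParser()
p.add_argument('--config-training-file', dest='config_training_file')
p.add_argument('--auto-gen-config', action='store_true', dest='auto_gen_config', default=False)
args = p.parse_args(['--config-training-file', 'workspace/model_conf', '--auto-gen-config'])

kwargs = {'config_training_file': args.config_training_file,
          'auto_gen_config': args.auto_gen_config}
print(kwargs)  # {'config_training_file': 'workspace/model_conf', 'auto_gen_config': True}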


@ -146,17 +146,20 @@ def main (model_class_name=None,
io.log_info ("Use 'recover original filename' to determine the exact duplicates.")
io.log_info ("")
# build frames manually
frames = []
for p in input_path_image_paths:
path = Path(p)
data = alignments.get(path.stem, None)
if data == None:
frame = InteractiveMergerSubprocessor.Frame(FrameInfo(frame_info=frame_info))
cur_path = Path(p)
data = alignments.get(cur_path.stem, None)
if data == None:
frame_info=FrameInfo(filepath=cur_path)
frame = InteractiveMergerSubprocessor.Frame(frame_info=frame_info)
else:
landmarks_list = [d[0] for d in data]
dfl_images_list = [d[1] for d in data]
frame_info=FrameInfo(filepath=path, landmarks_list=landmarks_list, dfl_images_list=dfl_images_list)
frame_info=FrameInfo(filepath=cur_path, landmarks_list=landmarks_list, dfl_images_list=dfl_images_list)
frame = InteractiveMergerSubprocessor.Frame(frame_info=frame_info)
frames.append(frame)


@ -71,6 +71,7 @@ def trainerThread (s2c, c2s, e,
debug=False,
tensorboard_dir=None,
start_tensorboard=False,
config_training_file=None,
dump_ckpt=False,
**kwargs):
while True:
@ -101,6 +102,8 @@ def trainerThread (s2c, c2s, e,
force_gpu_idxs=force_gpu_idxs,
cpu_only=cpu_only,
silent_start=silent_start,
config_training_file=config_training_file,
auto_gen_config=kwargs.get("auto_gen_config", False),
debug=debug)
is_reached_goal = model.is_reached_iter_goal()


@ -330,7 +330,13 @@ class InteractiveMergerSubprocessor(Subprocessor):
'z' : lambda cfg,shift_pressed: cfg.toggle_masked_hist_match(),
'x' : lambda cfg,shift_pressed: cfg.toggle_mask_mode(),
'c' : lambda cfg,shift_pressed: cfg.toggle_color_transfer_mode(),
'n' : lambda cfg,shift_pressed: cfg.toggle_sharpen_mode(),
'n' : lambda cfg,shift_pressed: cfg.toggle_sharpen_mode_multi(shift_pressed),
'9' : lambda cfg,shift_pressed: cfg.add_pre_sharpen_power(1),
'8' : lambda cfg,shift_pressed: cfg.add_pre_sharpen_power(-1),
'(' : lambda cfg,shift_pressed: cfg.add_morph_power(1),
'*' : lambda cfg,shift_pressed: cfg.add_morph_power(-1),
'b' : lambda cfg,shift_pressed: cfg.toggle_two_pass(),
'7' : lambda cfg,shift_pressed: cfg.toggle_debug_mode(),
}
self.masked_keys = list(self.masked_keys_funcs.keys())
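The hotkey table is a plain dict of two-argument lambdas; a minimal sketch of the dispatch pattern (the Cfg class here is a stand-in, not the real MergerConfig):

class Cfg:                      # stand-in config with one toggle
    def __init__(self):
        self.two_pass = False
    def toggle_two_pass(self):
        self.two_pass = not self.two_pass

funcs = {'b': lambda cfg, shift_pressed: cfg.toggle_two_pass()}

cfg = Cfg()
key, shift = 'b', False
if key in funcs:
    funcs[key](cfg, shift)      # look up the handler and call it with the shared state
print(cfg.two_pass)             # True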


@ -16,7 +16,7 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,
face_enhancer_func,
xseg_256_extract_func,
cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmarks, dfl_img):
img_size = img_bgr.shape[1], img_bgr.shape[0]
img_face_mask_a = LandmarksProcessor.get_image_hull_mask (img_bgr.shape, img_face_landmarks)
@ -60,13 +60,36 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,
dst_face_mask_a_0 = cv2.warpAffine( img_face_mask_a, face_mat, (output_size, output_size), flags=cv2.INTER_CUBIC )
dst_face_mask_a_0 = np.clip(dst_face_mask_a_0, 0, 1)
if cfg.pre_sharpen_mode > 0 and cfg.pre_sharpen_power != 0:
if cfg.pre_sharpen_mode==1:
dst_face_bgr = imagelib.gaussian_sharpen(dst_face_bgr, amount=cfg.pre_sharpen_power)
elif cfg.pre_sharpen_mode==2:
dst_face_bgr = imagelib.unsharpen_mask(dst_face_bgr, amount=cfg.pre_sharpen_power)
dst_face_bgr = np.clip(dst_face_bgr, 0, 1, out=dst_face_bgr)
predictor_input_bgr = cv2.resize (dst_face_bgr, (input_size,input_size) )
predicted = predictor_func (predictor_input_bgr)
predicted = predictor_func (predictor_input_bgr, func_morph_factor = cfg.morph_power/100.0) if cfg.is_morphable else predictor_func (predictor_input_bgr)
prd_face_bgr = np.clip (predicted[0], 0, 1.0)
prd_face_mask_a_0 = np.clip (predicted[1], 0, 1.0)
prd_face_dst_mask_a_0 = np.clip (predicted[2], 0, 1.0)
if cfg.two_pass:
predicted_2 = predictor_func (prd_face_bgr, func_morph_factor = 1) if cfg.is_morphable else predictor_func (prd_face_bgr)
prd_face_bgr = np.clip (predicted_2[0], 0, 1.0)
prd_face_mask_a_0 = np.clip (predicted_2[1], 0, 1.0)
prd_face_dst_mask_a_0 = np.clip (predicted_2[2], 0, 1.0)
if cfg.debug_mode:
prd_face_bgr_unchanged = prd_face_bgr.copy()
if cfg.super_resolution_power != 0:
prd_face_bgr_enhanced = face_enhancer_func(prd_face_bgr, is_tanh=True, preserve_size=False)
@ -333,6 +356,15 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,
if out_img is None:
out_img = img_bgr.copy()
if 'raw' not in cfg.mode and cfg.debug_mode:
ph, pw = predictor_input_bgr.shape[:2]
oh, ow = out_img.shape[:2]
out_img[oh-ph:,ow-pw:] = predictor_input_bgr
ph, pw = prd_face_bgr_unchanged.shape[:2]
out_img[oh-ph:,0:pw] = prd_face_bgr_unchanged
return out_img, out_merging_mask_a
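The debug overlay pastes the raw predictor input into the bottom-right corner of the frame and the untouched prediction into the bottom-left, using end-relative slicing; a small NumPy sketch of that paste (sizes are arbitrary):

import numpy as np

out_img = np.zeros((480, 640, 3), dtype=np.float32)  # hypothetical merged frame
patch = np.ones((128, 128, 3), dtype=np.float32)     # hypothetical debug patch

ph, pw = patch.shape[:2]
oh, ow = out_img.shape[:2]
out_img[oh-ph:, ow-pw:] = patch   # bottom-right corner
out_img[oh-ph:, :pw] = patch      # bottom-left corner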


@ -21,7 +21,7 @@ class MergerConfig(object):
):
self.type = type
self.sharpen_dict = {0:"None", 1:'box', 2:'gaussian'}
self.sharpen_dict = {0:"None", 1:'box', 2:'gaussian', 3:'unsharpen'}
#default changeable params
self.sharpen_mode = sharpen_mode
@ -97,6 +97,9 @@ mask_mode_dict = {0:'full',
ctm_dict = { 0: "None", 1:"rct", 2:"lct", 3:"mkl", 4:"mkl-m", 5:"idt", 6:"idt-m", 7:"sot-m", 8:"mix-m" }
ctm_str_dict = {None:0, "rct":1, "lct":2, "mkl":3, "mkl-m":4, "idt":5, "idt-m":6, "sot-m":7, "mix-m":8 }
pre_sharpen_dict = {0:"None", 1:'gaussian'} # 2:'unsharpen_mask'
class MergerConfigMasked(MergerConfig):
def __init__(self, face_type=FaceType.FULL,
@ -114,6 +117,12 @@ class MergerConfigMasked(MergerConfig):
image_denoise_power = 0,
bicubic_degrade_power = 0,
color_degrade_power = 0,
pre_sharpen_power = 0,
pre_sharpen_mode=0,
two_pass = False,
morph_power = 100,
is_morphable = False,
debug_mode = False,
**kwargs
):
@ -142,6 +151,12 @@ class MergerConfigMasked(MergerConfig):
self.image_denoise_power = image_denoise_power
self.bicubic_degrade_power = bicubic_degrade_power
self.color_degrade_power = color_degrade_power
self.two_pass = two_pass
self.pre_sharpen_power = pre_sharpen_power
self.pre_sharpen_mode = pre_sharpen_mode
self.morph_power = morph_power
self.is_morphable = is_morphable
self.debug_mode = debug_mode
def copy(self):
return copy.copy(self)
@ -152,7 +167,25 @@ class MergerConfigMasked(MergerConfig):
def toggle_masked_hist_match(self):
if self.mode == 'hist-match':
self.masked_hist_match = not self.masked_hist_match
def toggle_two_pass(self):
self.two_pass = not self.two_pass
def toggle_debug_mode(self):
self.debug_mode = not self.debug_mode
def toggle_sharpen_mode_multi(self, pre_sharpen=False):
if pre_sharpen:
self.toggle_sharpen_mode_presharpen()
else:
self.toggle_sharpen_mode()
def toggle_sharpen_mode_presharpen(self):
a = list( pre_sharpen_dict.keys() )
self.pre_sharpen_mode = a[ (a.index(self.pre_sharpen_mode)+1) % len(a) ]
def add_hist_match_threshold(self, diff):
if self.mode == 'hist-match' or self.mode == 'seamless-hist-match':
self.hist_match_threshold = np.clip ( self.hist_match_threshold+diff , 0, 255)
@ -187,6 +220,13 @@ class MergerConfigMasked(MergerConfig):
def add_bicubic_degrade_power(self, diff):
self.bicubic_degrade_power = np.clip ( self.bicubic_degrade_power+diff, 0, 100)
def add_pre_sharpen_power(self, diff):
self.pre_sharpen_power = np.clip ( self.pre_sharpen_power+diff, 0, 200)
def add_morph_power(self, diff):
if self.is_morphable:
self.morph_power = np.clip ( self.morph_power+diff , 0, 100)
def ask_settings(self):
s = """Choose mode: \n"""
@ -214,6 +254,13 @@ class MergerConfigMasked(MergerConfig):
self.erode_mask_modifier = np.clip ( io.input_int ("Choose erode mask modifier", 0, add_info="-400..400"), -400, 400)
self.blur_mask_modifier = np.clip ( io.input_int ("Choose blur mask modifier", 0, add_info="0..400"), 0, 400)
self.motion_blur_power = np.clip ( io.input_int ("Choose motion blur power", 0, add_info="0..100"), 0, 100)
self.two_pass = io.input_bool("Use two pass mode?", False, help_message="Can enhance results by feeding the network's output back into the network.")
self.pre_sharpen_power = np.clip (io.input_int ("Choose pre_sharpen power", 0, help_message="Can enhance results by pre-sharpening the face before feeding it to the network.", add_info="0..200" ), 0, 200)
if self.is_morphable:
self.morph_power = np.clip (io.input_int ("Choose morph_power for morphable models", 100, add_info="0..100" ), 0, 100)
self.output_face_scale = np.clip (io.input_int ("Choose output face scale modifier", 0, add_info="-50..50" ), -50, 50)
@ -249,7 +296,13 @@ class MergerConfigMasked(MergerConfig):
self.super_resolution_power == other.super_resolution_power and \
self.image_denoise_power == other.image_denoise_power and \
self.bicubic_degrade_power == other.bicubic_degrade_power and \
self.color_degrade_power == other.color_degrade_power
self.color_degrade_power == other.color_degrade_power and \
self.pre_sharpen_power == other.pre_sharpen_power and \
self.pre_sharpen_mode == other.pre_sharpen_mode and \
self.two_pass == other.two_pass and \
self.morph_power == other.morph_power and \
self.is_morphable == other.is_morphable and \
self.debug_mode == other.debug_mode
return False
@ -284,7 +337,14 @@ class MergerConfigMasked(MergerConfig):
r += (f"""image_denoise_power: {self.image_denoise_power}\n"""
f"""bicubic_degrade_power: {self.bicubic_degrade_power}\n"""
f"""color_degrade_power: {self.color_degrade_power}\n""")
r += f"""pre_sharpen_power: {self.pre_sharpen_power}\n"""
r += f"""pre_sharpen_mode: {pre_sharpen_dict[self.pre_sharpen_mode]}\n"""
r += f"""two_pass: {self.two_pass}\n"""
r += f"""morph_power: {self.morph_power}\n"""
#r += f"""is_morphable: {self.is_morphable}\n"""
r += f"""debug_mode: {self.debug_mode}\n"""
r += "================"
return r
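Two small patterns recur in these config methods: cycling a mode through a dict's keys with modular arithmetic, and clamping an accumulated power with np.clip; a sketch of both (the dict contents mirror pre_sharpen_dict above):

import numpy as np

pre_sharpen_dict = {0: "None", 1: 'gaussian'}

mode = 0
keys = list(pre_sharpen_dict.keys())
mode = keys[(keys.index(mode) + 1) % len(keys)]   # 0 -> 1, then 1 -> 0 on the next press

power = 0
power = int(np.clip(power + 10, 0, 200))          # same clamp as add_pre_sharpen_power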

Binary file not shown. (Image changed: 260 KiB before, 323 KiB after.)


@ -1,5 +1,6 @@
import colorsys
import inspect
from io import FileIO
import json
import multiprocessing
import operator
@ -10,6 +11,9 @@ import tempfile
import time
import datetime
from pathlib import Path
import yaml
from jsonschema import validate, ValidationError
import models
import cv2
import numpy as np
@ -35,6 +39,8 @@ class ModelBase(object):
cpu_only=False,
debug=False,
force_model_class_name=None,
config_training_file=None,
auto_gen_config=False,
silent_start=False,
**kwargs):
self.is_training = is_training
@ -44,6 +50,8 @@ class ModelBase(object):
self.training_data_dst_path = training_data_dst_path
self.pretraining_data_path = pretraining_data_path
self.pretrained_model_path = pretrained_model_path
self.config_training_file = config_training_file
self.auto_gen_config = auto_gen_config
self.no_preview = no_preview
self.debug = debug
@ -141,13 +149,51 @@ class ModelBase(object):
self.choosed_gpu_indexes = None
model_data = {}
# True if the yaml conf file exists
self.config_file_exists = False
# True if the user chooses to read options from the external or internal conf file
self.read_from_conf = False
config_error = False
# check if config_training_file mode is enabled
if config_training_file is not None:
self.config_file_path = Path(config_training_file)
# Create the folder if it doesn't exist
if not self.config_file_path.exists():
os.makedirs(self.config_file_path, exist_ok=True)
# Ask if user wants to read options from external or internal conf file only if external conf file exists
# or auto_gen_config is true
if Path(self.get_strpath_configuration_path()).exists() or self.auto_gen_config:
self.read_from_conf = io.input_bool(
f'Do you want to read training options from {"external" if self.auto_gen_config else "internal"} file?',
True,
'Read options from configuration file instead of asking one by one each option'
)
# If user decides to read from external or internal conf file
if self.read_from_conf:
# Try to read the dictionary from the external or internal yaml file according
# to the value of auto_gen_config
self.options = self.read_from_config_file(auto_gen=self.auto_gen_config)
# If the options dict is empty, options will be loaded from the .dat file
if self.options is None:
io.log_info(f"Config file validation error, check your config")
config_error = True
elif not self.options.keys():
io.log_info(f"Configuration file doesn't exist. A standard configuration file will be created.")
else:
self.config_file_exists = True
else:
io.log_info(f"Configuration file doesn't exist. A standard configuration file will be created.")
self.model_data_path = Path( self.get_strpath_storage_for_file('data.dat') )
if self.model_data_path.exists():
io.log_info (f"Loading {self.model_name} model...")
model_data = pickle.loads ( self.model_data_path.read_bytes() )
self.iter = model_data.get('iter',0)
if self.iter != 0:
self.options = model_data['options']
# read options from the .dat file only if the user chooses not to read options from the yaml file
if not self.config_file_exists:
self.options = model_data['options']
self.loss_history = model_data.get('loss_history', [])
self.sample_for_preview = model_data.get('sample_for_preview', None)
self.choosed_gpu_indexes = model_data.get('choosed_gpu_indexes', None)
@ -183,6 +229,11 @@ class ModelBase(object):
if self.is_first_run():
# save as default options only on first-run model initialization
self.default_options_path.write_bytes( pickle.dumps (self.options) )
# save config file
if self.config_training_file is not None and not self.config_file_exists and not config_error:
self.save_config_file(self.auto_gen_config)
self.session_name = self.options.get('session_name', "")
self.autobackup_hour = self.options.get('autobackup_hour', 0)
self.maximum_n_backups = self.options.get('maximum_n_backups', 24)
@ -364,7 +415,7 @@ class ModelBase(object):
return ( ('loss_src', 0), ('loss_dst', 0) )
#overridable
def onGetPreview(self, sample, for_history=False, filenames=None):
def onGetPreview(self, sample, for_history=False):
#you can return multiple previews
#return [ ('preview_name',preview_rgb), ... ]
return []
@ -382,6 +433,10 @@ class ModelBase(object):
#return predictor_func, predictor_input_shape, MergerConfig() for the model
raise NotImplementedError
#overridable
def get_config_schema_path(self):
raise NotImplementedError
def get_pretraining_data_path(self):
return self.pretraining_data_path
@ -392,7 +447,7 @@ class ModelBase(object):
return self.target_iter != 0 and self.iter >= self.target_iter
def get_previews(self):
return self.onGetPreview ( self.last_sample, filenames=self.last_sample_filenames)
return self.onGetPreview ( self.last_sample )
def get_static_previews(self):
return self.onGetPreview (self.sample_for_preview)
@ -429,6 +484,60 @@ class ModelBase(object):
self.autobackup_start_time += self.autobackup_hour*3600
self.create_backup()
def read_from_config_file(self, auto_gen=False):
"""
Read yaml config file and saves it into a dictionary
Args:
auto_gen (bool, optional): True if you want that a yaml file is readed from model folder. Defaults to False.
Returns:
[dict]: Returns the options dictionary if everything is alright otherwise an empty dictionary.
"""
fun = self.get_strpath_configuration_path if not auto_gen else self.get_model_conf_path
try:
with open(fun(), 'r') as file, open(self.get_config_schema_path(), 'r') as schema:
data = yaml.safe_load(file)
validate(data, yaml.safe_load(schema))
except FileNotFoundError:
return {}
except ValidationError as ve:
io.log_err(f"{ve}")
return None
for key, value in data.items():
if isinstance(value, bool):
continue
if isinstance(value, int):
data[key] = np.int32(value)
elif isinstance(value, float):
data[key] = np.float64(value)
return data
def save_config_file(self, auto_gen=False):
"""
Saves options dictionary in a yaml file.
Args:
auto_gen ([bool], optional): True if you want that a yaml file is generated inside model folder for each model. Defaults to None.
"""
saving_dict = {}
for key, value in self.options.items():
if isinstance(value, np.int32) or isinstance(value, np.float64):
saving_dict[key] = value.item()
else:
saving_dict[key] = value
fun = self.get_strpath_configuration_path if not auto_gen else self.get_model_conf_path
try:
with open(fun(), 'w') as file:
yaml.dump(saving_dict, file, sort_keys=False)
except OSError as exception:
io.log_info(f'Unable to write YAML configuration file: {exception}')
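A minimal, self-contained sketch of the read/validate/save round trip these two methods implement, using yaml and jsonschema directly (the schema, file name, and options are invented for the demo; float promotion to np.float64 is omitted for brevity):

import yaml
import numpy as np
from jsonschema import validate, ValidationError

schema = {'type': 'object', 'properties': {'resolution': {'type': 'integer'}}}
options = {'resolution': np.int32(224), 'face_type': 'wf'}

# save: convert numpy scalars to plain Python types before dumping
plain = {k: v.item() if isinstance(v, (np.int32, np.float64)) else v for k, v in options.items()}
with open('demo_conf.yaml', 'w') as f:
    yaml.dump(plain, f, sort_keys=False)

# read: validate against the schema, then promote ints to numpy types
with open('demo_conf.yaml') as f:
    data = yaml.safe_load(f)
try:
    validate(data, schema)
except ValidationError:
    data = None                 # mirrors the "config_error" path above
if data is not None:
    data = {k: np.int32(v) if isinstance(v, int) and not isinstance(v, bool) else v
            for k, v in data.items()}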
def create_backup(self):
io.log_info ("Creating backup...", end='\r')
@ -476,19 +585,12 @@ class ModelBase(object):
def generate_next_samples(self):
sample = []
sample_filenames = []
for generator in self.generator_list:
if generator.is_initialized():
batch = generator.generate_next()
if type(batch) is tuple:
sample.append ( batch[0] )
sample_filenames.append( batch[1] )
else:
sample.append ( batch )
sample.append ( generator.generate_next() )
else:
sample.append ( [] )
self.last_sample = sample
self.last_sample_filenames = sample_filenames
return sample
#overridable
@ -568,9 +670,15 @@ class ModelBase(object):
def get_strpath_storage_for_file(self, filename):
return str( self.saved_models_path / ( self.get_model_name() + '_' + filename) )
def get_strpath_configuration_path(self):
return str(self.config_file_path / 'configuration_file.yaml')
def get_summary_path(self):
return self.get_strpath_storage_for_file('summary.txt')
def get_model_conf_path(self):
return self.get_strpath_storage_for_file('configuration_file.yaml')
def get_summary_text(self):
visible_options = self.options.copy()
visible_options.update(self.options_show_override)


@ -10,16 +10,16 @@ from facelib import FaceType
from models import ModelBase
from samplelib import *
from core.cv2ex import *
from utils.label_face import label_face_filename
from pathlib import Path
class AMPModel(ModelBase):
#override
def on_initialize_options(self):
default_retraining_samples = self.options['retraining_samples'] = self.load_or_def_option('retraining_samples', False)
# default_usefp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False)
default_resolution = self.options['resolution'] = self.load_or_def_option('resolution', 224)
default_face_type = self.options['face_type'] = self.load_or_def_option('face_type', 'wf')
default_face_type = self.options['face_type'] = self.load_or_def_option('face_type', 'f')
default_models_opt_on_gpu = self.options['models_opt_on_gpu'] = self.load_or_def_option('models_opt_on_gpu', True)
default_ae_dims = self.options['ae_dims'] = self.load_or_def_option('ae_dims', 256)
@ -28,12 +28,14 @@ class AMPModel(ModelBase):
default_e_dims = self.options['e_dims'] = self.load_or_def_option('e_dims', 64)
default_d_dims = self.options['d_dims'] = self.options.get('d_dims', None)
default_d_mask_dims = self.options['d_mask_dims'] = self.options.get('d_mask_dims', None)
default_morph_factor = self.options['morph_factor'] = self.options.get('morph_factor', 0.5)
default_eyes_mouth_prio = self.options['eyes_mouth_prio'] = self.load_or_def_option('eyes_mouth_prio', False)
default_morph_factor = self.options['morph_factor'] = self.load_or_def_option('morph_factor', 0.5)
default_masked_training = self.options['masked_training'] = self.load_or_def_option('masked_training', True)
default_eyes_prio = self.options['eyes_prio'] = self.load_or_def_option('eyes_prio', False)
default_mouth_prio = self.options['mouth_prio'] = self.load_or_def_option('mouth_prio', False)
default_uniform_yaw = self.options['uniform_yaw'] = self.load_or_def_option('uniform_yaw', False)
# Uncomment this only if you want to implement other loss functions
#default_loss_function = self.options['loss_function'] = self.load_or_def_option('loss_function', 'SSIM')
default_loss_function = self.options['loss_function'] = self.load_or_def_option('loss_function', 'SSIM')
default_blur_out_mask = self.options['blur_out_mask'] = self.load_or_def_option('blur_out_mask', False)
@ -42,35 +44,42 @@ class AMPModel(ModelBase):
default_lr_dropout = self.options['lr_dropout'] = self.load_or_def_option('lr_dropout', 'n')
default_random_warp = self.options['random_warp'] = self.load_or_def_option('random_warp', True)
default_random_hsv_power = self.options['random_hsv_power'] = self.load_or_def_option('random_hsv_power', 0.0)
default_random_downsample = self.options['random_downsample'] = self.load_or_def_option('random_downsample', False)
default_random_noise = self.options['random_noise'] = self.load_or_def_option('random_noise', False)
default_random_blur = self.options['random_blur'] = self.load_or_def_option('random_blur', False)
default_random_jpeg = self.options['random_jpeg'] = self.load_or_def_option('random_jpeg', False)
# Uncomment this only if you want to implement other loss functions
#default_background_power = self.options['background_power'] = self.load_or_def_option('background_power', 0.0)
default_background_power = self.options['background_power'] = self.load_or_def_option('background_power', 0.0)
default_ct_mode = self.options['ct_mode'] = self.load_or_def_option('ct_mode', 'none')
default_random_color = self.options['random_color'] = self.load_or_def_option('random_color', False)
default_clipgrad = self.options['clipgrad'] = self.load_or_def_option('clipgrad', False)
default_use_fp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False)
default_usefp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False)
ask_override = self.ask_override()
ask_override = False if self.read_from_conf else self.ask_override()
if self.is_first_run() or ask_override:
self.ask_autobackup_hour()
self.ask_write_preview_history()
self.ask_target_iter()
self.ask_retraining_samples()
self.ask_random_src_flip()
self.ask_random_dst_flip()
self.ask_batch_size(8)
# self.options['use_fp16'] = io.input_bool ("Use fp16", default_usefp16, help_message='Increases training/inference speed, reduces model size. Model may crash. Enable it after 1-5k iters.')
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
self.ask_autobackup_hour()
self.ask_session_name()
self.ask_maximum_n_backups()
self.ask_write_preview_history()
self.ask_target_iter()
self.ask_retraining_samples()
self.ask_random_src_flip()
self.ask_random_dst_flip()
self.ask_batch_size(8)
self.options['use_fp16'] = io.input_bool ("Use fp16", default_usefp16, help_message='Increases training/inference speed, reduces model size. Model may crash. Enable it after 1-5k iters.')
if self.is_first_run():
resolution = io.input_int("Resolution", default_resolution, add_info="64-640", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 32 .")
resolution = np.clip ( (resolution // 32) * 32, 64, 640)
self.options['resolution'] = resolution
self.options['face_type'] = io.input_str ("Face type", default_face_type, ['f','wf','head'], help_message="whole face / head").lower()
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
resolution = io.input_int("Resolution", default_resolution, add_info="64-640", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 32 .")
resolution = np.clip ( (resolution // 32) * 32, 64, 640)
self.options['resolution'] = resolution
self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf','head', 'custom'], help_message="Half / mid face / full face / whole face / head / custom. Half face has better resolution, but covers less area of cheeks. Mid face is 30% wider than half face. 'Whole face' covers full area of face include forehead. 'head' covers full head, but requires XSeg for src and dst faceset.").lower()
default_d_dims = self.options['d_dims'] = self.load_or_def_option('d_dims', 64)
@ -80,59 +89,83 @@ class AMPModel(ModelBase):
default_d_mask_dims = self.options['d_mask_dims'] = self.load_or_def_option('d_mask_dims', default_d_mask_dims)
if self.is_first_run():
self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dimensions", default_ae_dims, add_info="32-1024", help_message="All face information will packed to AE dims. If amount of AE dims are not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )
self.options['inter_dims'] = np.clip ( io.input_int("Inter dimensions", default_inter_dims, add_info="32-2048", help_message="Should be equal or more than AutoEncoder dimensions. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 2048 )
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dimensions", default_ae_dims, add_info="32-1024", help_message="All face information will be packed into AE dims. If the amount of AE dims is not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )
self.options['inter_dims'] = np.clip ( io.input_int("Inter dimensions", default_inter_dims, add_info="32-2048", help_message="Should be equal or more than AutoEncoder dimensions. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 2048 )
e_dims = np.clip ( io.input_int("Encoder dimensions", default_e_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
self.options['e_dims'] = e_dims + e_dims % 2
e_dims = np.clip ( io.input_int("Encoder dimensions", default_e_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
self.options['e_dims'] = e_dims + e_dims % 2
d_dims = np.clip ( io.input_int("Decoder dimensions", default_d_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
self.options['d_dims'] = d_dims + d_dims % 2
d_dims = np.clip ( io.input_int("Decoder dimensions", default_d_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
self.options['d_dims'] = d_dims + d_dims % 2
d_mask_dims = np.clip ( io.input_int("Decoder mask dimensions", default_d_mask_dims, add_info="16-256", help_message="Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality." ), 16, 256 )
self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2
morph_factor = np.clip ( io.input_number ("Morph factor.", default_morph_factor, add_info="0.1 .. 0.5", help_message="Typical fine value is 0.5"), 0.1, 0.5 )
self.options['morph_factor'] = morph_factor
d_mask_dims = np.clip ( io.input_int("Decoder mask dimensions", default_d_mask_dims, add_info="16-256", help_message="Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality." ), 16, 256 )
self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2
if self.is_first_run() or ask_override:
self.options['eyes_mouth_prio'] = io.input_bool ("Eyes and mouth priority", default_eyes_mouth_prio, help_message='Helps to fix eye problems during training like "alien eyes" and wrong eyes direction. Also makes the detail of the teeth higher.')
self.options['uniform_yaw'] = io.input_bool ("Uniform yaw distribution of samples", default_uniform_yaw, help_message='Helps to fix blurry side faces due to small amount of them in the faceset.')
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
morph_factor = np.clip ( io.input_number ("Morph factor.", default_morph_factor, add_info="0.1 .. 0.5", help_message="Typical fine value is 0.5"), 0.1, 0.5 )
self.options['morph_factor'] = morph_factor
self.options['blur_out_mask'] = io.input_bool ("Blur out mask", default_blur_out_mask, help_message='Blurs nearby area outside of applied face mask of training samples. The result is the background near the face is smoothed and less noticeable on swapped face. The exact xseg mask in src and dst faceset is required.')
self.options['lr_dropout'] = io.input_str (f"Use learning rate dropout", default_lr_dropout, ['n','y','cpu'], help_message="When the face is trained enough, you can enable this option to get extra sharpness and reduce subpixel shake for less amount of iterations. Enabled it before `disable random warp` and before GAN. \nn - disabled.\ny - enabled\ncpu - enabled on CPU. This allows not to use extra VRAM, sacrificing 20% time of iteration.")
if self.options['face_type'] == 'wf' or self.options['face_type'] == 'head':
self.options['masked_training'] = io.input_bool ("Masked training", default_masked_training, help_message="This option is available only for 'whole_face' or 'head' type. Masked training clips training area to full_face mask or XSeg mask, thus network will train the faces properly.")
self.options['eyes_prio'] = io.input_bool ("Eyes priority", default_eyes_prio, help_message='Helps to fix eye problems during training like "alien eyes" and wrong eyes direction ( especially on HD architectures ) by forcing the neural network to train eyes with higher priority. before/after https://i.imgur.com/YQHOuSR.jpg ')
self.options['mouth_prio'] = io.input_bool ("Mouth priority", default_mouth_prio, help_message='Helps to fix mouth problems during training by forcing the neural network to train mouth with higher priority similar to eyes ')
self.options['uniform_yaw'] = io.input_bool ("Uniform yaw distribution of samples", default_uniform_yaw, help_message='Helps to fix blurry side faces due to small amount of them in the faceset.')
if self.options['masked_training']:
self.options['blur_out_mask'] = io.input_bool ("Blur out mask", default_blur_out_mask, help_message='Blurs nearby area outside of applied face mask of training samples. The result is the background near the face is smoothed and less noticeable on swapped face. The exact xseg mask in src and dst faceset is required.')
self.options['loss_function'] = io.input_str(f"Loss function", default_loss_function, ['SSIM', 'MS-SSIM', 'MS-SSIM+L1'], help_message="Change loss function used for image quality assessment.")
self.options['lr_dropout'] = io.input_str (f"Use learning rate dropout", default_lr_dropout, ['n','y','cpu'], help_message="When the face is trained enough, you can enable this option to get extra sharpness and reduce subpixel shake in fewer iterations. Enable it before `disable random warp` and before GAN. \nn - disabled.\ny - enabled\ncpu - enabled on CPU. This avoids using extra VRAM, sacrificing 20% of iteration time.")
default_gan_power = self.options['gan_power'] = self.load_or_def_option('gan_power', 0.0)
default_gan_version = self.options['gan_version'] = self.load_or_def_option('gan_version', 2)
default_gan_patch_size = self.options['gan_patch_size'] = self.load_or_def_option('gan_patch_size', self.options['resolution'] // 8)
default_gan_dims = self.options['gan_dims'] = self.load_or_def_option('gan_dims', 16)
default_gan_smoothing = self.options['gan_smoothing'] = self.load_or_def_option('gan_smoothing', 0.1)
default_gan_noise = self.options['gan_noise'] = self.load_or_def_option('gan_noise', 0.0)
if self.is_first_run() or ask_override:
self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default model and optimizer weights are placed on GPU to accelerate the process. You can place they on CPU to free up extra VRAM, thus set bigger dimensions.")
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default model and optimizer weights are placed on GPU to accelerate the process. You can place them on CPU to free up extra VRAM, thus set bigger dimensions.")
self.options['adabelief'] = io.input_bool ("Use AdaBelief optimizer?", default_adabelief, help_message="Use AdaBelief optimizer. It requires more VRAM, but the accuracy and the generalization of the model is higher.")
self.options['adabelief'] = io.input_bool ("Use AdaBelief optimizer?", default_adabelief, help_message="Use AdaBelief optimizer. It requires more VRAM, but the accuracy and the generalization of the model is higher.")
self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake for less amount of iterations.")
self.options['random_downsample'] = io.input_bool("Enable random downsample of samples", default_random_downsample, help_message="")
self.options['random_noise'] = io.input_bool("Enable random noise added to samples", default_random_noise, help_message="")
self.options['random_blur'] = io.input_bool("Enable random blur of samples", default_random_blur, help_message="")
self.options['random_jpeg'] = io.input_bool("Enable random jpeg compression of samples", default_random_jpeg, help_message="")
self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake for less amount of iterations.")
self.options['random_downsample'] = io.input_bool("Enable random downsample of samples", default_random_downsample, help_message="")
self.options['random_noise'] = io.input_bool("Enable random noise added to samples", default_random_noise, help_message="")
self.options['random_blur'] = io.input_bool("Enable random blur of samples", default_random_blur, help_message="")
self.options['random_jpeg'] = io.input_bool("Enable random jpeg compression of samples", default_random_jpeg, help_message="")
self.options['random_hsv_power'] = np.clip ( io.input_number ("Random hue/saturation/light intensity", default_random_hsv_power, add_info="0.0 .. 0.3", help_message="Random hue/saturation/light intensity applied to the src face set only at the input of the neural network. Stabilizes color perturbations during face swapping. Reduces the quality of the color transfer by selecting the closest one in the src faceset. Thus the src faceset must be diverse enough. Typical fine value is 0.05"), 0.0, 0.3 )
self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 5.0", help_message="Forces the neural network to learn small details of the face. Enable it only when the face is trained enough with random_warp(off), and don't disable. The higher the value, the higher the chances of artifacts. Typical fine value is 0.1"), 0.0, 5.0 )
self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 5.0", help_message="Forces the neural network to learn small details of the face. Enable it only when the face is trained enough with random_warp(off), and don't disable. The higher the value, the higher the chances of artifacts. Typical fine value is 0.1"), 0.0, 5.0 )
if self.options['gan_power'] != 0.0:
gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher patch size, the higher the quality, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is resolution / 8." ), 3, 640 )
self.options['gan_patch_size'] = gan_patch_size
gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-512", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 512 )
self.options['gan_dims'] = gan_dims
if self.options['gan_power'] != 0.0:
self.options['gan_version'] = np.clip (io.input_int("GAN version", default_gan_version, add_info="2 or 3", help_message="Choose GAN version (v2: 7/16/2020, v3: 1/3/2021):"), 2, 3)
#self.options['background_power'] = np.clip ( io.input_number("Background power", default_background_power, add_info="0.0..1.0", help_message="Learn the area outside of the mask. Helps smooth out area near the mask boundaries. Can be used at any time"), 0.0, 1.0 )
if self.options['gan_version'] == 3:
gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher patch size, the higher the quality, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is resolution / 8." ), 3, 640 )
self.options['gan_patch_size'] = gan_patch_size
self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot', 'fs-aug'], help_message="Change color distribution of src samples close to dst samples. Try all modes to find the best.")
self.options['random_color'] = io.input_bool ("Random color", default_random_color, help_message="Samples are randomly rotated around the L axis in LAB colorspace, helps generalize training")
gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-64", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 64 )
self.options['gan_dims'] = gan_dims
self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.")
self.options['gan_smoothing'] = np.clip ( io.input_number("GAN label smoothing", default_gan_smoothing, add_info="0 - 0.5", help_message="Uses soft labels with values slightly off from 0/1 for GAN, has a regularizing effect"), 0, 0.5)
self.options['gan_noise'] = np.clip ( io.input_number("GAN noisy labels", default_gan_noise, add_info="0 - 0.5", help_message="Marks some images with the wrong label, helps prevent collapse"), 0, 0.5)
self.options['background_power'] = np.clip ( io.input_number("Background power", default_background_power, add_info="0.0..1.0", help_message="Learn the area outside of the mask. Helps smooth out area near the mask boundaries. Can be used at any time"), 0.0, 1.0 )
self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot', 'fs-aug'], help_message="Change color distribution of src samples close to dst samples. Try all modes to find the best.")
self.options['random_color'] = io.input_bool ("Random color", default_random_color, help_message="Samples are randomly rotated around the L axis in LAB colorspace, helps generalize training")
self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.")
self.gan_model_changed = (default_gan_patch_size != self.options['gan_patch_size']) or (default_gan_dims != self.options['gan_dims'])
@ -152,16 +185,26 @@ class AMPModel(ModelBase):
inter_res = self.inter_res = resolution // 32
d_dims = self.options['d_dims']
d_mask_dims = self.options['d_mask_dims']
face_type = self.face_type = {'f' : FaceType.FULL,
'wf' : FaceType.WHOLE_FACE,
'head' : FaceType.HEAD}[ self.options['face_type'] ]
self.face_type = {'h' : FaceType.HALF,
'mf' : FaceType.MID_FULL,
'f' : FaceType.FULL,
'wf' : FaceType.WHOLE_FACE,
'custom' : FaceType.CUSTOM,
'head' : FaceType.HEAD}[ self.options['face_type'] ]
morph_factor = self.options['morph_factor']
gan_power = self.gan_power = self.options['gan_power']
random_warp = self.options['random_warp']
random_hsv_power = self.options['random_hsv_power']
eyes_mouth_prio = self.options['eyes_mouth_prio']
blur_out_mask = self.options['blur_out_mask']
if 'eyes_mouth_prio' in self.options:
self.options.pop('eyes_mouth_prio')
bg_factor = self.options['background_power']
eyes_prio = self.options['eyes_prio']
mouth_prio = self.options['mouth_prio']
masked_training = self.options['masked_training']
blur_out_mask = self.options['blur_out_mask'] if masked_training else False
ct_mode = self.options['ct_mode']
if ct_mode == 'none':
@ -330,6 +373,14 @@ class AMPModel(ModelBase):
[self.decoder , 'decoder.npy'] ]
if self.is_training:
if gan_power != 0:
if self.options['gan_version'] == 2:
self.GAN = nn.UNetPatchDiscriminatorV2(patch_size=resolution//16, in_ch=input_ch, name="D_src", use_fp16=self.options['use_fp16'])
self.model_filename_list += [ [self.GAN, 'D_src_v2.npy'] ]
else:
self.GAN = nn.UNetPatchDiscriminator(patch_size=self.options['gan_patch_size'], in_ch=input_ch, base_ch=self.options['gan_dims'], use_fp16=self.options['use_fp16'], name="D_src")
self.model_filename_list += [ [self.GAN, 'GAN.npy'] ]
# Initialize optimizers
clipnorm = 1.0 if self.options['clipgrad'] else 0.0
if self.options['lr_dropout'] in ['y','cpu']:
@ -341,17 +392,19 @@ class AMPModel(ModelBase):
self.G_weights = self.encoder.get_weights() + self.decoder.get_weights()
OptimizerClass = nn.AdaBelief if adabelief else nn.RMSprop
self.src_dst_opt = OptimizerClass(lr=5e-5, lr_dropout=lr_dropout, lr_cos=lr_cos, clipnorm=clipnorm, name='src_dst_opt')
self.src_dst_opt = OptimizerClass(lr=5e-5, lr_dropout=lr_dropout, clipnorm=clipnorm, name='src_dst_opt')
self.src_dst_opt.initialize_variables (self.G_weights, vars_on_cpu=optimizer_vars_on_cpu)
self.model_filename_list += [ (self.src_dst_opt, 'src_dst_opt.npy') ]
if gan_power != 0:
self.GAN = nn.UNetPatchDiscriminator(patch_size=self.options['gan_patch_size'], in_ch=input_ch, base_ch=self.options['gan_dims'], use_fp16=use_fp16, name="GAN")
self.GAN_opt = OptimizerClass(lr=5e-5, lr_dropout=lr_dropout, lr_cos=lr_cos, clipnorm=clipnorm, name='GAN_opt')
self.GAN_opt.initialize_variables ( self.GAN.get_weights(), vars_on_cpu=optimizer_vars_on_cpu)
self.model_filename_list += [ [self.GAN, 'GAN.npy'],
[self.GAN_opt, 'GAN_opt.npy'] ]
if self.options['gan_version'] == 2:
self.GAN_opt = OptimizerClass(lr=5e-5, lr_dropout=lr_dropout, lr_cos=lr_cos, clipnorm=clipnorm, name='D_src_dst_opt')
self.GAN_opt.initialize_variables ( self.GAN.get_weights(), vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu')#+self.D_src_x2.get_weights()
self.model_filename_list += [ (self.GAN_opt, 'D_src_v2_opt.npy') ]
else:
self.GAN_opt = OptimizerClass(lr=5e-5, lr_dropout=lr_dropout, lr_cos=lr_cos, clipnorm=clipnorm, name='GAN_opt')
self.GAN_opt.initialize_variables ( self.GAN.get_weights(), vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu')#+self.D_src_x2.get_weights()
self.model_filename_list += [ (self.GAN_opt, 'GAN_opt.npy') ]
if self.is_training:
# Adjust batch size for multiple GPU
@ -372,6 +425,9 @@ class AMPModel(ModelBase):
gpu_G_loss_gradients = []
gpu_GAN_loss_gradients = []
def DLoss(labels,logits):
return tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits), axis=[1,2,3])
def DLossOnes(logits):
return tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(logits), logits=logits), axis=[1,2,3])
@ -387,10 +443,13 @@ class AMPModel(ModelBase):
gpu_warped_dst = self.warped_dst [batch_slice,:,:,:]
gpu_target_src = self.target_src [batch_slice,:,:,:]
gpu_target_dst = self.target_dst [batch_slice,:,:,:]
gpu_target_srcm = self.target_srcm[batch_slice,:,:,:]
gpu_target_srcm_all = self.target_srcm[batch_slice,:,:,:]
gpu_target_srcm_em = self.target_srcm_em[batch_slice,:,:,:]
gpu_target_dstm = self.target_dstm[batch_slice,:,:,:]
gpu_target_dstm_all = self.target_dstm[batch_slice,:,:,:]
gpu_target_dstm_em = self.target_dstm_em[batch_slice,:,:,:]
gpu_target_srcm_anti = 1-gpu_target_srcm_all
gpu_target_dstm_anti = 1-gpu_target_dstm_all
# process model tensors
gpu_src_code = self.encoder (gpu_warped_src)
@ -421,58 +480,119 @@ class AMPModel(ModelBase):
gpu_pred_dst_dst_list.append(gpu_pred_dst_dst), gpu_pred_dst_dstm_list.append(gpu_pred_dst_dstm)
gpu_pred_src_dst_list.append(gpu_pred_src_dst), gpu_pred_src_dstm_list.append(gpu_pred_src_dstm)
gpu_target_srcm_anti = 1-gpu_target_srcm
gpu_target_dstm_anti = 1-gpu_target_dstm
gpu_target_srcm_gblur = nn.gaussian_blur(gpu_target_srcm, resolution // 32)
gpu_target_dstm_gblur = nn.gaussian_blur(gpu_target_dstm, resolution // 32)
gpu_target_srcm_blur = tf.clip_by_value(gpu_target_srcm_gblur, 0, 0.5) * 2
gpu_target_dstm_blur = tf.clip_by_value(gpu_target_dstm_gblur, 0, 0.5) * 2
gpu_target_srcm_anti_blur = 1.0-gpu_target_srcm_blur
gpu_target_dstm_anti_blur = 1.0-gpu_target_dstm_blur
if blur_out_mask:
sigma = resolution / 128
x = nn.gaussian_blur(gpu_target_src*gpu_target_srcm_anti, sigma)
y = 1-nn.gaussian_blur(gpu_target_srcm, sigma)
y = 1-nn.gaussian_blur(gpu_target_srcm_all, sigma)
y = tf.where(tf.equal(y, 0), tf.ones_like(y), y)
gpu_target_src = gpu_target_src*gpu_target_srcm + (x/y)*gpu_target_srcm_anti
gpu_target_src = gpu_target_src*gpu_target_srcm_all + (x/y)*gpu_target_srcm_anti
x = nn.gaussian_blur(gpu_target_dst*gpu_target_dstm_anti, sigma)
y = 1-nn.gaussian_blur(gpu_target_dstm, sigma)
y = 1-nn.gaussian_blur(gpu_target_dstm_all, sigma)
y = tf.where(tf.equal(y, 0), tf.ones_like(y), y)
gpu_target_dst = gpu_target_dst*gpu_target_dstm + (x/y)*gpu_target_dstm_anti
gpu_target_dst = gpu_target_dst*gpu_target_dstm_all + (x/y)*gpu_target_dstm_anti
gpu_target_src_masked = gpu_target_src*gpu_target_srcm_blur
gpu_target_dst_masked = gpu_target_dst*gpu_target_dstm_blur
gpu_target_src_anti_masked = gpu_target_src*gpu_target_srcm_anti_blur
gpu_target_dst_anti_masked = gpu_target_dst*gpu_target_dstm_anti_blur
# unpack masks from one combined mask
gpu_target_srcm = tf.clip_by_value (gpu_target_srcm_all, 0, 1)
gpu_target_dstm = tf.clip_by_value (gpu_target_dstm_all, 0, 1)
gpu_target_srcm_eye_mouth = tf.clip_by_value (gpu_target_srcm_em-1, 0, 1)
gpu_target_dstm_eye_mouth = tf.clip_by_value (gpu_target_dstm_em-1, 0, 1)
gpu_target_srcm_mouth = tf.clip_by_value (gpu_target_srcm_em-2, 0, 1)
gpu_target_dstm_mouth = tf.clip_by_value (gpu_target_dstm_em-2, 0, 1)
gpu_target_srcm_eyes = tf.clip_by_value (gpu_target_srcm_eye_mouth-gpu_target_srcm_mouth, 0, 1)
gpu_target_dstm_eyes = tf.clip_by_value (gpu_target_dstm_eye_mouth-gpu_target_dstm_mouth, 0, 1)
gpu_target_srcm_gblur = nn.gaussian_blur(gpu_target_srcm, resolution // 32)
gpu_target_dstm_gblur = nn.gaussian_blur(gpu_target_dstm, resolution // 32)
gpu_target_srcm_blur = tf.clip_by_value(gpu_target_srcm_gblur, 0, 0.5) * 2
gpu_target_dstm_blur = tf.clip_by_value(gpu_target_dstm_gblur, 0, 0.5) * 2
gpu_target_srcm_anti_blur = 1.0-gpu_target_srcm_blur
gpu_target_dstm_anti_blur = 1.0-gpu_target_dstm_blur
gpu_target_src_masked = gpu_target_src*gpu_target_srcm_blur if masked_training else gpu_target_src
gpu_target_dst_masked = gpu_target_dst*gpu_target_dstm_blur if masked_training else gpu_target_dst
gpu_target_src_anti_masked = gpu_target_src*gpu_target_srcm_anti_blur if masked_training else gpu_pred_src_src
gpu_target_dst_anti_masked = gpu_target_dst*gpu_target_dstm_anti_blur if masked_training else gpu_pred_dst_dst
gpu_pred_src_src_masked = gpu_pred_src_src*gpu_target_srcm_blur
gpu_pred_dst_dst_masked = gpu_pred_dst_dst*gpu_target_dstm_blur
gpu_pred_src_src_anti_masked = gpu_pred_src_src*gpu_target_srcm_anti_blur
gpu_pred_dst_dst_anti_masked = gpu_pred_dst_dst*gpu_target_dstm_anti_blur
# Structural loss
gpu_src_loss = tf.reduce_mean (5*nn.dssim(gpu_target_src_masked, gpu_pred_src_src_masked, max_val=1.0, filter_size=int(resolution/11.6)), axis=[1])
gpu_src_loss += tf.reduce_mean (5*nn.dssim(gpu_target_src_masked, gpu_pred_src_src_masked, max_val=1.0, filter_size=int(resolution/23.2)), axis=[1])
gpu_dst_loss = tf.reduce_mean (5*nn.dssim(gpu_target_dst_masked, gpu_pred_dst_dst_masked, max_val=1.0, filter_size=int(resolution/11.6) ), axis=[1])
gpu_dst_loss += tf.reduce_mean (5*nn.dssim(gpu_target_dst_masked, gpu_pred_dst_dst_masked, max_val=1.0, filter_size=int(resolution/23.2) ), axis=[1])
# Pixel loss
gpu_src_loss += tf.reduce_mean (10*tf.square(gpu_target_src_masked-gpu_pred_src_src_masked), axis=[1,2,3])
gpu_dst_loss += tf.reduce_mean (10*tf.square(gpu_target_dst_masked-gpu_pred_dst_dst_masked), axis=[1,2,3])
if self.options['loss_function'] == 'MS-SSIM':
gpu_src_loss = 10 * nn.MsSsim(bs_per_gpu, input_ch, resolution)(gpu_target_src_masked, gpu_pred_src_src_masked, max_val=1.0)
gpu_src_loss += tf.reduce_mean ( 10*tf.square ( gpu_target_src_masked - gpu_pred_src_src_masked ), axis=[1,2,3])
gpu_dst_loss = 10 * nn.MsSsim(bs_per_gpu, input_ch, resolution)(gpu_target_dst_masked, gpu_pred_dst_dst_masked, max_val=1.0)
gpu_dst_loss += tf.reduce_mean ( 10*tf.square ( gpu_target_dst_masked - gpu_pred_dst_dst_masked ), axis=[1,2,3])
if bg_factor > 0:
gpu_dst_loss += bg_factor * 10 * nn.MsSsim(bs_per_gpu, input_ch, resolution)(gpu_target_dst, gpu_pred_dst_dst, max_val=1.0)
gpu_dst_loss += bg_factor * tf.reduce_mean ( 10*tf.square ( gpu_target_dst - gpu_pred_dst_dst ), axis=[1,2,3])
gpu_src_loss += bg_factor * 10 * nn.MsSsim(bs_per_gpu, input_ch, resolution)(gpu_target_src, gpu_pred_src_src, max_val=1.0)
gpu_src_loss += bg_factor * tf.reduce_mean ( 10*tf.square ( gpu_target_src - gpu_pred_src_src ), axis=[1,2,3])
elif self.options['loss_function'] == 'MS-SSIM+L1':
gpu_src_loss = 10 * nn.MsSsim(bs_per_gpu, input_ch, resolution, use_l1=True)(gpu_target_src_masked, gpu_pred_src_src_masked, max_val=1.0)
gpu_dst_loss = 10 * nn.MsSsim(bs_per_gpu, input_ch, resolution, use_l1=True)(gpu_target_dst_masked, gpu_pred_dst_dst_masked, max_val=1.0)
if bg_factor > 0:
gpu_dst_loss += bg_factor * 10 * nn.MsSsim(bs_per_gpu, input_ch, resolution, use_l1=True)(gpu_target_dst, gpu_pred_dst_dst, max_val=1.0)
gpu_src_loss += bg_factor * 10 * nn.MsSsim(bs_per_gpu, input_ch, resolution, use_l1=True)(gpu_target_src, gpu_pred_src_src, max_val=1.0)
else:
gpu_src_loss = tf.reduce_mean (5*nn.dssim(gpu_target_src_masked, gpu_pred_src_src_masked, max_val=1.0, filter_size=int(resolution/11.6)), axis=[1])
gpu_src_loss += tf.reduce_mean (5*nn.dssim(gpu_target_src_masked, gpu_pred_src_src_masked, max_val=1.0, filter_size=int(resolution/23.2)), axis=[1])
gpu_dst_loss = tf.reduce_mean (5*nn.dssim(gpu_target_dst_masked, gpu_pred_dst_dst_masked, max_val=1.0, filter_size=int(resolution/11.6) ), axis=[1])
gpu_dst_loss += tf.reduce_mean (5*nn.dssim(gpu_target_dst_masked, gpu_pred_dst_dst_masked, max_val=1.0, filter_size=int(resolution/23.2) ), axis=[1])
# Pixel loss
gpu_dst_loss += tf.reduce_mean (10*tf.square(gpu_target_dst_masked-gpu_pred_dst_dst_masked), axis=[1,2,3])
gpu_src_loss += tf.reduce_mean (10*tf.square(gpu_target_src_masked-gpu_pred_src_src_masked), axis=[1,2,3])
if bg_factor > 0:
gpu_dst_loss += bg_factor * tf.reduce_mean ( 5*nn.dssim(gpu_target_dst, gpu_pred_dst_dst, max_val=1.0, filter_size=int(resolution/11.6)), axis=[1])
gpu_dst_loss += bg_factor * tf.reduce_mean ( 5*nn.dssim(gpu_target_dst, gpu_pred_dst_dst, max_val=1.0, filter_size=int(resolution/23.2)), axis=[1])
gpu_src_loss += bg_factor * tf.reduce_mean ( 5*nn.dssim(gpu_target_src, gpu_pred_src_src, max_val=1.0, filter_size=int(resolution/11.6)), axis=[1])
gpu_src_loss += bg_factor * tf.reduce_mean ( 5*nn.dssim(gpu_target_src, gpu_pred_src_src, max_val=1.0, filter_size=int(resolution/23.2)), axis=[1])
if bg_factor > 0:
gpu_dst_loss += bg_factor * tf.reduce_mean ( 10*tf.square ( gpu_target_dst - gpu_pred_dst_dst ), axis=[1,2,3])
gpu_src_loss += bg_factor * tf.reduce_mean ( 10*tf.square ( gpu_target_src - gpu_pred_src_src ), axis=[1,2,3])
# Eyes+mouth prio loss
if eyes_mouth_prio:
gpu_src_loss += tf.reduce_mean (300*tf.abs (gpu_target_src*gpu_target_srcm_em-gpu_pred_src_src*gpu_target_srcm_em), axis=[1,2,3])
gpu_dst_loss += tf.reduce_mean (300*tf.abs (gpu_target_dst*gpu_target_dstm_em-gpu_pred_dst_dst*gpu_target_dstm_em), axis=[1,2,3])
# if eyes_mouth_prio:
# gpu_src_loss += tf.reduce_mean (300*tf.abs (gpu_target_src*gpu_target_srcm_em-gpu_pred_src_src*gpu_target_srcm_em), axis=[1,2,3])
# gpu_dst_loss += tf.reduce_mean (300*tf.abs (gpu_target_dst*gpu_target_dstm_em-gpu_pred_dst_dst*gpu_target_dstm_em), axis=[1,2,3])
if eyes_prio or mouth_prio:
if eyes_prio and mouth_prio:
gpu_target_part_mask_src = gpu_target_srcm_eye_mouth
gpu_target_part_mask_dst = gpu_target_dstm_eye_mouth
elif eyes_prio:
gpu_target_part_mask_src = gpu_target_srcm_eyes
gpu_target_part_mask_dst = gpu_target_dstm_eyes
elif mouth_prio:
gpu_target_part_mask_src = gpu_target_srcm_mouth
gpu_target_part_mask_dst = gpu_target_dstm_mouth
gpu_src_loss += tf.reduce_mean ( 300*tf.abs ( gpu_target_src*gpu_target_part_mask_src - gpu_pred_src_src*gpu_target_part_mask_src ), axis=[1,2,3])
gpu_dst_loss += tf.reduce_mean ( 300*tf.abs ( gpu_target_dst*gpu_target_part_mask_dst - gpu_pred_dst_dst*gpu_target_part_mask_dst ), axis=[1,2,3])
# Mask loss
gpu_src_loss += tf.reduce_mean ( 10*tf.square( gpu_target_srcm - gpu_pred_src_srcm ),axis=[1,2,3] )
gpu_dst_loss += tf.reduce_mean ( 10*tf.square( gpu_target_dstm - gpu_pred_dst_dstm ),axis=[1,2,3] )
gpu_src_loss += tf.reduce_mean ( 10*tf.square( gpu_target_srcm_all - gpu_pred_src_srcm ),axis=[1,2,3] )
gpu_dst_loss += tf.reduce_mean ( 10*tf.square( gpu_target_dstm_all - gpu_pred_dst_dstm ),axis=[1,2,3] )
gpu_src_losses += [gpu_src_loss]
gpu_dst_losses += [gpu_dst_loss]
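The eyes/mouth masks arrive packed into a single channel; here is a tiny NumPy sketch of the unpacking arithmetic above, assuming the encoding 0 = background, 1 = face, 2 = eyes, 3 = mouth (the encoding is inferred from the clip arithmetic, not stated in the diff):

import numpy as np

em = np.array([[0., 1.], [2., 3.]], dtype=np.float32)  # one hypothetical combined-mask patch

eye_mouth = np.clip(em - 1, 0, 1)              # 1 where eyes or mouth
mouth     = np.clip(em - 2, 0, 1)              # 1 where mouth only
eyes      = np.clip(eye_mouth - mouth, 0, 1)   # 1 where eyes only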
@ -483,26 +603,50 @@ class AMPModel(ModelBase):
if gan_power != 0:
gpu_pred_src_src_d, gpu_pred_src_src_d2 = self.GAN(gpu_pred_src_src_masked)
gpu_pred_dst_dst_d, gpu_pred_dst_dst_d2 = self.GAN(gpu_pred_dst_dst_masked)
gpu_target_src_d, gpu_target_src_d2 = self.GAN(gpu_target_src_masked)
gpu_target_dst_d, gpu_target_dst_d2 = self.GAN(gpu_target_dst_masked)
gpu_GAN_loss = (DLossOnes (gpu_target_src_d) + DLossOnes (gpu_target_src_d2) + \
DLossZeros(gpu_pred_src_src_d) + DLossZeros(gpu_pred_src_src_d2) + \
DLossOnes (gpu_target_dst_d) + DLossOnes (gpu_target_dst_d2) + \
DLossZeros(gpu_pred_dst_dst_d) + DLossZeros(gpu_pred_dst_dst_d2)
) * (1.0 / 8)
def get_smooth_noisy_labels(label, tensor, smoothing=0.1, noise=0.05):
# Total number of label elements: batch size times every non-batch dim of the tensor.
num_labels = self.batch_size
for d in tensor.get_shape().as_list()[1:]:
num_labels *= d
# Sample each element independently: keep the requested label with probability
# 1-noise, flip it with probability noise (noisy labels).
probs = tf.math.log([[noise, 1-noise]]) if label == 1 else tf.math.log([[1-noise, noise]])
x = tf.random.categorical(probs, num_labels)
x = tf.cast(x, tf.float32)
# Label smoothing: scale so that "1" labels land at 1-smoothing (e.g. 0.9).
x = tf.math.scalar_mul(1-smoothing, x)
# x = x + (smoothing/num_labels)
x = tf.reshape(x, (self.batch_size,) + tuple(tensor.get_shape().as_list()[1:]))
return x
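As a sanity check on get_smooth_noisy_labels, the same labels can be produced eagerly in NumPy; a minimal sketch (the function name is mine):

import numpy as np

def smooth_noisy_labels_np(label, shape, smoothing=0.1, noise=0.05):
    # Keep each element's label with probability 1-noise, flip it with probability noise,
    # then scale so that "1" labels land at 1-smoothing (e.g. 0.9).
    keep = np.random.rand(*shape) >= noise
    x = np.where(keep, label, 1 - label).astype("float32")
    return x * (1.0 - smoothing)

labels = smooth_noisy_labels_np(1, (4, 8, 8, 1))  # mostly 0.9s, a few flipped 0.0s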
smoothing = self.options['gan_smoothing']
noise = self.options['gan_noise']
gpu_pred_src_src_d_ones = tf.ones_like(gpu_pred_src_src_d)
gpu_pred_src_src_d2_ones = tf.ones_like(gpu_pred_src_src_d2)
gpu_pred_src_src_d_smooth_zeros = get_smooth_noisy_labels(0, gpu_pred_src_src_d, smoothing=smoothing, noise=noise)
gpu_pred_src_src_d2_smooth_zeros = get_smooth_noisy_labels(0, gpu_pred_src_src_d2, smoothing=smoothing, noise=noise)
gpu_target_src_d, gpu_target_src_d2 = self.GAN(gpu_target_src_masked)
gpu_target_src_d_smooth_ones = get_smooth_noisy_labels(1, gpu_target_src_d, smoothing=smoothing, noise=noise)
gpu_target_src_d2_smooth_ones = get_smooth_noisy_labels(1, gpu_target_src_d2, smoothing=smoothing, noise=noise)
gpu_GAN_loss = DLoss(gpu_target_src_d_smooth_ones, gpu_target_src_d) \
+ DLoss(gpu_pred_src_src_d_smooth_zeros, gpu_pred_src_src_d) \
+ DLoss(gpu_target_src_d2_smooth_ones, gpu_target_src_d2) \
+ DLoss(gpu_pred_src_src_d2_smooth_zeros, gpu_pred_src_src_d2)
gpu_GAN_loss_gradients += [ nn.gradients (gpu_GAN_loss, self.GAN.get_weights() ) ]
gpu_G_loss += (DLossOnes(gpu_pred_src_src_d) + DLossOnes(gpu_pred_src_src_d2) + \
DLossOnes(gpu_pred_dst_dst_d) + DLossOnes(gpu_pred_dst_dst_d2)
) * gan_power
# Minimal src-src-bg rec with total_variation_mse to suppress random bright dots from gan
gpu_G_loss += 0.000001*nn.total_variation_mse(gpu_pred_src_src)
gpu_G_loss += 0.02*tf.reduce_mean(tf.square(gpu_pred_src_src_anti_masked-gpu_target_src_anti_masked),axis=[1,2,3] )
gpu_G_loss += gan_power*(DLoss(gpu_pred_src_src_d_ones, gpu_pred_src_src_d) + \
DLoss(gpu_pred_src_src_d2_ones, gpu_pred_src_src_d2))
if masked_training:
# Minimal src-src-bg rec with total_variation_mse to suppress random bright dots from gan
gpu_G_loss += 0.000001*nn.total_variation_mse(gpu_pred_src_src)
gpu_G_loss += 0.02*tf.reduce_mean(tf.square(gpu_pred_src_src_anti_masked-gpu_target_src_anti_masked),axis=[1,2,3] )
gpu_G_loss_gradients += [ nn.gradients ( gpu_G_loss, self.G_weights ) ]
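nn.total_variation_mse is DFL's own helper; one plausible standalone reading of it (mean of squared neighbour differences, NHWC assumed) looks like:

import tensorflow as tf

def total_variation_mse(images):
    # Squared differences between vertically and horizontally adjacent pixels,
    # averaged per image; this penalizes isolated bright speckles left by the GAN.
    dy = images[:, 1:, :, :] - images[:, :-1, :, :]
    dx = images[:, :, 1:, :] - images[:, :, :-1, :]
    return (tf.reduce_mean(tf.square(dy), axis=[1, 2, 3]) +
            tf.reduce_mean(tf.square(dx), axis=[1, 2, 3]))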
@ -616,6 +760,7 @@ class AMPModel(ModelBase):
'random_blur': self.options['random_blur'],
'random_jpeg': self.options['random_jpeg'],
'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode,
'random_hsv_shift_amount' : random_hsv_power,
'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False,
'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode,
@ -743,7 +888,7 @@ class AMPModel(ModelBase):
return ( ('src_loss', np.mean(src_loss) ), ('dst_loss', np.mean(dst_loss) ), )
#override
def onGetPreview(self, samples, for_history=False, filenames=None):
def onGetPreview(self, samples, for_history=False):
( (warped_src, target_src, target_srcm, target_srcm_em),
(warped_dst, target_dst, target_dstm, target_dstm_em) ) = samples
@ -775,10 +920,6 @@ class AMPModel(ModelBase):
i = np.random.randint(n_samples) if not for_history else 0
if filenames is not None and len(filenames) > 0:
S[i] = label_face_filename(S[i], filenames[0][i])
D[i] = label_face_filename(D[i], filenames[1][i])
st = [ np.concatenate ((S[i], D[i], DD[i]*DDM_000[i]), axis=1) ]
st += [ np.concatenate ((SS[i], DD[i], SD_100[i] ), axis=1) ]
@ -803,13 +944,16 @@ class AMPModel(ModelBase):
#override
def get_MergerConfig(self):
morph_factor = np.clip ( io.input_number ("Morph factor", 1.0, add_info="0.0 .. 1.0"), 0.0, 1.0 )
def predictor_morph(face):
return self.predictor_func(face, morph_factor)
def predictor_morph(face, func_morph_factor=1.0):
return self.predictor_func(face, func_morph_factor)
import merger
return predictor_morph, (self.options['resolution'], self.options['resolution'], 3), merger.MergerConfigMasked(face_type=self.face_type, default_mode = 'overlay')
return predictor_morph, (self.options['resolution'], self.options['resolution'], 3), merger.MergerConfigMasked(face_type=self.face_type, default_mode = 'overlay', is_morphable=True)
#override
def get_config_schema_path(self):
config_path = Path(__file__).parent.absolute() / Path("config_schema.json")
return config_path
Model = AMPModel

View file

@ -0,0 +1,256 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"$ref": "#/definitions/dfl_config",
"definitions": {
"dfl_config": {
"type": "object",
"additionalProperties": false,
"properties": {
"use_fp16": {
"type": "boolean"
},
"morph_factor": {
"type": "number",
"minimum":0.0,
"maximum":0.5
},
"resolution": {
"type": "integer",
"minimum": 64,
"maximum": 640,
"multipleOf": 16
},
"face_type": {
"type": "string",
"enum": [
"h",
"mf",
"f",
"wf",
"head",
"custom"
]
},
"models_opt_on_gpu": {
"type": "boolean"
},
"ae_dims": {
"type": "integer",
"minimum": 32,
"maximum": 1024
},
"e_dims": {
"type": "integer",
"minimum": 16,
"maximum": 256,
"multipleOf": 2
},
"inter_dims": {
"type": "integer",
"minimum": 32,
"maximum": 2048,
"multipleOf": 2
},
"d_dims": {
"type": "integer",
"minimum": 16,
"maximum": 256,
"multipleOf": 2
},
"d_mask_dims": {
"type": "integer",
"minimum": 16,
"maximum": 256,
"multipleOf": 2
},
"masked_training": {
"type": "boolean"
},
"eyes_prio": {
"type": "boolean"
},
"mouth_prio": {
"type": "boolean"
},
"uniform_yaw": {
"type": "boolean"
},
"blur_out_mask": {
"type": "boolean"
},
"adabelief": {
"type": "boolean"
},
"lr_dropout": {
"type": "string",
"enum": [
"y",
"n",
"cpu"
]
},
"loss_function": {
"type": "string",
"enum": [
"SSIM",
"MS-SSIM",
"MS-SSIM+L1"
]
},
"random_warp": {
"type": "boolean"
},
"random_hsv_power": {
"type": "number",
"minimum": 0.0,
"maximum": 0.3
},
"random_downsample": {
"type": "boolean"
},
"random_noise": {
"type": "boolean"
},
"random_blur": {
"type": "boolean"
},
"random_jpeg": {
"type": "boolean"
},
"background_power": {
"type": "number",
"minimum": 0.0,
"maximum": 1.0
},
"ct_mode": {
"type": "string",
"enum": [
"none",
"rct",
"lct",
"mkl",
"idt",
"sot"
]
},
"random_color": {
"type": "boolean"
},
"clipgrad": {
"type": "boolean"
},
"pretrain": {
"type": "boolean"
},
"session_name": {
"type": "string"
},
"autobackup_hour": {
"type": "integer",
"minimum": 0,
"maximum": 24
},
"maximum_n_backups": {
"type": "integer"
},
"write_preview_history": {
"type": "boolean"
},
"target_iter": {
"type": "integer",
"minimum": 0
},
"retraining_samples": {
"type": "boolean"
},
"random_src_flip": {
"type": "boolean"
},
"random_dst_flip": {
"type": "boolean"
},
"batch_size": {
"type": "integer",
"minimum": 1
},
"gan_power": {
"type": "number",
"minimum": 0.0,
"maximum": 5.0
},
"gan_version": {
"type": "integer",
"minimum": 2,
"maximum": 3
},
"gan_patch_size": {
"type": "integer",
"minimum": 3,
"maximum": 640
},
"gan_dims": {
"type": "integer",
"minimum": 4,
"maximum": 512
},
"gan_smoothing": {
"type": "number",
"minimum": 0.0,
"maximum": 0.5
},
"gan_noise": {
"type": "number",
"minimum": 0.0,
"maximum": 0.5
}
},
"required": [
"adabelief",
"ae_dims",
"autobackup_hour",
"background_power",
"batch_size",
"blur_out_mask",
"clipgrad",
"ct_mode",
"d_dims",
"d_mask_dims",
"e_dims",
"inter_dims",
"morph_factor",
"eyes_prio",
"face_type",
"gan_dims",
"gan_noise",
"gan_patch_size",
"gan_power",
"gan_smoothing",
"gan_version",
"loss_function",
"lr_dropout",
"masked_training",
"maximum_n_backups",
"models_opt_on_gpu",
"mouth_prio",
"pretrain",
"random_blur",
"random_color",
"random_downsample",
"random_dst_flip",
"random_hsv_power",
"random_jpeg",
"random_noise",
"random_src_flip",
"random_warp",
"resolution",
"retraining_samples",
"session_name",
"target_iter",
"uniform_yaw",
"use_fp16",
"write_preview_history"
],
"title": "dfl_config"
}
}
}
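The schema above pairs with the new --config-training-file / --auto-gen-config flow; a minimal sketch of validating a training config against it with the jsonschema package added to requirements (file names are illustrative, and PyYAML is assumed for loading the yaml config):

import json
import yaml  # PyYAML, assumed here only for loading the user's yaml config
from jsonschema import validate, ValidationError

with open("config_schema.json") as f:
    schema = json.load(f)
with open("my_model_config.yaml") as f:  # hypothetical training config
    config = yaml.safe_load(f)

try:
    validate(instance=config, schema=schema)
    print("config OK")
except ValidationError as e:
    print(f"invalid training config: {e.message}")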

View file

@ -9,9 +9,17 @@ from core.leras import nn
from facelib import FaceType
from models import ModelBase
from samplelib import *
from utils.label_face import label_face_filename
from pathlib import Path
class QModel(ModelBase):
#override
def on_initialize_options(self):
ask_override = False if self.read_from_conf else self.ask_override()
if self.is_first_run() or ask_override:
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
self.ask_batch_size()
#override
def on_initialize(self):
device_config = nn.getCurrentDeviceConfig()
@ -81,7 +89,7 @@ class QModel(ModelBase):
if self.is_training:
# Adjust batch size for multiple GPU
gpu_count = max(1, len(devices) )
bs_per_gpu = max(1, 4 // gpu_count)
bs_per_gpu = max(1, self.get_batch_size() // gpu_count)
self.set_batch_size( gpu_count*bs_per_gpu)
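A worked example of the split above, with illustrative numbers:

gpu_count = 3
requested_bs = 8
bs_per_gpu = max(1, requested_bs // gpu_count)  # 2
effective_bs = gpu_count * bs_per_gpu           # 6 -- the model trains with 6, not 8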
# Compute losses per GPU
@ -279,7 +287,7 @@ class QModel(ModelBase):
return ( ('src_loss', src_loss), ('dst_loss', dst_loss), )
#override
def onGetPreview(self, samples, for_history=False, filenames=None):
def onGetPreview(self, samples, for_history=False):
( (warped_src, target_src, target_srcm),
(warped_dst, target_dst, target_dstm) ) = samples
@ -289,12 +297,6 @@ class QModel(ModelBase):
target_srcm, target_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format) for x in ([target_srcm, target_dstm] )]
n_samples = min(4, self.get_batch_size() )
if filenames is not None and len(filenames) > 0:
for i in range(n_samples):
S[i] = label_face_filename(S[i], filenames[0][i])
D[i] = label_face_filename(D[i], filenames[1][i])
result = []
st = []
for i in range(n_samples):
@ -329,5 +331,9 @@ class QModel(ModelBase):
return self.predictor_func, (self.resolution, self.resolution, 3), merger.MergerConfigMasked(face_type=self.face_type,
default_mode = 'overlay',
)
#override
def get_config_schema_path(self):
config_path = Path(__file__).parent.absolute() / Path("config_schema.json")
return config_path
Model = QModel

View file

@ -0,0 +1,20 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"$ref": "#/definitions/dfl_config",
"definitions": {
"dfl_config": {
"type": "object",
"additionalProperties": false,
"properties": {
"batch_size": {
"type": "integer",
"minimum": 1
}
},
"required": [
"batch_size",
],
"title": "dfl_config"
}
}
}

View file

@ -9,7 +9,8 @@ from core.leras import nn
from facelib import FaceType
from models import ModelBase
from samplelib import *
from utils.label_face import label_face_filename
from pathlib import Path
class SAEHDModel(ModelBase):
@ -29,7 +30,7 @@ class SAEHDModel(ModelBase):
min_res = 64
max_res = 640
#default_usefp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False)
default_usefp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False)
default_resolution = self.options['resolution'] = self.load_or_def_option('resolution', 128)
default_face_type = self.options['face_type'] = self.load_or_def_option('face_type', 'f')
default_models_opt_on_gpu = self.options['models_opt_on_gpu'] = self.load_or_def_option('models_opt_on_gpu', True)
@ -69,88 +70,92 @@ class SAEHDModel(ModelBase):
default_random_color = self.options['random_color'] = self.load_or_def_option('random_color', False)
default_clipgrad = self.options['clipgrad'] = self.load_or_def_option('clipgrad', False)
default_pretrain = self.options['pretrain'] = self.load_or_def_option('pretrain', False)
default_use_fp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False)
#default_use_fp16 = self.options['use_fp16'] = self.load_or_def_option('use_fp16', False)
ask_override = self.ask_override()
ask_override = False if self.read_from_conf else self.ask_override()
if self.is_first_run() or ask_override:
self.ask_session_name()
self.ask_autobackup_hour()
self.ask_maximum_n_backups()
self.ask_write_preview_history()
self.ask_target_iter()
self.ask_retraining_samples()
self.ask_random_src_flip()
self.ask_random_dst_flip()
self.ask_batch_size(suggest_batch_size)
#self.options['use_fp16'] = io.input_bool ("Use fp16", default_usefp16, help_message='Increases training/inference speed, reduces model size. Model may crash. Enable it after 1-5k iters.')
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
self.ask_session_name()
self.ask_autobackup_hour()
self.ask_maximum_n_backups()
self.ask_write_preview_history()
self.ask_target_iter()
self.ask_retraining_samples()
self.ask_random_src_flip()
self.ask_random_dst_flip()
self.ask_batch_size(suggest_batch_size)
self.options['use_fp16'] = io.input_bool ("Use fp16", default_usefp16, help_message='Increases training/inference speed, reduces model size. Model may crash. Enable it after 1-5k iters.')
if self.is_first_run():
resolution = io.input_int("Resolution", default_resolution, add_info="64-640", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16 and 32 for -d archi.")
resolution = np.clip ( (resolution // 16) * 16, min_res, max_res)
self.options['resolution'] = resolution
self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf','head', 'custom'], help_message="Half / mid face / full face / whole face / head / custom. Half face has better resolution, but covers less area of cheeks. Mid face is 30% wider than half face. 'Whole face' covers full area of face include forehead. 'head' covers full head, but requires XSeg for src and dst faceset.").lower()
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
resolution = io.input_int("Resolution", default_resolution, add_info="64-640", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16 and 32 for -d archi.")
resolution = np.clip ( (resolution // 16) * 16, min_res, max_res)
self.options['resolution'] = resolution
self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf','head', 'custom'], help_message="Half / mid face / full face / whole face / head / custom. Half face has better resolution, but covers less area of cheeks. Mid face is 30% wider than half face. 'Whole face' covers full area of face include forehead. 'head' covers full head, but requires XSeg for src and dst faceset.").lower()
while True:
archi = io.input_str ("AE architecture", default_archi, help_message=\
"""
'df' keeps more identity-preserved face.
'liae' can fix overly different face shapes.
'-u' increased likeness of the face.
'-d' (experimental) doubling the resolution using the same computation cost.
Examples: df, liae, df-d, df-ud, liae-ud, ...
""").lower()
while True:
archi = io.input_str ("AE architecture", default_archi, help_message=\
"""
'df' keeps more identity-preserved face.
'liae' can fix overly different face shapes.
'-u' increased likeness of the face.
'-d' (experimental) doubling the resolution using the same computation cost.
Examples: df, liae, df-d, df-ud, liae-ud, ...
""").lower()
archi_split = archi.split('-')
archi_split = archi.split('-')
if len(archi_split) == 2:
archi_type, archi_opts = archi_split
elif len(archi_split) == 1:
archi_type, archi_opts = archi_split[0], None
else:
continue
if archi_type not in ['df', 'liae']:
continue
if archi_opts is not None:
if len(archi_opts) == 0:
continue
if len([ 1 for opt in archi_opts if opt not in ['u','d','t','c'] ]) != 0:
if len(archi_split) == 2:
archi_type, archi_opts = archi_split
elif len(archi_split) == 1:
archi_type, archi_opts = archi_split[0], None
else:
continue
if 'd' in archi_opts:
self.options['resolution'] = np.clip ( (self.options['resolution'] // 32) * 32, min_res, max_res)
if archi_type not in ['df', 'liae']:
continue
break
self.options['archi'] = archi
if archi_opts is not None:
if len(archi_opts) == 0:
continue
if len([ 1 for opt in archi_opts if opt not in ['u','d','t','c'] ]) != 0:
continue
default_d_dims = self.options['d_dims'] = self.load_or_def_option('d_dims', 64)
if 'd' in archi_opts:
self.options['resolution'] = np.clip ( (self.options['resolution'] // 32) * 32, min_res, max_res)
default_d_mask_dims = default_d_dims // 3
default_d_mask_dims += default_d_mask_dims % 2
default_d_mask_dims = self.options['d_mask_dims'] = self.load_or_def_option('d_mask_dims', default_d_mask_dims)
break
self.options['archi'] = archi
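The validation loop above accepts strings like df, liae-ud, df-d; a standalone sketch of the same rules (the function name is mine):

def parse_archi(archi):
    # Returns (archi_type, archi_opts) or None if the string is invalid.
    parts = archi.lower().split('-')
    if len(parts) == 1:
        archi_type, archi_opts = parts[0], None
    elif len(parts) == 2:
        archi_type, archi_opts = parts
    else:
        return None
    if archi_type not in ('df', 'liae'):
        return None
    if archi_opts is not None and (len(archi_opts) == 0 or any(o not in 'udtc' for o in archi_opts)):
        return None
    return archi_type, archi_opts

assert parse_archi('liae-ud') == ('liae', 'ud')
assert parse_archi('df-x') is None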
default_d_dims = self.options['d_dims'] = self.load_or_def_option('d_dims', 64)
default_d_mask_dims = default_d_dims // 3
default_d_mask_dims += default_d_mask_dims % 2
default_d_mask_dims = self.options['d_mask_dims'] = self.load_or_def_option('d_mask_dims', default_d_mask_dims)
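A worked example of the default mask-decoder dims above:

d_dims = 64
d_mask_dims = d_dims // 3        # 21
d_mask_dims += d_mask_dims % 2   # 22 -- rounded up to an even value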
if self.is_first_run():
self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dimensions", default_ae_dims, add_info="32-1024", help_message="All face information will be packed into the AE dims. If there are not enough AE dims, then, for example, closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune the model size to fit your GPU." ), 32, 1024 )
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dimensions", default_ae_dims, add_info="32-1024", help_message="All face information will be packed into the AE dims. If there are not enough AE dims, then, for example, closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune the model size to fit your GPU." ), 32, 1024 )
e_dims = np.clip ( io.input_int("Encoder dimensions", default_e_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve a sharper result, but require more VRAM. You can fine-tune the model size to fit your GPU." ), 16, 256 )
self.options['e_dims'] = e_dims + e_dims % 2
e_dims = np.clip ( io.input_int("Encoder dimensions", default_e_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve a sharper result, but require more VRAM. You can fine-tune the model size to fit your GPU." ), 16, 256 )
self.options['e_dims'] = e_dims + e_dims % 2
d_dims = np.clip ( io.input_int("Decoder dimensions", default_d_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve a sharper result, but require more VRAM. You can fine-tune the model size to fit your GPU." ), 16, 256 )
self.options['d_dims'] = d_dims + d_dims % 2
d_dims = np.clip ( io.input_int("Decoder dimensions", default_d_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve a sharper result, but require more VRAM. You can fine-tune the model size to fit your GPU." ), 16, 256 )
self.options['d_dims'] = d_dims + d_dims % 2
d_mask_dims = np.clip ( io.input_int("Decoder mask dimensions", default_d_mask_dims, add_info="16-256", help_message="Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality." ), 16, 256 )
self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2
d_mask_dims = np.clip ( io.input_int("Decoder mask dimensions", default_d_mask_dims, add_info="16-256", help_message="Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality." ), 16, 256 )
self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2
if self.is_first_run() or ask_override:
if self.options['face_type'] == 'wf' or self.options['face_type'] == 'head' or self.options['face_type'] == 'custom':
self.options['masked_training'] = io.input_bool ("Masked training", default_masked_training, help_message="This option is available only for the 'whole_face' or 'head' type. Masked training clips the training area to the full_face or XSeg mask, so the network trains the faces properly.")
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
if self.options['face_type'] == 'wf' or self.options['face_type'] == 'head' or self.options['face_type'] == 'custom':
self.options['masked_training'] = io.input_bool ("Masked training", default_masked_training, help_message="This option is available only for the 'whole_face' or 'head' type. Masked training clips the training area to the full_face or XSeg mask, so the network trains the faces properly.")
self.options['eyes_prio'] = io.input_bool ("Eyes priority", default_eyes_prio, help_message='Helps to fix eye problems during training, like "alien eyes" and wrong eye direction (especially on HD architectures), by forcing the neural network to train eyes with higher priority. before/after https://i.imgur.com/YQHOuSR.jpg ')
self.options['mouth_prio'] = io.input_bool ("Mouth priority", default_mouth_prio, help_message='Helps to fix mouth problems during training by forcing the neural network to train the mouth with higher priority, similar to eyes.')
self.options['eyes_prio'] = io.input_bool ("Eyes priority", default_eyes_prio, help_message='Helps to fix eye problems during training, like "alien eyes" and wrong eye direction (especially on HD architectures), by forcing the neural network to train eyes with higher priority. before/after https://i.imgur.com/YQHOuSR.jpg ')
self.options['mouth_prio'] = io.input_bool ("Mouth priority", default_mouth_prio, help_message='Helps to fix mouth problems during training by forcing the neural network to train the mouth with higher priority, similar to eyes.')
self.options['uniform_yaw'] = io.input_bool ("Uniform yaw distribution of samples", default_uniform_yaw, help_message='Helps to fix blurry side faces caused by the small number of them in the faceset.')
self.options['blur_out_mask'] = io.input_bool ("Blur out mask", default_blur_out_mask, help_message='Blurs the area just outside the applied face mask of the training samples. As a result, the background near the face is smoothed and less noticeable on the swapped face. Exact XSeg masks in the src and dst facesets are required.')
self.options['uniform_yaw'] = io.input_bool ("Uniform yaw distribution of samples", default_uniform_yaw, help_message='Helps to fix blurry side faces caused by the small number of them in the faceset.')
self.options['blur_out_mask'] = io.input_bool ("Blur out mask", default_blur_out_mask, help_message='Blurs the area just outside the applied face mask of the training samples. As a result, the background near the face is smoothed and less noticeable on the swapped face. Exact XSeg masks in the src and dst facesets are required.')
default_gan_power = self.options['gan_power'] = self.load_or_def_option('gan_power', 0.0)
default_gan_version = self.options['gan_version'] = self.load_or_def_option('gan_version', 2)
@ -160,54 +165,55 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
default_gan_noise = self.options['gan_noise'] = self.load_or_def_option('gan_noise', 0.0)
if self.is_first_run() or ask_override:
self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default the model and optimizer weights are placed on the GPU to accelerate the process. You can place them on the CPU to free up extra VRAM and set bigger dimensions.")
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default the model and optimizer weights are placed on the GPU to accelerate the process. You can place them on the CPU to free up extra VRAM and set bigger dimensions.")
self.options['adabelief'] = io.input_bool ("Use AdaBelief optimizer?", default_adabelief, help_message="Use the AdaBelief optimizer. It requires more VRAM, but the accuracy and generalization of the model are higher.")
self.options['adabelief'] = io.input_bool ("Use AdaBelief optimizer?", default_adabelief, help_message="Use the AdaBelief optimizer. It requires more VRAM, but the accuracy and generalization of the model are higher.")
self.options['lr_dropout'] = io.input_str (f"Use learning rate dropout", default_lr_dropout, ['n','y','cpu'], help_message="When the face is trained enough, you can enable this option to get extra sharpness and reduce subpixel shake in fewer iterations. Enable it before `disable random warp` and before GAN. \nn - disabled.\ny - enabled\ncpu - enabled on CPU. This avoids using extra VRAM, sacrificing 20% of iteration time.")
self.options['lr_dropout'] = io.input_str (f"Use learning rate dropout", default_lr_dropout, ['n','y','cpu'], help_message="When the face is trained enough, you can enable this option to get extra sharpness and reduce subpixel shake in fewer iterations. Enable it before `disable random warp` and before GAN. \nn - disabled.\ny - enabled\ncpu - enabled on CPU. This avoids using extra VRAM, sacrificing 20% of iteration time.")
self.options['loss_function'] = io.input_str(f"Loss function", default_loss_function, ['SSIM', 'MS-SSIM', 'MS-SSIM+L1'],
help_message="Change loss function used for image quality assessment.")
self.options['loss_function'] = io.input_str(f"Loss function", default_loss_function, ['SSIM', 'MS-SSIM', 'MS-SSIM+L1'],
help_message="Change loss function used for image quality assessment.")
self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize the facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake in fewer iterations.")
self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize the facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake in fewer iterations.")
self.options['random_hsv_power'] = np.clip ( io.input_number ("Random hue/saturation/light intensity", default_random_hsv_power, add_info="0.0 .. 0.3", help_message="Random hue/saturation/light intensity applied to the src faceset only at the input of the neural network. Stabilizes color perturbations during face swapping. Reduces the quality of the color transfer by selecting the closest one in the src faceset; thus the src faceset must be diverse enough. A typical fine value is 0.05."), 0.0, 0.3 )
self.options['random_hsv_power'] = np.clip ( io.input_number ("Random hue/saturation/light intensity", default_random_hsv_power, add_info="0.0 .. 0.3", help_message="Random hue/saturation/light intensity applied to the src faceset only at the input of the neural network. Stabilizes color perturbations during face swapping. Reduces the quality of the color transfer by selecting the closest one in the src faceset; thus the src faceset must be diverse enough. A typical fine value is 0.05."), 0.0, 0.3 )
self.options['random_downsample'] = io.input_bool("Enable random downsample of samples", default_random_downsample, help_message="")
self.options['random_noise'] = io.input_bool("Enable random noise added to samples", default_random_noise, help_message="")
self.options['random_blur'] = io.input_bool("Enable random blur of samples", default_random_blur, help_message="")
self.options['random_jpeg'] = io.input_bool("Enable random jpeg compression of samples", default_random_jpeg, help_message="")
self.options['random_downsample'] = io.input_bool("Enable random downsample of samples", default_random_downsample, help_message="")
self.options['random_noise'] = io.input_bool("Enable random noise added to samples", default_random_noise, help_message="")
self.options['random_blur'] = io.input_bool("Enable random blur of samples", default_random_blur, help_message="")
self.options['random_jpeg'] = io.input_bool("Enable random jpeg compression of samples", default_random_jpeg, help_message="")
self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 10.0", help_message="Train the network in a Generative Adversarial manner. Forces the neural network to learn small details of the face. Enable it only when the face is trained enough, and do not disable it afterwards. Typical value is 0.1"), 0.0, 10.0 )
self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 10.0", help_message="Train the network in a Generative Adversarial manner. Forces the neural network to learn small details of the face. Enable it only when the face is trained enough, and do not disable it afterwards. Typical value is 0.1"), 0.0, 10.0 )
if self.options['gan_power'] != 0.0:
self.options['gan_version'] = np.clip (io.input_int("GAN version", default_gan_version, add_info="2 or 3", help_message="Choose GAN version (v2: 7/16/2020, v3: 1/3/2021):"), 2, 3)
if self.options['gan_power'] != 0.0:
self.options['gan_version'] = np.clip (io.input_int("GAN version", default_gan_version, add_info="2 or 3", help_message="Choose GAN version (v2: 7/16/2020, v3: 1/3/2021):"), 2, 3)
if self.options['gan_version'] == 3:
gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher the patch size, the higher the quality and the more VRAM is required. You can get sharper edges even at the lowest setting. A typical fine value is resolution / 8." ), 3, 640 )
self.options['gan_patch_size'] = gan_patch_size
if self.options['gan_version'] == 3:
gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher the patch size, the higher the quality and the more VRAM is required. You can get sharper edges even at the lowest setting. A typical fine value is resolution / 8." ), 3, 640 )
self.options['gan_patch_size'] = gan_patch_size
gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-64", help_message="The dimensions of the GAN network. The higher the dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. A typical fine value is 16." ), 4, 64 )
self.options['gan_dims'] = gan_dims
gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-64", help_message="The dimensions of the GAN network. The higher the dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. A typical fine value is 16." ), 4, 64 )
self.options['gan_dims'] = gan_dims
self.options['gan_smoothing'] = np.clip ( io.input_number("GAN label smoothing", default_gan_smoothing, add_info="0 - 0.5", help_message="Uses soft labels with values slightly off from 0/1 for GAN, has a regularizing effect"), 0, 0.5)
self.options['gan_noise'] = np.clip ( io.input_number("GAN noisy labels", default_gan_noise, add_info="0 - 0.5", help_message="Marks some images with the wrong label, helps prevent collapse"), 0, 0.5)
self.options['gan_smoothing'] = np.clip ( io.input_number("GAN label smoothing", default_gan_smoothing, add_info="0 - 0.5", help_message="Uses soft labels with values slightly off from 0/1 for GAN, has a regularizing effect"), 0, 0.5)
self.options['gan_noise'] = np.clip ( io.input_number("GAN noisy labels", default_gan_noise, add_info="0 - 0.5", help_message="Marks some images with the wrong label, helps prevent collapse"), 0, 0.5)
if 'df' in self.options['archi']:
self.options['true_face_power'] = np.clip ( io.input_number ("'True face' power.", default_true_face_power, add_info="0.0000 .. 1.0", help_message="Experimental option. Discriminates the result face to be more like the src face. The higher the value, the stronger the discrimination. Typical value is 0.01. Comparison - https://i.imgur.com/czScS9q.png"), 0.0, 1.0 )
else:
self.options['true_face_power'] = 0.0
if 'df' in self.options['archi']:
self.options['true_face_power'] = np.clip ( io.input_number ("'True face' power.", default_true_face_power, add_info="0.0000 .. 1.0", help_message="Experimental option. Discriminates the result face to be more like the src face. The higher the value, the stronger the discrimination. Typical value is 0.01. Comparison - https://i.imgur.com/czScS9q.png"), 0.0, 1.0 )
else:
self.options['true_face_power'] = 0.0
self.options['background_power'] = np.clip ( io.input_number("Background power", default_background_power, add_info="0.0..1.0", help_message="Learn the area outside of the mask. Helps smooth out the area near the mask boundaries. Can be used at any time."), 0.0, 1.0 )
self.options['background_power'] = np.clip ( io.input_number("Background power", default_background_power, add_info="0.0..1.0", help_message="Learn the area outside of the mask. Helps smooth out the area near the mask boundaries. Can be used at any time."), 0.0, 1.0 )
self.options['face_style_power'] = np.clip ( io.input_number("Face style power", default_face_style_power, add_info="0.0..100.0", help_message="Learn the color of the predicted face to match dst inside the mask. If you want to use this option with 'whole_face', you have to use an XSeg-trained mask. Warning: enable it only after 10k iters, when the predicted face is clear enough to start learning style. Start from a value of 0.001 and check history changes. Enabling this option increases the chance of model collapse."), 0.0, 100.0 )
self.options['bg_style_power'] = np.clip ( io.input_number("Background style power", default_bg_style_power, add_info="0.0..100.0", help_message="Learn the area outside the mask of the predicted face to be the same as dst. If you want to use this option with 'whole_face', you have to use an XSeg-trained mask. This can make the face more like dst. Enabling this option increases the chance of model collapse. Typical value is 2.0"), 0.0, 100.0 )
self.options['face_style_power'] = np.clip ( io.input_number("Face style power", default_face_style_power, add_info="0.0..100.0", help_message="Learn the color of the predicted face to match dst inside the mask. If you want to use this option with 'whole_face', you have to use an XSeg-trained mask. Warning: enable it only after 10k iters, when the predicted face is clear enough to start learning style. Start from a value of 0.001 and check history changes. Enabling this option increases the chance of model collapse."), 0.0, 100.0 )
self.options['bg_style_power'] = np.clip ( io.input_number("Background style power", default_bg_style_power, add_info="0.0..100.0", help_message="Learn the area outside the mask of the predicted face to be the same as dst. If you want to use this option with 'whole_face', you have to use an XSeg-trained mask. This can make the face more like dst. Enabling this option increases the chance of model collapse. Typical value is 2.0"), 0.0, 100.0 )
self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot', 'fs-aug'], help_message="Change the color distribution of src samples to be close to dst samples. Try all modes to find the best. fs-aug adds random color to dst and src.")
self.options['random_color'] = io.input_bool ("Random color", default_random_color, help_message="Samples are randomly rotated around the L axis in LAB colorspace, which helps generalize training.")
self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces the chance of model collapse, sacrificing training speed.")
self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot', 'fs-aug'], help_message="Change the color distribution of src samples to be close to dst samples. Try all modes to find the best. fs-aug adds random color to dst and src.")
self.options['random_color'] = io.input_bool ("Random color", default_random_color, help_message="Samples are randomly rotated around the L axis in LAB colorspace, which helps generalize training.")
self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces the chance of model collapse, sacrificing training speed.")
self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with a large amount of various faces. After that, the model can be used to train fakes more quickly. Forces random_warp=N, random_flips=Y, gan_power=0.0, lr_dropout=N, styles=0.0, uniform_yaw=Y")
self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with a large amount of various faces. After that, the model can be used to train fakes more quickly. Forces random_warp=N, random_flips=Y, gan_power=0.0, lr_dropout=N, styles=0.0, uniform_yaw=Y")
if self.options['pretrain'] and self.get_pretraining_data_path() is None:
raise Exception("pretraining_data_path is not defined")
@ -787,7 +793,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
random_ct_samples_path=training_data_dst_path if ct_mode is not None and not self.pretrain else None
cpu_count = min(multiprocessing.cpu_count(), 4)
cpu_count = multiprocessing.cpu_count()
src_generators_count = cpu_count // 2
dst_generators_count = cpu_count // 2
if ct_mode is not None:
@ -947,7 +953,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
return ( ('src_loss', np.mean(src_loss) ), ('dst_loss', np.mean(dst_loss) ), )
#override
def onGetPreview(self, samples, for_history=False, filenames=None):
def onGetPreview(self, samples, for_history=False):
( (warped_src, target_src, target_srcm, target_srcm_em),
(warped_dst, target_dst, target_dstm, target_dstm_em) ) = samples
@ -959,11 +965,6 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
n_samples = min(4, self.get_batch_size(), 800 // self.resolution )
if filenames is not None and len(filenames) > 0:
for i in range(n_samples):
S[i] = label_face_filename(S[i], filenames[0][i])
D[i] = label_face_filename(D[i], filenames[1][i])
if self.resolution <= 256:
result = []
@ -1063,4 +1064,9 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
import merger
return self.predictor_func, (self.options['resolution'], self.options['resolution'], 3), merger.MergerConfigMasked(face_type=self.face_type, default_mode = 'overlay')
#override
def get_config_schema_path(self):
config_path = Path(__file__).parent.absolute() / Path("config_schema.json")
return config_path
Model = SAEHDModel

View file

@ -11,6 +11,8 @@ from facelib import FaceType, XSegNet
from models import ModelBase
from samplelib import *
from pathlib import Path
class XSegModel(ModelBase):
def __init__(self, *args, **kwargs):
@ -18,7 +20,7 @@ class XSegModel(ModelBase):
#override
def on_initialize_options(self):
ask_override = self.ask_override()
ask_override = False if self.read_from_conf else self.ask_override()
if not self.is_first_run() and ask_override:
if io.input_bool(f"Restart training?", False, help_message="Reset model weights and start training from scratch."):
@ -28,11 +30,13 @@ class XSegModel(ModelBase):
default_pretrain = self.options['pretrain'] = self.load_or_def_option('pretrain', False)
if self.is_first_run():
self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf','head'], help_message="Half / mid face / full face / whole face / head. Choose the same as your deepfake model.").lower()
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
self.options['face_type'] = io.input_str ("Face type", default_face_type, ['h','mf','f','wf','head'], help_message="Half / mid face / full face / whole face / head. Choose the same as your deepfake model.").lower()
if self.is_first_run() or ask_override:
self.ask_batch_size(4, range=[2,16])
self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain)
if (self.read_from_conf and not self.config_file_exists) or not self.read_from_conf:
self.ask_batch_size(4, range=[2,16])
self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain)
if not self.is_exporting and (self.options['pretrain'] and self.get_pretraining_data_path() is None):
raise Exception("pretraining_data_path is not defined")
@ -51,13 +55,11 @@ class XSegModel(ModelBase):
self.resolution = resolution = 256
self.face_type = {'h' : FaceType.HALF,
'mf' : FaceType.MID_FULL,
'f' : FaceType.FULL,
'wf' : FaceType.WHOLE_FACE,
'head' : FaceType.HEAD}[ self.options['face_type'] ]
place_model_on_cpu = len(devices) == 0
models_opt_device = '/CPU:0' if place_model_on_cpu else nn.tf_default_device_name
@ -279,5 +281,10 @@ class XSegModel(ModelBase):
output_names=['out_mask:0'],
opset=13,
output_path=output_path)
#override
def get_config_schema_path(self):
config_path = Path(__file__).parent.absolute() / Path("config_schema.json")
return config_path
Model = XSegModel
Model = XSegModel

View file

@ -0,0 +1,39 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"$ref": "#/definitions/dfl_config",
"definitions": {
"dfl_config": {
"type": "object",
"additionalProperties": false,
"properties": {
"use_fp16": {
"type": "boolean"
},
"face_type": {
"type": "string",
"enum": [
"h",
"mf",
"f",
"wf",
"head",
"custom"
]
},
"pretrain": {
"type": "boolean"
},
"batch_size": {
"type": "integer",
"minimum": 1
}
},
"required": [
"batch_size",
"face_type",
"pretrain",
],
"title": "dfl_config"
}
}
}

View file

@ -3,3 +3,5 @@ from .ModelBase import ModelBase
def import_model(model_class_name):
module = __import__('Model_'+model_class_name, globals(), locals(), [], 1)
return getattr(module, 'Model')

View file

@ -11,3 +11,4 @@ tensorflow-gpu==2.4.0
tf2onnx==1.9.3
tensorboardX
crc32c
jsonschema

View file

@ -14,3 +14,4 @@ Flask==1.1.1
flask-socketio==4.2.1
tensorboardX
crc32c
jsonschema

View file

@ -115,7 +115,6 @@ class SampleGeneratorFace(SampleGeneratorBase):
samples, index_host, ct_samples, ct_index_host = param
bs = self.batch_size
filenames = []
while True:
batches = None
@ -142,6 +141,4 @@ class SampleGeneratorFace(SampleGeneratorBase):
for i in range(len(x)):
batches[i].append ( x[i] )
filenames.append(sample.filename)
yield ([ np.array(batch) for batch in batches], filenames)
yield [ np.array(batch) for batch in batches]
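The generator thus reverts to yielding only the list of batch arrays; in miniature, the yielded structure looks like this (a sketch with toy data, names are mine):

import numpy as np

def batch_generator(samples, batch_size=4):
    # batches[i] collects component i (e.g. image, mask) across the whole batch.
    while True:
        batches = [[], []]
        for idx in np.random.choice(len(samples), batch_size):
            img, mask = samples[idx]
            batches[0].append(img)
            batches[1].append(mask)
        yield [np.array(b) for b in batches]

samples = [(np.zeros((8, 8, 3)), np.zeros((8, 8, 1))) for _ in range(10)]
imgs, masks = next(batch_generator(samples))  # imgs: (4, 8, 8, 3), masks: (4, 8, 8, 1)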