Mirror of https://github.com/iperov/DeepFaceLab.git
added XSeg model.

With the XSeg model you can train your own mask segmentator for dst (and src) faces, which will be used by the merger for whole_face. Instead of using a pretrained model (which does not exist), you control which parts of the face should be masked. The workflow is not easy, but at the moment it is the best solution for obtaining the highest quality whole_face deepfakes with minimum effort, without rotoscoping in AfterEffects.

New scripts:
  XSeg) data_dst edit.bat
  XSeg) data_dst merge.bat
  XSeg) data_dst split.bat
  XSeg) data_src edit.bat
  XSeg) data_src merge.bat
  XSeg) data_src split.bat
  XSeg) train.bat

Usage:

  unpack dst faceset if packed

  run 'XSeg) data_dst split.bat'
    this script extracts (previously saved) .json data from the jpg faces for use in the label tool.

  run 'XSeg) data_dst edit.bat'
    the new tool 'labelme' is used.
    use polygons (CTRL-N) to mask the face:
      name a polygon "1" (one symbol) to include an area
      name a polygon "0" (one symbol) to exclude an area
      'exclude' polygons are applied after all 'include' polygons.

    Hot keys:
      ctrl-N             create polygon
      ctrl-J             edit polygon
      A/D                navigate between frames
      ctrl + mousewheel  image zoom
      mousewheel         vertical scroll
      alt + mousewheel   horizontal scroll

    repeat for 10/50/100 faces. You do not need to mask every frame of dst, only frames where the face differs significantly, for example:
      closed eyes
      changed head direction
      changed lighting
    The more varied the faces you mask, the higher the quality you will get.

    Start masking from the upper-left area and proceed clockwise. Keep the same masking logic for all frames, for example:
      the same approximated jaw line on side faces where the jaw is not visible
      the same hair line
    Mask obstructions using polygons named "0".

  run 'XSeg) data_dst merge.bat'
    this script merges the .json polygon data back into the jpg faces, so the faceset can be sorted or packed as usual.

  run 'XSeg) train.bat'
    train the model.
    Check the faces in the 'XSeg dst faces' preview. If some faces have a wrong or glitchy mask, repeat the steps:
      run split
      run edit, find these glitchy faces and mask them
      run merge
      train further or restart training from scratch

Training of the XSeg model can only be restarted from scratch by deleting all 'model\XSeg_*' files.

If you want the mask of the predicted face in the merger, repeat the same steps for the src faceset.

New mask modes available in the merger for whole_face (see the sketch below for how the combined mode behaves):

  XSeg-prd           - XSeg mask of the predicted face -> faces from the src faceset should be labeled
  XSeg-dst           - XSeg mask of the dst face       -> faces from the dst faceset should be labeled
  XSeg-prd*XSeg-dst  - the smallest area of both

If the workspace\model folder contains a trained XSeg model, the merger will use it; otherwise the XSeg-* modes will produce a transparent mask.

Some screenshots:
  label tool: https://i.imgur.com/aY6QGw1.jpg
  trainer   : https://i.imgur.com/NM1Kn3s.jpg
  merger    : https://i.imgur.com/glUzFQ8.jpg

Example of a fake made using 13 segmented dst faces: https://i.imgur.com/wmvyizU.gifv
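A note on the XSeg-prd*XSeg-dst mode: taking "the smallest area of both" amounts to an elementwise minimum of the two soft masks. A minimal sketch of the idea, assuming masks are float arrays in [0, 1]; combine_xseg_masks is an illustrative name, not a function in this repository:

  import numpy as np

  def combine_xseg_masks(mask_prd, mask_dst):
      # A pixel survives only if BOTH masks include it: the elementwise
      # minimum is the intersection ('smallest area') of the two soft masks.
      return np.minimum(mask_prd, mask_dst)

  prd = np.array([[1.0, 0.8], [0.0, 0.5]], dtype=np.float32)
  dst = np.array([[0.9, 0.0], [1.0, 0.5]], dtype=np.float32)
  print(combine_xseg_masks(prd, dst))  # [[0.9 0. ] [0.  0.5]]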
parent 2be940092b
commit 45582d129d

27 changed files with 577 additions and 711 deletions
@@ -32,6 +32,7 @@ class ModelBase(object):
                        force_gpu_idxs=None,
                        cpu_only=False,
                        debug=False,
+                       force_model_class_name=None,
                        **kwargs):
         self.is_training = is_training
         self.saved_models_path = saved_models_path
@@ -44,80 +45,84 @@ class ModelBase(object):

         self.model_class_name = model_class_name = Path(inspect.getmodule(self).__file__).parent.name.rsplit("_", 1)[1]

-        if force_model_name is not None:
-            self.model_name = force_model_name
-        else:
-            while True:
-                # gather all model dat files
-                saved_models_names = []
-                for filepath in pathex.get_file_paths(saved_models_path):
-                    filepath_name = filepath.name
-                    if filepath_name.endswith(f'{model_class_name}_data.dat'):
-                        saved_models_names += [ (filepath_name.split('_')[0], os.path.getmtime(filepath)) ]
-
-                # sort by modified datetime
-                saved_models_names = sorted(saved_models_names, key=operator.itemgetter(1), reverse=True )
-                saved_models_names = [ x[0] for x in saved_models_names ]
-
-                if len(saved_models_names) != 0:
-                    io.log_info ("Choose one of saved models, or enter a name to create a new model.")
-                    io.log_info ("[r] : rename")
-                    io.log_info ("[d] : delete")
-                    io.log_info ("")
-                    for i, model_name in enumerate(saved_models_names):
-                        s = f"[{i}] : {model_name} "
-                        if i == 0:
-                            s += "- latest"
-                        io.log_info (s)
-
-                    inp = io.input_str(f"", "0", show_default_value=False )
-                    model_idx = -1
-                    try:
-                        model_idx = np.clip ( int(inp), 0, len(saved_models_names)-1 )
-                    except:
-                        pass
-
-                    if model_idx == -1:
-                        if len(inp) == 1:
-                            is_rename = inp[0] == 'r'
-                            is_delete = inp[0] == 'd'
-
-                            if is_rename or is_delete:
-                                if len(saved_models_names) != 0:
-
-                                    if is_rename:
-                                        name = io.input_str(f"Enter the name of the model you want to rename")
-                                    elif is_delete:
-                                        name = io.input_str(f"Enter the name of the model you want to delete")
-
-                                    if name in saved_models_names:
-                                        for filepath in pathex.get_paths(saved_models_path):
-                                            filepath_name = filepath.name
-
-                                            model_filename, remain_filename = filepath_name.split('_', 1)
-                                            if model_filename == name:
-                                                if is_rename:
-                                                    new_model_name = io.input_str(f"Enter new name of the model")
-
-                                                if is_rename:
-                                                    new_filepath = filepath.parent / ( new_model_name + '_' + remain_filename )
-                                                    filepath.rename (new_filepath)
-                                                elif is_delete:
-                                                    filepath.unlink()
-                            continue
-
-                        self.model_name = inp
-                    else:
-                        self.model_name = saved_models_names[model_idx]
-
-                else:
-                    self.model_name = io.input_str(f"No saved models found. Enter a name of a new model", "new")
-                    self.model_name = self.model_name.replace('_', ' ')
-                break
-
-        self.model_name = self.model_name + '_' + self.model_class_name
+        if force_model_class_name is None:
+            if force_model_name is not None:
+                self.model_name = force_model_name
+            else:
+                while True:
+                    # gather all model dat files
+                    saved_models_names = []
+                    for filepath in pathex.get_file_paths(saved_models_path):
+                        filepath_name = filepath.name
+                        if filepath_name.endswith(f'{model_class_name}_data.dat'):
+                            saved_models_names += [ (filepath_name.split('_')[0], os.path.getmtime(filepath)) ]
+
+                    # sort by modified datetime
+                    saved_models_names = sorted(saved_models_names, key=operator.itemgetter(1), reverse=True )
+                    saved_models_names = [ x[0] for x in saved_models_names ]
+
+                    if len(saved_models_names) != 0:
+                        io.log_info ("Choose one of saved models, or enter a name to create a new model.")
+                        io.log_info ("[r] : rename")
+                        io.log_info ("[d] : delete")
+                        io.log_info ("")
+                        for i, model_name in enumerate(saved_models_names):
+                            s = f"[{i}] : {model_name} "
+                            if i == 0:
+                                s += "- latest"
+                            io.log_info (s)
+
+                        inp = io.input_str(f"", "0", show_default_value=False )
+                        model_idx = -1
+                        try:
+                            model_idx = np.clip ( int(inp), 0, len(saved_models_names)-1 )
+                        except:
+                            pass
+
+                        if model_idx == -1:
+                            if len(inp) == 1:
+                                is_rename = inp[0] == 'r'
+                                is_delete = inp[0] == 'd'
+
+                                if is_rename or is_delete:
+                                    if len(saved_models_names) != 0:
+
+                                        if is_rename:
+                                            name = io.input_str(f"Enter the name of the model you want to rename")
+                                        elif is_delete:
+                                            name = io.input_str(f"Enter the name of the model you want to delete")
+
+                                        if name in saved_models_names:
+
+                                            if is_rename:
+                                                new_model_name = io.input_str(f"Enter new name of the model")
+
+                                            for filepath in pathex.get_paths(saved_models_path):
+                                                filepath_name = filepath.name
+
+                                                model_filename, remain_filename = filepath_name.split('_', 1)
+                                                if model_filename == name:
+
+                                                    if is_rename:
+                                                        new_filepath = filepath.parent / ( new_model_name + '_' + remain_filename )
+                                                        filepath.rename (new_filepath)
+                                                    elif is_delete:
+                                                        filepath.unlink()
+                                continue
+
+                            self.model_name = inp
+                        else:
+                            self.model_name = saved_models_names[model_idx]
+
+                    else:
+                        self.model_name = io.input_str(f"No saved models found. Enter a name of a new model", "new")
+                        self.model_name = self.model_name.replace('_', ' ')
+                    break
+
+            self.model_name = self.model_name + '_' + self.model_class_name
+        else:
+            self.model_name = force_model_class_name

         self.iter = 0
         self.options = {}
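The ModelBase change above adds a force_model_class_name argument: when a model such as XSeg or FANSeg passes it, the interactive choose/rename/delete prompt is skipped and the model files get a fixed name. A minimal sketch of that control flow, with simplified, hypothetical names (resolve_model_name and prompt_for_name stand in for the interactive loop and are not in the repository):

  def resolve_model_name(model_class_name, force_model_class_name=None,
                         force_model_name=None, prompt_for_name=input):
      if force_model_class_name is None:
          # interactive path: pick a saved model or type a new name,
          # then suffix it with the class name (e.g. 'mymodel_SAEHD')
          name = force_model_name if force_model_name is not None else prompt_for_name()
          return f"{name}_{model_class_name}"
      # fixed path: XSeg passes force_model_class_name='XSeg', so its
      # weight files always share one name and no prompt is shown
      return force_model_class_name

  print(resolve_model_name('XSeg', force_model_class_name='XSeg'))  # -> 'XSeg'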
@@ -13,6 +13,9 @@ from samplelib import *

 class FANSegModel(ModelBase):

+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, force_model_class_name='FANSeg', **kwargs)
+
     #override
     def on_initialize_options(self):
         device_config = nn.getCurrentDeviceConfig()
@@ -48,7 +51,7 @@ class FANSegModel(ModelBase):
         mask_shape = nn.get4Dshape(resolution,resolution,1)

         # Initializing model classes
-        self.model = TernausNet(f'{self.model_name}_FANSeg_{FaceType.toString(self.face_type)}',
+        self.model = TernausNet(f'FANSeg_{FaceType.toString(self.face_type)}',
                                 resolution,
                                 load_weights=not self.is_first_run(),
                                 weights_file_root=self.get_model_root_path(),
@@ -117,14 +120,14 @@ class FANSegModel(ModelBase):

         src_generator = SampleGeneratorFace(training_data_src_path, random_ct_samples_path=training_data_src_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
                                             sample_process_options=SampleProcessor.Options(random_flip=True),
-                                            output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'ct_mode':'lct', 'warp':True, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'motion_blur':(25, 5), 'gaussian_blur':(25,5), 'data_format':nn.data_format, 'resolution': resolution},
+                                            output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'ct_mode':'lct', 'warp':True, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'random_motion_blur':(25, 5), 'random_gaussian_blur':(25,5), 'data_format':nn.data_format, 'resolution': resolution},
                                                                     {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':True, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                                                                   ],
                                             generators_count=src_generators_count )

         dst_generator = SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
                                             sample_process_options=SampleProcessor.Options(random_flip=True),
-                                            output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp':False, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'motion_blur':(25, 5), 'gaussian_blur':(25,5), 'data_format':nn.data_format, 'resolution': resolution},
+                                            output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp':False, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                                                                   ],
                                             generators_count=dst_generators_count,
                                             raise_on_no_data=False )
@@ -7,29 +7,19 @@ import numpy as np
 from core import mathlib
 from core.interact import interact as io
 from core.leras import nn
-from facelib import FaceType, TernausNet, DFLSegNet
+from facelib import FaceType, TernausNet, XSegNet
 from models import ModelBase
 from samplelib import *

-class SkinSegModel(ModelBase):
+class XSegModel(ModelBase):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, force_model_class_name='XSeg', **kwargs)

     #override
     def on_initialize_options(self):
         device_config = nn.getCurrentDeviceConfig()
-        yn_str = {True:'y',False:'n'}
-
-        ask_override = self.ask_override()
-        if self.is_first_run() or ask_override:
-            self.ask_autobackup_hour()
-            self.ask_write_preview_history()
-            self.ask_target_iter()
-            self.ask_batch_size(8)
-
-        default_lr_dropout = self.options['lr_dropout'] = self.load_or_def_option('lr_dropout', False)
-
-        if self.is_first_run() or ask_override:
-            self.options['lr_dropout'] = io.input_bool ("Use learning rate dropout", default_lr_dropout, help_message="When the face is trained enough, you can enable this option to get extra sharpness and reduce subpixel shake for less amount of iterations.")
+        self.set_batch_size(4)

     #override
     def on_initialize(self):
         device_config = nn.getCurrentDeviceConfig()
@@ -43,20 +33,20 @@ class SkinSegModel(ModelBase):
         self.resolution = resolution = 256
         self.face_type = FaceType.WHOLE_FACE

-        place_model_on_cpu = True #len(devices) == 0
+        place_model_on_cpu = len(devices) == 0
         models_opt_device = '/CPU:0' if place_model_on_cpu else '/GPU:0'

         bgr_shape = nn.get4Dshape(resolution,resolution,3)
         mask_shape = nn.get4Dshape(resolution,resolution,1)

         # Initializing model classes
-        self.model = DFLSegNet(name=f'{self.model_name}_SkinSeg',
+        self.model = XSegNet(name=f'XSeg',
                              resolution=resolution,
                              load_weights=not self.is_first_run(),
                              weights_file_root=self.get_model_root_path(),
                              training=True,
                              place_model_on_cpu=place_model_on_cpu,
-                             optimizer=nn.RMSprop(lr=0.0001, lr_dropout=0.3 if self.options['lr_dropout'] else 1.0, name='opt'),
+                             optimizer=nn.RMSprop(lr=0.0001, lr_dropout=0.3, name='opt'),
                              data_format=nn.data_format)

         if self.is_training:
@@ -111,38 +101,33 @@ class SkinSegModel(ModelBase):

             # initializing sample generators
             cpu_count = min(multiprocessing.cpu_count(), 8)
+            src_dst_generators_count = cpu_count // 2
             src_generators_count = cpu_count // 2
             dst_generators_count = cpu_count // 2
-            src_generators_count = int(src_generators_count * 1.5)

-            """
-            src_generator = SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
-                                                sample_process_options=SampleProcessor.Options(random_flip=True),
-                                                output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp':True, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR_RANDOM_HSV_SHIFT, 'border_replicate':False, 'face_type':self.face_type, 'motion_blur':(25, 5), 'gaussian_blur':(25,5), 'random_bilinear_resize':(25,75), 'data_format':nn.data_format, 'resolution': resolution},
-                                                                        {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':True, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.NONE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
-                                                                      ],
-                                                generators_count=src_generators_count )
-            """
-            src_generator = SampleGeneratorFaceSkinSegDataset(self.training_data_src_path,
-                                                              debug=self.is_debug(),
-                                                              batch_size=self.get_batch_size(),
-                                                              resolution=resolution,
-                                                              face_type=self.face_type,
-                                                              generators_count=src_generators_count,
-                                                              data_format=nn.data_format)
+            srcdst_generator = SampleGeneratorFaceXSeg([self.training_data_src_path, self.training_data_dst_path],
+                                                       debug=self.is_debug(),
+                                                       batch_size=self.get_batch_size(),
+                                                       resolution=resolution,
+                                                       face_type=self.face_type,
+                                                       generators_count=src_dst_generators_count,
+                                                       data_format=nn.data_format)
+
+            src_generator = SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
+                                                sample_process_options=SampleProcessor.Options(random_flip=False),
+                                                output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp':False, 'transform':False, 'channel_type' : SampleProcessor.ChannelType.BGR, 'border_replicate':False, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
+                                                                      ],
+                                                generators_count=src_generators_count,
+                                                raise_on_no_data=False )

             dst_generator = SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
-                                                sample_process_options=SampleProcessor.Options(random_flip=True),
-                                                output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp':False, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'border_replicate':False, 'face_type':self.face_type, 'motion_blur':(25, 5), 'gaussian_blur':(25,5), 'random_bilinear_resize':(25,75), 'data_format':nn.data_format, 'resolution': resolution},
+                                                sample_process_options=SampleProcessor.Options(random_flip=False),
+                                                output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE, 'warp':False, 'transform':False, 'channel_type' : SampleProcessor.ChannelType.BGR, 'border_replicate':False, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                                                                       ],
                                                 generators_count=dst_generators_count,
                                                 raise_on_no_data=False )

             if not dst_generator.is_initialized():
                 io.log_info(f"\nTo view the model on unseen faces, place any image faces in {self.training_data_dst_path}.\n")

-            self.set_training_data_generators ([src_generator, dst_generator])
+            self.set_training_data_generators ([srcdst_generator, src_generator, dst_generator])

     #override
     def get_model_filename_list(self):
@@ -154,6 +139,8 @@ class SkinSegModel(ModelBase):

     #override
     def onTrainOneIter(self):
+
+        image_np, mask_np = self.generate_next_samples()[0]
         loss = self.train (image_np, mask_np)

@@ -163,8 +150,8 @@ class SkinSegModel(ModelBase):
     def onGetPreview(self, samples):
         n_samples = min(4, self.get_batch_size(), 800 // self.resolution )

-        src_samples, dst_samples = samples
-        image_np, mask_np = src_samples
+        srcdst_samples, src_samples, dst_samples = samples
+        image_np, mask_np = srcdst_samples

         I, M, IM, = [ np.clip( nn.to_data_format(x,"NHWC", self.model_data_format), 0.0, 1.0) for x in ([image_np,mask_np] + self.view (image_np) ) ]
         M, IM, = [ np.repeat (x, (3,), -1) for x in [M, IM] ]
@@ -174,10 +161,24 @@ class SkinSegModel(ModelBase):
         result = []
         st = []
         for i in range(n_samples):
-            ar = I[i]*M[i]+0.5*I[i]*(1-M[i])+0.5*green_bg*(1-M[i]), IM[i], I[i]*IM[i] + green_bg*(1-IM[i])
+            ar = I[i]*M[i]+0.5*I[i]*(1-M[i])+0.5*green_bg*(1-M[i]), IM[i], I[i]*IM[i]+0.5*I[i]*(1-IM[i]) + 0.5*green_bg*(1-IM[i])
             st.append ( np.concatenate ( ar, axis=1) )
-        result += [ ('SkinSeg training faces', np.concatenate (st, axis=0 )), ]
+        result += [ ('XSeg training faces', np.concatenate (st, axis=0 )), ]
+
+        if len(src_samples) != 0:
+            src_np, = src_samples
+
+            D, DM, = [ np.clip(nn.to_data_format(x,"NHWC", self.model_data_format), 0.0, 1.0) for x in ([src_np] + self.view (src_np) ) ]
+            DM, = [ np.repeat (x, (3,), -1) for x in [DM] ]
+
+            st = []
+            for i in range(n_samples):
+                ar = D[i], DM[i], D[i]*DM[i] + 0.5*D[i]*(1-DM[i]) + 0.5*green_bg*(1-DM[i])
+                st.append ( np.concatenate ( ar, axis=1) )
+
+            result += [ ('XSeg src faces', np.concatenate (st, axis=0 )), ]

         if len(dst_samples) != 0:
             dst_np, = dst_samples
@@ -187,11 +188,11 @@ class SkinSegModel(ModelBase):

             st = []
             for i in range(n_samples):
-                ar = D[i], DM[i], D[i]*DM[i]+ green_bg*(1-DM[i])
+                ar = D[i], DM[i], D[i]*DM[i] + 0.5*D[i]*(1-DM[i]) + 0.5*green_bg*(1-DM[i])
                 st.append ( np.concatenate ( ar, axis=1) )

-            result += [ ('SkinSeg unseen faces', np.concatenate (st, axis=0 )), ]
+            result += [ ('XSeg dst faces', np.concatenate (st, axis=0 )), ]

         return result

-Model = SkinSegModel
+Model = XSegModel
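The preview rows above composite each face over a green background with the same blend in every panel: full brightness inside the mask, and a 50/50 mix of the dimmed face and green outside it. A small sketch of that blend, assuming img is an HWC float image and mask an HW1 float mask, both in [0, 1] (overlay_preview is an illustrative name, not in the repository):

  import numpy as np

  def overlay_preview(img, mask):
      # Matches the I*M + 0.5*I*(1-M) + 0.5*green_bg*(1-M) expression above.
      green_bg = np.zeros_like(img)
      green_bg[..., 1] = 1.0                      # pure green, same shape as img
      return img*mask + 0.5*img*(1.0 - mask) + 0.5*green_bg*(1.0 - mask)

  img  = np.random.rand(256, 256, 3).astype(np.float32)
  mask = np.ones((256, 256, 1), np.float32)       # all-ones mask: output == img
  out  = overlay_preview(img, mask)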