Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-07-08 05:51:40 -07:00)
SAE: WARNING, RETRAIN IS REQUIRED!
Fixed the model sizes from the previous update and avoided a bug in the ML framework (Keras) that forced the model to train on random noise.
Converter: added blur on the same keys as sharpness.
Added new model 'TrueFace': a GAN model ported from https://github.com/NVlabs/FUNIT. It produces near-zero morphing and a high-detail face, but has a higher failure rate than other models. Keep the src and dst facesets in the same lighting conditions.
This commit is contained in:
parent 201b762541
commit dc11ec32be
26 changed files with 1308 additions and 250 deletions
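Note on the converter change: sharpen and blur now share one control, with a positive amount sharpening and a negative amount blurring. Below is a minimal standalone sketch of that idea, assuming only OpenCV (cv2) and NumPy are available; the function name blursharpen_demo and the exact blend factors are illustrative, not the project's code.

import cv2
import numpy as np

def blursharpen_demo(img, kernel_size=3, amount=0):
    # One signed control: amount > 0 sharpens, amount < 0 blurs, 0 leaves the image unchanged.
    if kernel_size % 2 == 0:
        kernel_size += 1  # OpenCV kernels must have odd size
    if amount > 0:
        # Unsharp-mask style sharpening: push the image away from its blurred copy.
        blur = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
        return cv2.addWeighted(img, 1.0 + 0.5 * amount, blur, -0.5 * amount, 0)
    elif amount < 0:
        # Blur: blend the image towards its blurred copy, stronger for larger |amount|.
        a = -amount
        blur = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
        return cv2.addWeighted(img, 1.0 - a / 50.0, blur, a / 50.0, 0)
    return img

if __name__ == "__main__":
    img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
    print(blursharpen_demo(img, amount=10).shape, blursharpen_demo(img, amount=-10).shape)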
@ -87,22 +87,26 @@ class ConvertSubprocessor(Subprocessor):
                 #therefore forcing active_DeviceConfig to CPU only
                 nnlib.active_DeviceConfig = nnlib.DeviceConfig (cpu_only=True)
 
-            def sharpen_func (img, sharpen_mode=0, kernel_size=3, amount=150):
+            def blursharpen_func (img, sharpen_mode=0, kernel_size=3, amount=100):
                 if kernel_size % 2 == 0:
                     kernel_size += 1
 
-                if sharpen_mode == 1: #box
-                    kernel = np.zeros( (kernel_size, kernel_size), dtype=np.float32)
-                    kernel[ kernel_size//2, kernel_size//2] = 1.0
-                    box_filter = np.ones( (kernel_size, kernel_size), dtype=np.float32) / (kernel_size**2)
-                    kernel = kernel + (kernel - box_filter) * amount
-                    return cv2.filter2D(img, -1, kernel)
-                elif sharpen_mode == 2: #gaussian
-                    blur = cv2.GaussianBlur(img, (kernel_size, kernel_size) , 0)
-                    img = cv2.addWeighted(img, 1.0 + (0.5 * amount), blur, -(0.5 * amount), 0)
-                    return img
+                if amount > 0:
+                    if sharpen_mode == 1: #box
+                        kernel = np.zeros( (kernel_size, kernel_size), dtype=np.float32)
+                        kernel[ kernel_size//2, kernel_size//2] = 1.0
+                        box_filter = np.ones( (kernel_size, kernel_size), dtype=np.float32) / (kernel_size**2)
+                        kernel = kernel + (kernel - box_filter) * amount
+                        return cv2.filter2D(img, -1, kernel)
+                    elif sharpen_mode == 2: #gaussian
+                        blur = cv2.GaussianBlur(img, (kernel_size, kernel_size) , 0)
+                        img = cv2.addWeighted(img, 1.0 + (0.5 * amount), blur, -(0.5 * amount), 0)
+                        return img
+                elif amount < 0:
+                    img = cv2.addWeighted(img, 1.0 - a / 50.0, blur, a /50.0, 0)
+                    return img
                 return img
 
-            self.sharpen_func = sharpen_func
+            self.blursharpen_func = blursharpen_func
 
             self.fanseg_by_face_type = {}
             self.fanseg_input_size = 256
 
@ -128,7 +132,7 @@ class ConvertSubprocessor(Subprocessor):
 
         #override
         def process_data(self, pf): #pf=ProcessingFrame
             cfg = pf.cfg.copy()
-            cfg.sharpen_func = self.sharpen_func
+            cfg.blursharpen_func = self.blursharpen_func
             cfg.superres_func = self.superres_func
             cfg.ebs_ct_func = self.ebs_ct_func
 
@ -221,11 +225,13 @@ class ConvertSubprocessor(Subprocessor):
 
         session_data = None
         if self.is_interactive and self.converter_session_filepath.exists():
-            try:
-                with open( str(self.converter_session_filepath), "rb") as f:
-                    session_data = pickle.loads(f.read())
-            except Exception as e:
-                pass
+            if io.input_bool ("Use saved session? (y/n skip:y) : ", True):
+                try:
+                    with open( str(self.converter_session_filepath), "rb") as f:
+                        session_data = pickle.loads(f.read())
+                except Exception as e:
+                    pass
 
         self.frames = frames
         self.frames_idxs = [ *range(len(self.frames)) ]
 
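The hunk above makes session restore opt-in: the pickled converter_session.dat is only loaded after the user confirms. A minimal sketch of the same pattern, assuming a plain input() prompt in place of the project's io.input_bool helper; load_session is a hypothetical name used only for this illustration.

import pickle
from pathlib import Path

def load_session(session_filepath):
    # Return the unpickled session if the file exists and the user agrees, else None.
    session_data = None
    session_filepath = Path(session_filepath)
    if session_filepath.exists():
        answer = input("Use saved session? (y/n skip:y) : ").strip().lower()
        if answer in ("", "y"):
            try:
                with open(str(session_filepath), "rb") as f:
                    session_data = pickle.loads(f.read())
            except Exception:
                pass  # a corrupt or incompatible session is simply ignored
    return session_data

# session_data = load_session("converter_session.dat")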
@ -430,9 +436,9 @@ class ConvertSubprocessor(Subprocessor):
                     elif chr_key == 'g':
                         cfg.add_color_degrade_power(-1 if not shift_pressed else -5)
                     elif chr_key == 'y':
-                        cfg.add_sharpen_amount(1 if not shift_pressed else 5)
+                        cfg.add_blursharpen_amount(1 if not shift_pressed else 5)
                     elif chr_key == 'h':
-                        cfg.add_sharpen_amount(-1 if not shift_pressed else -5)
+                        cfg.add_blursharpen_amount(-1 if not shift_pressed else -5)
                     elif chr_key == 'u':
                         cfg.add_output_face_scale(1 if not shift_pressed else 5)
                     elif chr_key == 'j':
@ -453,9 +459,9 @@ class ConvertSubprocessor(Subprocessor):
 
                 else:
                     if chr_key == 'y':
-                        cfg.add_sharpen_amount(1 if not shift_pressed else 5)
+                        cfg.add_blursharpen_amount(1 if not shift_pressed else 5)
                     elif chr_key == 'h':
-                        cfg.add_sharpen_amount(-1 if not shift_pressed else -5)
+                        cfg.add_blursharpen_amount(-1 if not shift_pressed else -5)
                     elif chr_key == 's':
                         cfg.toggle_add_source_image()
                     elif chr_key == 'v':
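Both keyboard hunks follow the same pattern: 'y' and 'h' nudge the new blur/sharpen amount by ±1, or by ±5 when Shift is held. A small self-contained sketch of that pattern; the DemoCfg class and the ±100 clamp are assumptions for illustration, not taken from the project.

class DemoCfg:
    def __init__(self):
        self.blursharpen_amount = 0

    def add_blursharpen_amount(self, diff):
        # Accumulate and clamp the signed blur/sharpen amount.
        self.blursharpen_amount = max(-100, min(100, self.blursharpen_amount + diff))

cfg = DemoCfg()
for chr_key, shift_pressed in [('y', False), ('y', True), ('h', False)]:
    if chr_key == 'y':
        cfg.add_blursharpen_amount(1 if not shift_pressed else 5)
    elif chr_key == 'h':
        cfg.add_blursharpen_amount(-1 if not shift_pressed else -5)

print(cfg.blursharpen_amount)  # 1 + 5 - 1 = 5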
@ -576,6 +582,8 @@ class ConvertSubprocessor(Subprocessor):
 def main (args, device_args):
     io.log_info ("Running converter.\r\n")
 
+    training_data_src_dir = args.get('training_data_src_dir', None)
+    training_data_src_path = Path(training_data_src_dir) if training_data_src_dir is not None else None
     aligned_dir = args.get('aligned_dir', None)
     avaperator_aligned_dir = args.get('avaperator_aligned_dir', None)
 
@ -598,7 +606,7 @@ def main (args, device_args):
     is_interactive = io.input_bool ("Use interactive converter? (y/n skip:y) : ", True) if not io.is_colab() else False
 
     import models
-    model = models.import_model( args['model_name'] )(model_path, device_args=device_args)
+    model = models.import_model( args['model_name'])(model_path, device_args=device_args, training_data_src_path=training_data_src_path)
     converter_session_filepath = model.get_strpath_storage_for_file('converter_session.dat')
     predictor_func, predictor_input_shape, cfg = model.get_ConverterConfig()
 