Mirror of https://github.com/iperov/DeepFaceLab.git, synced 2025-07-07 13:32:09 -07:00
New script:

    5.XSeg) data_dst/src mask for XSeg trainer - fetch.bat
    Copies faces containing XSeg polygons to the aligned_xseg\ dir.
    Useful only if you want to collect labeled faces and reuse them in other fakes.

You can now use the trained XSeg mask in the SAEHD training process:
the default 'full_face' mask obtained from landmarks will be replaced with the mask obtained from the trained XSeg model.
To apply or remove it, use

    5.XSeg.optional) trained mask for data_dst/data_src - apply.bat
    5.XSeg.optional) trained mask for data_dst/data_src - remove.bat

    Normally you don't need these. Use them only if you want to use 'face_style' and 'bg_style' with obstructions.

XSeg trainer: you can now choose the type of face.
XSeg trainer: you can now restart training in "override settings".
Merger: the XSeg-* modes can now be used with all face types.

Therefore the old MaskEditor, the FANSEG models, and the FAN-x merger modes have been removed,
because the new XSeg solution is better, simpler and more convenient, and costs only about an hour of manual masking for a regular deepfake.
Parent: e5bad483ca
Commit: 6d3607a13d

30 changed files with 279 additions and 1520 deletions
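For reference, a minimal sketch of what the new .bat wrappers drive: calling the XSegUtil helpers added in this commit directly from Python. The mainscripts.XSegUtil module path and the workspace paths are assumptions for illustration; the function signatures match the diff below.

from pathlib import Path

from mainscripts import XSegUtil  # assumed location of the helpers added in this commit

# Write the trained XSeg mask into every aligned face (what apply.bat does).
XSegUtil.apply_xseg(Path('workspace/data_dst/aligned'), Path('workspace/model'))

# Copy faces that contain XSeg label polygons to a sibling aligned_xseg folder (fetch.bat).
XSegUtil.fetch_xseg(Path('workspace/data_dst/aligned'))

# Strip a previously applied XSeg mask from the faces again (remove.bat).
XSegUtil.remove_xseg(Path('workspace/data_dst/aligned'))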
@@ -1,109 +1,96 @@

import json
import shutil
import traceback
from pathlib import Path

import numpy as np

from core import pathex
from core.imagelib import IEPolys
from core.cv2ex import *
from core.interact import interact as io
from core.leras import nn
from DFLIMG import *
from facelib import XSegNet

Removed: the old labelme JSON helpers merge() and split(), which moved XSeg polygons between *.json files and the IEPolys stored in the face metadata.

def merge(input_dir):
    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('input_dir not found. Please ensure it exists.')

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    images_processed = 0
    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        json_filepath = filepath.parent / (filepath.stem+'.json')
        if json_filepath.exists():
            dflimg = DFLIMG.load(filepath)
            if dflimg is not None and dflimg.has_data():
                try:
                    json_dict = json.loads(json_filepath.read_text())

                    seg_ie_polys = IEPolys()
                    total_points = 0

                    # include polys first
                    for shape in json_dict['shapes']:
                        if shape['shape_type'] == 'polygon' and \
                           shape['label'] != '0':
                            seg_ie_poly = seg_ie_polys.add(1)

                            for x,y in shape['points']:
                                seg_ie_poly.add( int(x), int(y) )
                                total_points += 1

                    # exclude polys
                    for shape in json_dict['shapes']:
                        if shape['shape_type'] == 'polygon' and \
                           shape['label'] == '0':
                            seg_ie_poly = seg_ie_polys.add(0)

                            for x,y in shape['points']:
                                seg_ie_poly.add( int(x), int(y) )
                                total_points += 1

                    if total_points == 0:
                        io.log_info(f"No points found in {json_filepath}, skipping.")
                        continue

                    dflimg.set_seg_ie_polys( seg_ie_polys.dump() )
                    dflimg.save()

                    json_filepath.unlink()

                    images_processed += 1
                except:
                    io.log_err(f"err {filepath}, {traceback.format_exc()}")
                    return

    io.log_info(f"Images processed: {images_processed}")


def split(input_dir):
    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('input_dir not found. Please ensure it exists.')

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    images_processed = 0
    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        json_filepath = filepath.parent / (filepath.stem+'.json')

        dflimg = DFLIMG.load(filepath)
        if dflimg is not None and dflimg.has_data():
            try:
                seg_ie_polys = dflimg.get_seg_ie_polys()
                if seg_ie_polys is not None:
                    json_dict = {}
                    json_dict['version'] = "4.2.9"
                    json_dict['flags'] = {}
                    json_dict['shapes'] = []
                    json_dict['imagePath'] = filepath.name
                    json_dict['imageData'] = None

                    for poly_type, points_list in seg_ie_polys:
                        shape_dict = {}
                        shape_dict['label'] = str(poly_type)
                        shape_dict['points'] = points_list
                        shape_dict['group_id'] = None
                        shape_dict['shape_type'] = 'polygon'
                        shape_dict['flags'] = {}
                        json_dict['shapes'].append( shape_dict )

                    json_filepath.write_text( json.dumps(json_dict, indent=4) )

                    dflimg.set_seg_ie_polys(None)
                    dflimg.save()

                    images_processed += 1
            except:
                io.log_err(f"err {filepath}, {traceback.format_exc()}")
                return

    io.log_info(f"Images processed: {images_processed}")

Added: apply_xseg(), remove_xseg() and fetch_xseg(), which apply a trained XSeg model's mask to aligned faces, remove it again, and copy faces that contain XSeg polygons.

def apply_xseg(input_path, model_path):
    if not input_path.exists():
        raise ValueError(f'{input_path} not found. Please ensure it exists.')

    if not model_path.exists():
        raise ValueError(f'{model_path} not found. Please ensure it exists.')

    io.log_info(f'Applying trained XSeg model to {input_path.name}/ folder.')

    device_config = nn.DeviceConfig.ask_choose_device(choose_only_one=True)
    nn.initialize(device_config)

    xseg = XSegNet(name='XSeg',
                   load_weights=True,
                   weights_file_root=model_path,
                   data_format=nn.data_format,
                   raise_on_no_model_files=True)
    res = xseg.get_resolution()

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            io.log_info(f'{filepath} is not a DFLIMG')
            continue

        img = cv2_imread(filepath).astype(np.float32) / 255.0
        h,w,c = img.shape
        if w != res:
            img = cv2.resize( img, (res,res), interpolation=cv2.INTER_CUBIC )
            if len(img.shape) == 2:
                img = img[...,None]

        mask = xseg.extract(img)
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1

        dflimg.set_xseg_mask(mask)
        dflimg.save()


def remove_xseg(input_path):
    if not input_path.exists():
        raise ValueError(f'{input_path} not found. Please ensure it exists.')

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            io.log_info(f'{filepath} is not a DFLIMG')
            continue

        dflimg.set_xseg_mask(None)
        dflimg.save()


def fetch_xseg(input_path):
    if not input_path.exists():
        raise ValueError(f'{input_path} not found. Please ensure it exists.')

    output_path = input_path.parent / (input_path.name + '_xseg')
    output_path.mkdir(exist_ok=True, parents=True)

    io.log_info(f'Copying faces containing XSeg polygons to {output_path.name}/ folder.')

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    files_copied = 0
    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            io.log_info(f'{filepath} is not a DFLIMG')
            continue

        ie_polys = dflimg.get_seg_ie_polys()

        if ie_polys.has_polys():
            files_copied += 1
            shutil.copy( str(filepath), str(output_path / filepath.name) )

    io.log_info(f'Files copied: {files_copied}')
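The commit message notes that SAEHD training and the merger can now consume the stored XSeg mask. A minimal sketch of reading it back from a face file, assuming DFLIMG exposes a get_xseg_mask() accessor paired with the set_xseg_mask() call used in apply_xseg(); the file path is a placeholder.

from pathlib import Path

import numpy as np

from DFLIMG import *

# Load one aligned face and read back the XSeg mask written by apply_xseg().
dflimg = DFLIMG.load(Path('workspace/data_dst/aligned/00001.jpg'))
if dflimg is not None and dflimg.has_data():
    mask = dflimg.get_xseg_mask()  # assumed accessor paired with set_xseg_mask()
    if mask is not None:
        coverage = float(np.mean(mask >= 0.5))
        print(f'XSeg mask covers {coverage:.1%} of the face crop')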