update == 04.20.2019 == (#242)
* superbly improved fanseg
* added FANSeg extractor for src and dst faces, to use it in training
* update to 'partial' func
* trained FANSeg_256_full_face.h5; new experimental models: AVATAR, RecycleGAN
* fix for TCC mode cards (Tesla): there was a conflict with plaidML initialization
* update manuals
This commit is contained in:
parent 7be2fd67f5
commit 046649e6be
32 changed files with 1152 additions and 329 deletions
@@ -1,19 +1,23 @@
import sys
import multiprocessing
import operator
import os
import shutil
import time
import traceback
from pathlib import Path
from utils import Path_utils

import cv2
from utils.DFLPNG import DFLPNG
from utils.DFLJPG import DFLJPG
from utils.cv2_utils import *
import shutil
import numpy as np
import time
import multiprocessing

from converters import Converter
from joblib import Subprocessor, SubprocessFunctionCaller
from interact import interact as io
from joblib import SubprocessFunctionCaller, Subprocessor
from utils import Path_utils
from utils.cv2_utils import *
from utils.DFLJPG import DFLJPG
from utils.DFLPNG import DFLPNG
from imagelib import normalize_channels

class ConvertSubprocessor(Subprocessor):
    class Cli(Subprocessor.Cli):
@@ -26,6 +30,7 @@ class ConvertSubprocessor(Subprocessor):
            self.converter = client_dict['converter']
            self.output_path = Path(client_dict['output_dir']) if 'output_dir' in client_dict.keys() else None
            self.alignments = client_dict['alignments']
            self.avatar_image_paths = client_dict['avatar_image_paths']
            self.debug = client_dict['debug']

            #transfer and set stdin in order to work code.interact in debug subprocess
@@ -45,13 +50,15 @@ class ConvertSubprocessor(Subprocessor):

        #override
        def process_data(self, data):
            filename_path = Path(data)
            idx, filename = data
            filename_path = Path(filename)
            files_processed = 1
            faces_processed = 0

            output_filename_path = self.output_path / (filename_path.stem + '.png')

            if self.converter.type == Converter.TYPE_FACE and filename_path.stem not in self.alignments.keys():
            if (self.converter.type == Converter.TYPE_FACE or self.converter.type == Converter.TYPE_FACE_AVATAR ) \
               and filename_path.stem not in self.alignments.keys():
                if not self.debug:
                    self.log_info ( 'no faces found for %s, copying without faces' % (filename_path.name) )

@@ -62,19 +69,18 @@ class ConvertSubprocessor(Subprocessor):
                cv2_imwrite ( str(output_filename_path), image )
            else:
                image = (cv2_imread(str(filename_path)) / 255.0).astype(np.float32)
                h,w,c = image.shape
                if c > 3:
                    image = image[...,0:3]

                image = normalize_channels (image, 3)

                if self.converter.type == Converter.TYPE_IMAGE:
                    image = self.converter.convert_image(image, None, self.debug)
                    image = self.converter.cli_convert_image(image, None, self.debug)

                    if self.debug:
                        raise NotImplementedError
                        #for img in image:
                        #    io.show_image ('Debug convert', img )
                        #    cv2.waitKey(0)
                        return (1, image)

                    faces_processed = 1

                elif self.converter.type == Converter.TYPE_IMAGE_WITH_LANDMARKS:
                    #currently unused
                    if filename_path.suffix == '.png':
                        dflimg = DFLPNG.load( str(filename_path) )
                    elif filename_path.suffix == '.jpg':
@@ -85,7 +91,7 @@ class ConvertSubprocessor(Subprocessor):
                    if dflimg is not None:
                        image_landmarks = dflimg.get_landmarks()

                        image = self.converter.convert_image(image, image_landmarks, self.debug)
                        image = self.converter.convert_image(image, image_landmarks, self.debug)

                        if self.debug:
                            raise NotImplementedError
@@ -96,7 +102,13 @@ class ConvertSubprocessor(Subprocessor):
                    else:
                        self.log_err ("%s is not a dfl image file" % (filename_path.name) )

                elif self.converter.type == Converter.TYPE_FACE:
                elif self.converter.type == Converter.TYPE_FACE or self.converter.type == Converter.TYPE_FACE_AVATAR:

                    ava_face = None
                    if self.converter.type == Converter.TYPE_FACE_AVATAR:
                        ava_filename_path = self.avatar_image_paths[idx]
                        ava_face = (cv2_imread(str(ava_filename_path)) / 255.0).astype(np.float32)
                        ava_face = normalize_channels (ava_face, 3)
                    faces = self.alignments[filename_path.stem]

                    if self.debug:
@@ -108,9 +120,9 @@ class ConvertSubprocessor(Subprocessor):
                                self.log_info ( '\nConverting face_num [%d] in file [%s]' % (face_num, filename_path) )

                            if self.debug:
                                debug_images += self.converter.cli_convert_face(image, image_landmarks, self.debug)
                                debug_images += self.converter.cli_convert_face(image, image_landmarks, self.debug, avaperator_face_bgr=ava_face)
                            else:
                                image = self.converter.cli_convert_face(image, image_landmarks, self.debug)
                                image = self.converter.cli_convert_face(image, image_landmarks, self.debug, avaperator_face_bgr=ava_face)

                        except Exception as e:
                            e_str = traceback.format_exc()
@@ -133,16 +145,19 @@ class ConvertSubprocessor(Subprocessor):
        #overridable
        def get_data_name (self, data):
            #return string identificator of your data
            return data
            idx, filename = data
            return filename

    #override
    def __init__(self, converter, input_path_image_paths, output_path, alignments, debug = False):
    def __init__(self, converter, input_path_image_paths, output_path, alignments, avatar_image_paths=None, debug = False):
        super().__init__('Converter', ConvertSubprocessor.Cli, 86400 if debug == True else 60)

        self.converter = converter
        self.input_data = self.input_path_image_paths = input_path_image_paths
        self.input_data_idxs = [ *range(len(self.input_data)) ]
        self.output_path = output_path
        self.alignments = alignments
        self.avatar_image_paths = avatar_image_paths
        self.debug = debug

        self.files_processed = 0
@@ -158,6 +173,7 @@ class ConvertSubprocessor(Subprocessor):
                      'converter' : self.converter,
                      'output_dir' : str(self.output_path),
                      'alignments' : self.alignments,
                      'avatar_image_paths' : self.avatar_image_paths,
                      'debug': self.debug,
                      'stdin_fd': sys.stdin.fileno() if self.debug else None
                      }
@@ -167,7 +183,7 @@ class ConvertSubprocessor(Subprocessor):
        if self.debug:
            io.named_window ("Debug convert")

        io.progress_bar ("Converting", len (self.input_data) )
        io.progress_bar ("Converting", len (self.input_data_idxs) )

    #overridable optional
    def on_clients_finalized(self):
@@ -178,13 +194,15 @@ class ConvertSubprocessor(Subprocessor):

    #override
    def get_data(self, host_dict):
        if len (self.input_data) > 0:
            return self.input_data.pop(0)
        if len (self.input_data_idxs) > 0:
            idx = self.input_data_idxs.pop(0)
            return (idx, self.input_data[idx])
        return None

    #override
    def on_data_return (self, host_dict, data):
        self.input_data.insert(0, data)
        idx, filename = data
        self.input_data_idxs.insert(0, idx)

    #override
    def on_result (self, host_dict, data, result):
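Note on the hunk above: the host now hands workers (idx, filename) tuples instead of bare filenames, so a returned item can be re-queued by index and the avatar operator face at the same index can be looked up later. A standalone sketch of that queue pattern (the names below are illustrative, not code from the repository):

input_data = ['frame_0001.png', 'frame_0002.png', 'frame_0003.png']   # list stays untouched
input_data_idxs = list(range(len(input_data)))                        # only indices move

def get_data():
    # hand out the next (idx, filename) pair, or None when the queue is drained
    if len(input_data_idxs) > 0:
        idx = input_data_idxs.pop(0)
        return (idx, input_data[idx])
    return None

def on_data_return(data):
    # a failed item is re-queued by pushing its index back to the front
    idx, filename = data
    input_data_idxs.insert(0, idx)

item = get_data()              # -> (0, 'frame_0001.png')
on_data_return(item)           # worker died: index 0 returns to the head of the queue
assert get_data() == (0, 'frame_0001.png')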
@@ -209,7 +227,8 @@ def main (args, device_args):
    io.log_info ("Running converter.\r\n")

    aligned_dir = args.get('aligned_dir', None)

    avaperator_aligned_dir = args.get('avaperator_aligned_dir', None)

    try:
        input_path = Path(args['input_dir'])
        output_path = Path(args['output_dir'])
@@ -233,9 +252,10 @@ def main (args, device_args):
        model = models.import_model( args['model_name'] )(model_path, device_args=device_args)
        converter = model.get_converter()

        input_path_image_paths = Path_utils.get_image_paths(input_path)
        alignments = None

        if converter.type == Converter.TYPE_FACE:
        avatar_image_paths = None
        if converter.type == Converter.TYPE_FACE or converter.type == Converter.TYPE_FACE_AVATAR:
            if aligned_dir is None:
                io.log_err('Aligned directory not found. Please ensure it exists.')
                return
@@ -267,12 +287,45 @@ def main (args, device_args):
                    alignments[ source_filename_stem ] = []

                alignments[ source_filename_stem ].append (dflimg.get_source_landmarks())


        if converter.type == Converter.TYPE_FACE_AVATAR:
            if avaperator_aligned_dir is None:
                io.log_err('Avatar operator aligned directory not found. Please ensure it exists.')
                return

            avaperator_aligned_path = Path(avaperator_aligned_dir)
            if not avaperator_aligned_path.exists():
                io.log_err('Avatar operator aligned directory not found. Please ensure it exists.')
                return

            avatar_image_paths = []
            for filename in io.progress_bar_generator( Path_utils.get_image_paths(avaperator_aligned_path) , "Sorting avaperator faces"):
                filepath = Path(filename)
                if filepath.suffix == '.png':
                    dflimg = DFLPNG.load( str(filepath) )
                elif filepath.suffix == '.jpg':
                    dflimg = DFLJPG.load ( str(filepath) )
                else:
                    dflimg = None

                if dflimg is None:
                    io.log_err ("Fatal error: %s is not a dfl image file" % (filepath.name) )
                    return

                avatar_image_paths += [ (filename, dflimg.get_source_filename() ) ]
            avatar_image_paths = [ p[0] for p in sorted(avatar_image_paths, key=operator.itemgetter(1)) ]

            if len(input_path_image_paths) < len(avatar_image_paths):
                io.log_err("Input faces count must be >= avatar operator faces count.")
                return

        files_processed, faces_processed = ConvertSubprocessor (
                    converter = converter,
                    input_path_image_paths = Path_utils.get_image_paths(input_path),
                    input_path_image_paths = input_path_image_paths,
                    output_path = output_path,
                    alignments = alignments,
                    avatar_image_paths = avatar_image_paths,
                    debug = args.get('debug',False)
                    ).run()

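For the AVATAR converter, the driving faces are ordered by the source frame they were extracted from and then paired with the input frames by index, which is why the input sequence must be at least as long as the avatar sequence. A self-contained sketch of that sorting and pairing (file names are made up for illustration):

import operator

# (extracted face file, source frame recorded in its DFL metadata)
avatar_faces = [
    ('avatar_aligned/00002_0.jpg', '00002.png'),
    ('avatar_aligned/00001_0.jpg', '00001.png'),
    ('avatar_aligned/00003_0.jpg', '00003.png'),
]

# sort by source filename, keep only the face paths - same idea as the hunk above
avatar_image_paths = [ p[0] for p in sorted(avatar_faces, key=operator.itemgetter(1)) ]

input_frames = ['frames/00001.png', 'frames/00002.png', 'frames/00003.png', 'frames/00004.png']
assert len(input_frames) >= len(avatar_image_paths)

# each worker receives (idx, input frame) and reads avatar_image_paths[idx]
for idx in range(len(avatar_image_paths)):
    print(input_frames[idx], '<-', avatar_image_paths[idx])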
@@ -10,11 +10,13 @@ import mathlib
import imagelib
import cv2
from utils import Path_utils
from utils.DFLPNG import DFLPNG
from utils.DFLJPG import DFLJPG
from utils.cv2_utils import *
import facelib
from facelib import FaceType
from facelib import LandmarksProcessor
from facelib import FANSegmentator
from nnlib import nnlib
from joblib import Subprocessor
from interact import interact as io
@@ -79,7 +81,12 @@ class ExtractSubprocessor(Subprocessor):
                    self.second_pass_e.__enter__()
                else:
                    self.second_pass_e = None


            elif self.type == 'fanseg':
                nnlib.import_all (device_config)
                self.e = facelib.FANSegmentator(256, FaceType.toString(FaceType.FULL) )
                self.e.__enter__()

            elif self.type == 'final':
                pass
@@ -124,6 +131,8 @@ class ExtractSubprocessor(Subprocessor):
                h, w, ch = image.shape
                if h == w:
                    #extracting from already extracted jpg image?
                    if filename_path.suffix == '.png':
                        src_dflimg = DFLPNG.load ( str(filename_path) )
                    if filename_path.suffix == '.jpg':
                        src_dflimg = DFLJPG.load ( str(filename_path) )
@@ -253,15 +262,22 @@ class ExtractSubprocessor(Subprocessor):
                    cv2_imwrite(debug_output_file, debug_image, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )

                return data



            elif self.type == 'fanseg':
                if src_dflimg is not None:
                    fanseg_mask = self.e.extract( image / 255.0 )
                    src_dflimg.embed_and_set( filename_path_str,
                                              fanseg_mask=fanseg_mask,
                                              #fanseg_mask_ver=FANSegmentator.VERSION,
                                            )

        #overridable
        def get_data_name (self, data):
            #return string identificator of your data
            return data.filename

    #override
    def __init__(self, input_data, type, image_size, face_type, debug_dir=None, multi_gpu=False, cpu_only=False, manual=False, manual_window_size=0, final_output_path=None):
    def __init__(self, input_data, type, image_size=None, face_type=None, debug_dir=None, multi_gpu=False, cpu_only=False, manual=False, manual_window_size=0, final_output_path=None):
        self.input_data = input_data
        self.type = type
        self.image_size = image_size
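Putting the new pieces together: the 'fanseg' pass builds a full-face FANSegmentator, runs it on each already-aligned face and writes the predicted mask back into the image's DFL metadata. A condensed sketch of that flow; it assumes the pretrained FANSeg weights are available, uses a default nnlib device config, and the path is illustrative:

import facelib
from facelib import FaceType
from nnlib import nnlib
from utils.DFLJPG import DFLJPG
from utils.cv2_utils import cv2_imread

nnlib.import_all( nnlib.DeviceConfig() )     # assumption: default device config

filename = 'aligned/00001_0.jpg'             # an already-extracted, square face image
dflimg   = DFLJPG.load(filename)
image    = cv2_imread(filename)

seg = facelib.FANSegmentator(256, FaceType.toString(FaceType.FULL))
seg.__enter__()                              # the extractor enters the model context manually
fanseg_mask = seg.extract(image / 255.0)     # same call as in the hunk above
dflimg.embed_and_set(filename, fanseg_mask=fanseg_mask)   # store the mask in the DFL metadata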
@@ -561,7 +577,7 @@ class ExtractSubprocessor(Subprocessor):
        if 'cpu' in backend:
            cpu_only = True

        if 'rects' in type or type == 'landmarks':
        if 'rects' in type or type == 'landmarks' or type == 'fanseg':
            if not cpu_only and type == 'rects-mt' and backend == "plaidML": #plaidML works with MT very slowly
                cpu_only = True

@@ -583,7 +599,7 @@ class ExtractSubprocessor(Subprocessor):
                dev_name = nnlib.device.getDeviceName(idx)
                dev_vram = nnlib.device.getDeviceVRAMTotalGb(idx)

                if not manual and (type == 'rects-dlib' or type == 'rects-mt'):
                if not manual and (type == 'rects-dlib' or type == 'rects-mt' ):
                    for i in range ( int (max (1, dev_vram / 2) ) ):
                        result += [ (idx, 'GPU', '%s #%d' % (dev_name,i) , dev_vram) ]
                else:
@@ -658,7 +674,34 @@ class DeletedFilesSearcherSubprocessor(Subprocessor):
        return self.result


#currently unused
def extract_fanseg(input_dir, device_args={} ):
    multi_gpu = device_args.get('multi_gpu', False)
    cpu_only = device_args.get('cpu_only', False)

    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    paths_to_extract = []
    for filename in Path_utils.get_image_paths(input_path) :
        filepath = Path(filename)
        if filepath.suffix == '.png':
            dflimg = DFLPNG.load( str(filepath) )
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load ( str(filepath) )
        else:
            dflimg = None

        if dflimg is not None:
            paths_to_extract.append (filepath)

    paths_to_extract_len = len(paths_to_extract)
    if paths_to_extract_len > 0:
        io.log_info ("Performing extract fanseg for %d files..." % (paths_to_extract_len) )
        data = ExtractSubprocessor ([ ExtractSubprocessor.Data(filename) for filename in paths_to_extract ], 'fanseg', multi_gpu=multi_gpu, cpu_only=cpu_only).run()


def main(input_dir,
         output_dir,
         debug_dir=None,

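extract_fanseg is marked 'currently unused', but it is the batch entry point for embedding FANSeg masks into an existing aligned folder. A hedged usage sketch; the import assumes these functions live in mainscripts/Extractor.py as in the upstream layout, and the folder name is illustrative:

from mainscripts import Extractor

Extractor.extract_fanseg('workspace/data_src/aligned',
                         device_args={'multi_gpu': False, 'cpu_only': False})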
@@ -397,13 +397,17 @@ def mask_editor_main(input_dir, confirmed_dir=None, skipped_dir=None):
        else:
            lmrks = dflimg.get_landmarks()
            ie_polys = dflimg.get_ie_polys()
            fanseg_mask = dflimg.get_fanseg_mask()

            if filepath.name in cached_images:
                img = cached_images[filepath.name]
            else:
                img = cached_images[filepath.name] = cv2_imread(str(filepath)) / 255.0

            mask = LandmarksProcessor.get_image_hull_mask( img.shape, lmrks)
            if fanseg_mask is not None:
                mask = fanseg_mask
            else:
                mask = LandmarksProcessor.get_image_hull_mask( img.shape, lmrks)
    else:
        img = np.zeros ( (target_wh,target_wh,3) )
        mask = np.ones ( (target_wh,target_wh,3) )

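The mask editor now prefers an embedded FANSeg mask and only falls back to the landmark hull mask when none is stored. A small illustrative helper capturing that fallback (the function name is made up; get_fanseg_mask, get_landmarks and get_image_hull_mask are the calls used in the hunk above):

from facelib import LandmarksProcessor

def choose_editor_mask(dflimg, img_shape):
    # prefer the embedded FANSeg mask, otherwise rebuild a hull mask
    # from the stored 68-point landmarks
    fanseg_mask = dflimg.get_fanseg_mask()
    if fanseg_mask is not None:
        return fanseg_mask
    return LandmarksProcessor.get_image_hull_mask(img_shape, dflimg.get_landmarks())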
@@ -7,6 +7,33 @@ from utils.cv2_utils import *
from facelib import LandmarksProcessor
from interact import interact as io

def remove_fanseg_file (filepath):
    filepath = Path(filepath)

    if filepath.suffix == '.png':
        dflimg = DFLPNG.load( str(filepath) )
    elif filepath.suffix == '.jpg':
        dflimg = DFLJPG.load ( str(filepath) )
    else:
        return

    if dflimg is None:
        io.log_err ("%s is not a dfl image file" % (filepath.name) )
        return

    dflimg.remove_fanseg_mask()
    dflimg.embed_and_set( str(filepath) )


def remove_fanseg_folder(input_path):
    input_path = Path(input_path)

    io.log_info ("Removing fanseg mask...\r\n")

    for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Removing"):
        filepath = Path(filepath)
        remove_fanseg_file(filepath)

def convert_png_to_jpg_file (filepath):
    filepath = Path(filepath)

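If an embedded mask turns out badly, remove_fanseg_folder strips it from every face in a directory so the tools fall back to the hull mask again. A hedged usage sketch; the import assumes these helpers live in mainscripts/Util.py as in the upstream layout, and the folder name is illustrative:

from mainscripts import Util

Util.remove_fanseg_folder('workspace/data_dst/aligned')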