Mirror of https://github.com/iperov/DeepFaceLab.git
SAE : WARNING, RETRAIN IS REQUIRED !
Fixed the model sizes from the previous update and worked around a bug in the ML framework (Keras) that forced the model to train on random noise. Converter: blur is now available on the same keys as sharpening (a negative amount blurs, a positive amount sharpens). Added a new model, 'TrueFace': a GAN model ported from https://github.com/NVlabs/FUNIT. It produces near-zero morphing and high facial detail, but it has a higher failure rate than the other models, so keep the src and dst facesets in the same lighting conditions.
This commit is contained in:
parent 201b762541
commit dc11ec32be
26 changed files with 1308 additions and 250 deletions
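For context, a minimal sketch of the idea behind the converter change (one signed amount drives both blur and sharpen). The helper name blursharpen_demo and the simplified weighting are assumptions for illustration only; the GaussianBlur/addWeighted mix follows the blursharpen_func shown in the diff below.

import cv2
import numpy as np

def blursharpen_demo(img, kernel_size=3, amount=100):
    # amount in [-100, 100]: positive sharpens (unsharp mask), negative blurs,
    # mirroring how the converter now maps one key pair to both effects.
    if kernel_size % 2 == 0:
        kernel_size += 1
    blur = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    k = abs(amount) / 100.0
    if amount > 0:
        # Push the image away from its blurred copy (unsharp masking).
        return cv2.addWeighted(img, 1.0 + k, blur, -k, 0)
    if amount < 0:
        # Blend toward the blurred copy.
        return cv2.addWeighted(img, 1.0 - k, blur, k, 0)
    return img

# Example: sharpen and blur the same test image.
test = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
sharpened = blursharpen_demo(test, amount=40)
blurred = blursharpen_demo(test, amount=-40)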
@@ -87,22 +87,26 @@ class ConvertSubprocessor(Subprocessor):
        #therefore forcing active_DeviceConfig to CPU only
        nnlib.active_DeviceConfig = nnlib.DeviceConfig (cpu_only=True)

        def sharpen_func (img, sharpen_mode=0, kernel_size=3, amount=150):
        def blursharpen_func (img, sharpen_mode=0, kernel_size=3, amount=100):
            if kernel_size % 2 == 0:
                kernel_size += 1

            if sharpen_mode == 1: #box
                kernel = np.zeros( (kernel_size, kernel_size), dtype=np.float32)
                kernel[ kernel_size//2, kernel_size//2] = 1.0
                box_filter = np.ones( (kernel_size, kernel_size), dtype=np.float32) / (kernel_size**2)
                kernel = kernel + (kernel - box_filter) * amount
                return cv2.filter2D(img, -1, kernel)
            elif sharpen_mode == 2: #gaussian
            if amount > 0:
                if sharpen_mode == 1: #box
                    kernel = np.zeros( (kernel_size, kernel_size), dtype=np.float32)
                    kernel[ kernel_size//2, kernel_size//2] = 1.0
                    box_filter = np.ones( (kernel_size, kernel_size), dtype=np.float32) / (kernel_size**2)
                    kernel = kernel + (kernel - box_filter) * amount
                    return cv2.filter2D(img, -1, kernel)
                elif sharpen_mode == 2: #gaussian
                    blur = cv2.GaussianBlur(img, (kernel_size, kernel_size) , 0)
                    img = cv2.addWeighted(img, 1.0 + (0.5 * amount), blur, -(0.5 * amount), 0)
                    return img
            elif amount < 0:
                blur = cv2.GaussianBlur(img, (kernel_size, kernel_size) , 0)
                img = cv2.addWeighted(img, 1.0 + (0.5 * amount), blur, -(0.5 * amount), 0)
                return img
                img = cv2.addWeighted(img, 1.0 - a / 50.0, blur, a /50.0, 0)
                return img
            return img

        self.sharpen_func = sharpen_func
        self.blursharpen_func = blursharpen_func

        self.fanseg_by_face_type = {}
        self.fanseg_input_size = 256
@@ -128,7 +132,7 @@ class ConvertSubprocessor(Subprocessor):
    #override
    def process_data(self, pf): #pf=ProcessingFrame
        cfg = pf.cfg.copy()
        cfg.sharpen_func = self.sharpen_func
        cfg.blursharpen_func = self.blursharpen_func
        cfg.superres_func = self.superres_func
        cfg.ebs_ct_func = self.ebs_ct_func
@@ -221,11 +225,13 @@ class ConvertSubprocessor(Subprocessor):

        session_data = None
        if self.is_interactive and self.converter_session_filepath.exists():
            try:
                with open( str(self.converter_session_filepath), "rb") as f:
                    session_data = pickle.loads(f.read())
            except Exception as e:
                pass

            if io.input_bool ("Use saved session? (y/n skip:y) : ", True):
                try:
                    with open( str(self.converter_session_filepath), "rb") as f:
                        session_data = pickle.loads(f.read())
                except Exception as e:
                    pass

        self.frames = frames
        self.frames_idxs = [ *range(len(self.frames)) ]
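In isolation, the new flow above (only load the pickled converter session if the user confirms) looks roughly like the sketch below; load_saved_session and the ask parameter are illustrative stand-ins, while the prompt text and the pickle handling are taken from the diff.

import pickle
from pathlib import Path

def load_saved_session(session_filepath, ask):
    # Returns previously saved session data, or None if the file is missing,
    # the user declines, or the file cannot be unpickled.
    session_data = None
    if Path(session_filepath).exists():
        if ask("Use saved session? (y/n skip:y) : ", True):
            try:
                with open(str(session_filepath), "rb") as f:
                    session_data = pickle.loads(f.read())
            except Exception:
                pass  # a corrupt session file is silently ignored, as in the converter
    return session_data

# e.g. session_data = load_saved_session(converter_session_filepath, io.input_bool)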
@@ -430,9 +436,9 @@ class ConvertSubprocessor(Subprocessor):
                elif chr_key == 'g':
                    cfg.add_color_degrade_power(-1 if not shift_pressed else -5)
                elif chr_key == 'y':
                    cfg.add_sharpen_amount(1 if not shift_pressed else 5)
                    cfg.add_blursharpen_amount(1 if not shift_pressed else 5)
                elif chr_key == 'h':
                    cfg.add_sharpen_amount(-1 if not shift_pressed else -5)
                    cfg.add_blursharpen_amount(-1 if not shift_pressed else -5)
                elif chr_key == 'u':
                    cfg.add_output_face_scale(1 if not shift_pressed else 5)
                elif chr_key == 'j':
@@ -453,9 +459,9 @@ class ConvertSubprocessor(Subprocessor):

                else:
                    if chr_key == 'y':
                        cfg.add_sharpen_amount(1 if not shift_pressed else 5)
                        cfg.add_blursharpen_amount(1 if not shift_pressed else 5)
                    elif chr_key == 'h':
                        cfg.add_sharpen_amount(-1 if not shift_pressed else -5)
                        cfg.add_blursharpen_amount(-1 if not shift_pressed else -5)
                    elif chr_key == 's':
                        cfg.toggle_add_source_image()
                    elif chr_key == 'v':
@@ -576,6 +582,8 @@ class ConvertSubprocessor(Subprocessor):
def main (args, device_args):
    io.log_info ("Running converter.\r\n")

    training_data_src_dir = args.get('training_data_src_dir', None)
    training_data_src_path = Path(training_data_src_dir) if training_data_src_dir is not None else None
    aligned_dir = args.get('aligned_dir', None)
    avaperator_aligned_dir = args.get('avaperator_aligned_dir', None)
@@ -598,7 +606,7 @@ def main (args, device_args):
    is_interactive = io.input_bool ("Use interactive converter? (y/n skip:y) : ", True) if not io.is_colab() else False

    import models
    model = models.import_model( args['model_name'] )(model_path, device_args=device_args)
    model = models.import_model( args['model_name'])(model_path, device_args=device_args, training_data_src_path=training_data_src_path)
    converter_session_filepath = model.get_strpath_storage_for_file('converter_session.dat')
    predictor_func, predictor_input_shape, cfg = model.get_ConverterConfig()
@@ -1,26 +1,27 @@
import traceback
import math
import multiprocessing
import operator
import os
import shutil
import sys
import time
import multiprocessing
import shutil
from pathlib import Path
import numpy as np
import math
import mathlib
import imagelib

import cv2
from utils import Path_utils
from utils.DFLPNG import DFLPNG
from utils.DFLJPG import DFLJPG
from utils.cv2_utils import *
import numpy as np

import facelib
from facelib import FaceType
from facelib import LandmarksProcessor
from facelib import FANSegmentator
from nnlib import nnlib
from joblib import Subprocessor
import imagelib
import mathlib
from facelib import FaceType, FANSegmentator, LandmarksProcessor
from interact import interact as io
from joblib import Subprocessor
from nnlib import nnlib
from utils import Path_utils
from utils.cv2_utils import *
from utils.DFLJPG import DFLJPG
from utils.DFLPNG import DFLPNG

DEBUG = False
@@ -43,6 +44,7 @@ class ExtractSubprocessor(Subprocessor):
            self.type = client_dict['type']
            self.image_size = client_dict['image_size']
            self.face_type = client_dict['face_type']
            self.max_faces_from_image = client_dict['max_faces_from_image']
            self.device_idx = client_dict['device_idx']
            self.cpu_only = client_dict['device_type'] == 'CPU'
            self.final_output_path = Path(client_dict['final_output_dir']) if 'final_output_dir' in client_dict.keys() else None
@@ -154,6 +156,13 @@ class ExtractSubprocessor(Subprocessor):
                    rects = data.rects = self.e.extract (rotated_image, is_bgr=True)
                    if len(rects) != 0:
                        break

                if self.max_faces_from_image != 0 and len(data.rects) > 1:
                    #sort by largest area first
                    x = [ [(l,t,r,b), (r-l)*(b-t) ] for (l,t,r,b) in data.rects]
                    x = sorted(x, key=operator.itemgetter(1), reverse=True )
                    x = [ a[0] for a in x]
                    data.rects = x[0:self.max_faces_from_image]

                return data
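The rect-limiting logic above can be checked on its own: rectangles are ranked by area, largest first, and only the first max_faces_from_image are kept. A small self-contained sketch (the function name and the sample boxes are made up for illustration):

import operator

def limit_rects(rects, max_faces_from_image):
    # rects are (l, t, r, b) boxes; keep the largest-area boxes first.
    if max_faces_from_image == 0 or len(rects) <= 1:
        return rects
    x = [[(l, t, r, b), (r - l) * (b - t)] for (l, t, r, b) in rects]
    x = sorted(x, key=operator.itemgetter(1), reverse=True)
    return [a[0] for a in x][0:max_faces_from_image]

# Example: a 100x100 box outranks a 50x50 box, so only it survives with a limit of 1.
print(limit_rects([(0, 0, 50, 50), (10, 10, 110, 110)], 1))  # [(10, 10, 110, 110)]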
@@ -283,7 +292,7 @@ class ExtractSubprocessor(Subprocessor):
            return data.filename

    #override
    def __init__(self, input_data, type, image_size=None, face_type=None, debug_dir=None, multi_gpu=False, cpu_only=False, manual=False, manual_window_size=0, final_output_path=None):
    def __init__(self, input_data, type, image_size=None, face_type=None, debug_dir=None, multi_gpu=False, cpu_only=False, manual=False, manual_window_size=0, max_faces_from_image=0, final_output_path=None):
        self.input_data = input_data
        self.type = type
        self.image_size = image_size
@@ -292,6 +301,7 @@ class ExtractSubprocessor(Subprocessor):
        self.final_output_path = final_output_path
        self.manual = manual
        self.manual_window_size = manual_window_size
        self.max_faces_from_image = max_faces_from_image
        self.result = []

        self.devices = ExtractSubprocessor.get_devices_for_config(self.manual, self.type, multi_gpu, cpu_only)
@@ -341,6 +351,7 @@ class ExtractSubprocessor(Subprocessor):
        base_dict = {'type' : self.type,
                     'image_size': self.image_size,
                     'face_type': self.face_type,
                     'max_faces_from_image':self.max_faces_from_image,
                     'debug_dir': self.debug_dir,
                     'final_output_dir': str(self.final_output_path),
                     'stdin_fd': sys.stdin.fileno() }
@@ -681,8 +692,96 @@ class DeletedFilesSearcherSubprocessor(Subprocessor):
    def get_result(self):
        return self.result

def main(input_dir,
         output_dir,
         debug_dir=None,
         detector='mt',
         manual_fix=False,
         manual_output_debug_fix=False,
         manual_window_size=1368,
         image_size=256,
         face_type='full_face',
         max_faces_from_image=0,
         device_args={}):

    #currently unused
    input_path = Path(input_dir)
    output_path = Path(output_dir)
    face_type = FaceType.fromString(face_type)

    multi_gpu = device_args.get('multi_gpu', False)
    cpu_only = device_args.get('cpu_only', False)

    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    if output_path.exists():
        if not manual_output_debug_fix and input_path != output_path:
            output_images_paths = Path_utils.get_image_paths(output_path)
            if len(output_images_paths) > 0:
                io.input_bool("WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue." % (str(output_path)), False )
                for filename in output_images_paths:
                    Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    if manual_output_debug_fix:
        if debug_dir is None:
            raise ValueError('debug-dir must be specified')
        detector = 'manual'
        io.log_info('Performing re-extract frames which were deleted from _debug directory.')

    input_path_image_paths = Path_utils.get_image_unique_filestem_paths(input_path, verbose_print_func=io.log_info)
    if debug_dir is not None:
        debug_output_path = Path(debug_dir)

        if manual_output_debug_fix:
            if not debug_output_path.exists():
                raise ValueError("%s not found " % ( str(debug_output_path) ))

            input_path_image_paths = DeletedFilesSearcherSubprocessor (input_path_image_paths, Path_utils.get_image_paths(debug_output_path) ).run()
            input_path_image_paths = sorted (input_path_image_paths)
            io.log_info('Found %d images.' % (len(input_path_image_paths)))
        else:
            if debug_output_path.exists():
                for filename in Path_utils.get_image_paths(debug_output_path):
                    Path(filename).unlink()
            else:
                debug_output_path.mkdir(parents=True, exist_ok=True)

    images_found = len(input_path_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            io.log_info ('Performing manual extract...')
            data = ExtractSubprocessor ([ ExtractSubprocessor.Data(filename) for filename in input_path_image_paths ], 'landmarks', image_size, face_type, debug_dir, cpu_only=cpu_only, manual=True, manual_window_size=manual_window_size).run()
        else:
            io.log_info ('Performing 1st pass...')
            data = ExtractSubprocessor ([ ExtractSubprocessor.Data(filename) for filename in input_path_image_paths ], 'rects-'+detector, image_size, face_type, debug_dir, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, max_faces_from_image=max_faces_from_image).run()

            io.log_info ('Performing 2nd pass...')
            data = ExtractSubprocessor (data, 'landmarks', image_size, face_type, debug_dir, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False).run()

        io.log_info ('Performing 3rd pass...')
        data = ExtractSubprocessor (data, 'final', image_size, face_type, debug_dir, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, final_output_path=output_path).run()
        faces_detected += sum([d.faces_detected for d in data])

        if manual_fix:
            if all ( np.array ( [ d.faces_detected > 0 for d in data] ) == True ):
                io.log_info ('All faces are detected, manual fix not needed.')
            else:
                fix_data = [ ExtractSubprocessor.Data(d.filename) for d in data if d.faces_detected == 0 ]
                io.log_info ('Performing manual fix for %d images...' % (len(fix_data)) )
                fix_data = ExtractSubprocessor (fix_data, 'landmarks', image_size, face_type, debug_dir, manual=True, manual_window_size=manual_window_size).run()
                fix_data = ExtractSubprocessor (fix_data, 'final', image_size, face_type, debug_dir, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, final_output_path=output_path).run()
                faces_detected += sum([d.faces_detected for d in fix_data])

    io.log_info ('-------------------------')
    io.log_info ('Images found: %d' % (images_found) )
    io.log_info ('Faces detected: %d' % (faces_detected) )
    io.log_info ('-------------------------')

#unused in end user workflow
def extract_fanseg(input_dir, device_args={} ):
    multi_gpu = device_args.get('multi_gpu', False)
    cpu_only = device_args.get('cpu_only', False)
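With the main() signature shown above, the new max_faces_from_image option can be passed straight through. A hedged usage sketch follows; the directory paths are placeholders, and running it requires the full DeepFaceLab environment on the import path.

# Hypothetical call; paths are placeholders.
from mainscripts import Extractor

Extractor.main('workspace/data_src',          # input_dir with source frames
               'workspace/data_src/aligned',  # output_dir for aligned faces
               detector='s3fd',
               image_size=256,
               face_type='full_face',
               max_faces_from_image=1,        # keep only the largest face per frame
               device_args={'cpu_only': True})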
@@ -709,6 +808,7 @@ def extract_fanseg(input_dir, device_args={} ):
    io.log_info ("Performing extract fanseg for %d files..." % (paths_to_extract_len) )
    data = ExtractSubprocessor ([ ExtractSubprocessor.Data(filename) for filename in paths_to_extract ], 'fanseg', multi_gpu=multi_gpu, cpu_only=cpu_only).run()

#unused in end user workflow
def extract_umd_csv(input_file_csv,
                    image_size=256,
                    face_type='full_face',
@@ -781,94 +881,6 @@ def extract_umd_csv(input_file_csv,
        faces_detected += sum([d.faces_detected for d in data])

    io.log_info ('-------------------------')
    io.log_info ('Images found: %d' % (images_found) )
    io.log_info ('Faces detected: %d' % (faces_detected) )
    io.log_info ('-------------------------')

def main(input_dir,
         output_dir,
         debug_dir=None,
         detector='mt',
         manual_fix=False,
         manual_output_debug_fix=False,
         manual_window_size=1368,
         image_size=256,
         face_type='full_face',
         device_args={}):

    input_path = Path(input_dir)
    output_path = Path(output_dir)
    face_type = FaceType.fromString(face_type)

    multi_gpu = device_args.get('multi_gpu', False)
    cpu_only = device_args.get('cpu_only', False)

    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    if output_path.exists():
        if not manual_output_debug_fix and input_path != output_path:
            output_images_paths = Path_utils.get_image_paths(output_path)
            if len(output_images_paths) > 0:
                io.input_bool("WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue." % (str(output_path)), False )
                for filename in output_images_paths:
                    Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    if manual_output_debug_fix:
        if debug_dir is None:
            raise ValueError('debug-dir must be specified')
        detector = 'manual'
        io.log_info('Performing re-extract frames which were deleted from _debug directory.')

    input_path_image_paths = Path_utils.get_image_unique_filestem_paths(input_path, verbose_print_func=io.log_info)
    if debug_dir is not None:
        debug_output_path = Path(debug_dir)

        if manual_output_debug_fix:
            if not debug_output_path.exists():
                raise ValueError("%s not found " % ( str(debug_output_path) ))

            input_path_image_paths = DeletedFilesSearcherSubprocessor (input_path_image_paths, Path_utils.get_image_paths(debug_output_path) ).run()
            input_path_image_paths = sorted (input_path_image_paths)
            io.log_info('Found %d images.' % (len(input_path_image_paths)))
        else:
            if debug_output_path.exists():
                for filename in Path_utils.get_image_paths(debug_output_path):
                    Path(filename).unlink()
            else:
                debug_output_path.mkdir(parents=True, exist_ok=True)

    images_found = len(input_path_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            io.log_info ('Performing manual extract...')
            data = ExtractSubprocessor ([ ExtractSubprocessor.Data(filename) for filename in input_path_image_paths ], 'landmarks', image_size, face_type, debug_dir, cpu_only=cpu_only, manual=True, manual_window_size=manual_window_size).run()
        else:
            io.log_info ('Performing 1st pass...')
            data = ExtractSubprocessor ([ ExtractSubprocessor.Data(filename) for filename in input_path_image_paths ], 'rects-'+detector, image_size, face_type, debug_dir, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False).run()

            io.log_info ('Performing 2nd pass...')
            data = ExtractSubprocessor (data, 'landmarks', image_size, face_type, debug_dir, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False).run()

        io.log_info ('Performing 3rd pass...')
        data = ExtractSubprocessor (data, 'final', image_size, face_type, debug_dir, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, final_output_path=output_path).run()
        faces_detected += sum([d.faces_detected for d in data])

        if manual_fix:
            if all ( np.array ( [ d.faces_detected > 0 for d in data] ) == True ):
                io.log_info ('All faces are detected, manual fix not needed.')
            else:
                fix_data = [ ExtractSubprocessor.Data(d.filename) for d in data if d.faces_detected == 0 ]
                io.log_info ('Performing manual fix for %d images...' % (len(fix_data)) )
                fix_data = ExtractSubprocessor (fix_data, 'landmarks', image_size, face_type, debug_dir, manual=True, manual_window_size=manual_window_size).run()
                fix_data = ExtractSubprocessor (fix_data, 'final', image_size, face_type, debug_dir, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, final_output_path=output_path).run()
                faces_detected += sum([d.faces_detected for d in fix_data])

    io.log_info ('-------------------------')
    io.log_info ('Images found: %d' % (images_found) )
    io.log_info ('Faces detected: %d' % (faces_detected) )
@@ -45,6 +45,7 @@ def trainerThread (s2c, c2s, e, args, device_args):
                        training_data_src_path=training_data_src_path,
                        training_data_dst_path=training_data_dst_path,
                        pretraining_data_path=pretraining_data_path,
                        is_training=True,
                        debug=debug,
                        device_args=device_args)
mainscripts/dev_misc.py (new file, 50 lines)
@@ -0,0 +1,50 @@
from . import Extractor
from . import Sorter
from pathlib import Path
from utils import Path_utils
import shutil
from interact import interact as io

def extract_vggface2_dataset(input_dir, device_args={} ):
    multi_gpu = device_args.get('multi_gpu', False)
    cpu_only = device_args.get('cpu_only', False)

    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    output_path = input_path.parent / (input_path.name + '_out')

    dir_names = Path_utils.get_all_dir_names(input_path)

    if not output_path.exists():
        output_path.mkdir(parents=True, exist_ok=True)


    for dir_name in dir_names:

        cur_input_path = input_path / dir_name
        cur_output_path = output_path / dir_name

        io.log_info (f"Processing: {str(cur_input_path)} ")

        if not cur_output_path.exists():
            cur_output_path.mkdir(parents=True, exist_ok=True)

        Extractor.main( str(cur_input_path),
                        str(cur_output_path),
                        detector='s3fd',
                        image_size=256,
                        face_type='full_face',
                        max_faces_from_image=1,
                        device_args=device_args )

        io.log_info (f"Sorting: {str(cur_input_path)} ")
        Sorter.main (input_path=str(cur_output_path), sort_by_method='hist')

        try:
            io.log_info (f"Removing: {str(cur_input_path)} ")
            shutil.rmtree(cur_input_path)
        except:
            io.log_info (f"unable to remove: {str(cur_input_path)} ")
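A hedged example of driving this helper; the dataset path is a placeholder, and the module import path is assumed from the file location shown above. The function expects one sub-directory per identity and writes extracted faces to a sibling '_out' directory.

# Hypothetical invocation; 'datasets/vggface2' is a placeholder path.
from mainscripts import dev_misc

dev_misc.extract_vggface2_dataset('datasets/vggface2',
                                  device_args={'multi_gpu': False, 'cpu_only': False})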
Binary files changed (not shown). Image sizes: 159 KiB before, 222 KiB after; 288 KiB before, 385 KiB after.