Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-07-06 04:52:13 -07:00)

DFL-2.0 initial branch commit
Commit: 38b85108b3
Parent: 52a67a61b3

154 changed files with 5251 additions and 9414 deletions

@@ -7,10 +7,10 @@ import numpy as np
 from DFLIMG import *
 from facelib import FaceType, LandmarksProcessor
-from interact import interact as io
-from joblib import Subprocessor
-from utils import Path_utils
-from utils.cv2_utils import *
+from core.interact import interact as io
+from core.joblib import Subprocessor
+from core import pathex
+from core.cv2ex import *

 from . import Extractor, Sorter
 from .Extractor import ExtractSubprocessor
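
The remaining hunks repeat this same rename: Path_utils becomes core.pathex, utils.cv2_utils becomes core.cv2ex, and the interact/joblib helpers move under core. Below is a minimal, hypothetical compatibility shim for a script that has to run against both layouts; the new module paths are the ones added in this hunk, the fallback names are the ones being removed, and aliasing Path_utils as pathex is an assumption justified only by the one-to-one function renames in the hunks that follow.

# Hypothetical shim, not part of this commit: prefer the DFL-2.0 layout,
# fall back to the pre-2.0 module names on an older checkout.
try:
    from core import pathex                    # replaces utils.Path_utils
    from core.interact import interact as io   # replaces interact.interact
except ImportError:
    from utils import Path_utils as pathex     # same helper functions, old location
    from interact import interact as io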

@@ -41,7 +41,7 @@ def extract_vggface2_dataset(input_dir, device_args={} ):
     output_path = input_path.parent / (input_path.name + '_out')

-    dir_names = Path_utils.get_all_dir_names(input_path)
+    dir_names = pathex.get_all_dir_names(input_path)

     if not output_path.exists():
         output_path.mkdir(parents=True, exist_ok=True)

@@ -54,7 +54,7 @@ def extract_vggface2_dataset(input_dir, device_args={} ):
         if not cur_output_path.exists():
             cur_output_path.mkdir(parents=True, exist_ok=True)

-        input_path_image_paths = Path_utils.get_image_paths(cur_input_path)
+        input_path_image_paths = pathex.get_image_paths(cur_input_path)

         for filename in input_path_image_paths:
             filename_path = Path(filename)

@@ -116,7 +116,7 @@ def extract_vggface2_dataset(input_dir, device_args={} ):
         cur_input_path = input_path / dir_name
         cur_output_path = output_path / dir_name

-        input_path_image_paths = Path_utils.get_image_paths(cur_input_path)
+        input_path_image_paths = pathex.get_image_paths(cur_input_path)
         l = len(input_path_image_paths)
         #if l < 250 or l > 350:
         #    continue
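
For orientation, this is the enumeration pattern the extract_vggface2_dataset hunks keep touching, condensed into a standalone sketch against the new pathex helpers; the generator itself is illustrative and not part of the commit.

from pathlib import Path
from core import pathex  # DFL-2.0 path helpers introduced by this commit

def iter_identity_images(input_dir):
    # One sub-directory per identity, images listed per directory,
    # mirroring the loop structure shown in the hunks above.
    input_path = Path(input_dir)
    for dir_name in pathex.get_all_dir_names(input_path):
        for filename in pathex.get_image_paths(input_path / dir_name):
            yield dir_name, Path(filename)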

@@ -176,7 +176,7 @@ def extract_vggface2_dataset(input_dir, device_args={} ):
     output_path = input_path.parent / (input_path.name + '_out')

-    dir_names = Path_utils.get_all_dir_names(input_path)
+    dir_names = pathex.get_all_dir_names(input_path)

     if not output_path.exists():
         output_path.mkdir(parents=True, exist_ok=True)

@@ -188,7 +188,7 @@ def extract_vggface2_dataset(input_dir, device_args={} ):
         cur_input_path = input_path / dir_name
         cur_output_path = output_path / dir_name

-        l = len(Path_utils.get_image_paths(cur_input_path))
+        l = len(pathex.get_image_paths(cur_input_path))
         if l < 250 or l > 350:
             continue

@@ -316,13 +316,13 @@ def apply_celebamaskhq(input_dir ):
     if not img_path.exists():
         raise ValueError(f'{str(img_path)} directory not found. Please ensure it exists.')

-    CelebAMASKHQSubprocessor(Path_utils.get_image_paths(img_path),
-                             Path_utils.get_image_paths(mask_path, subdirs=True) ).run()
+    CelebAMASKHQSubprocessor(pathex.get_image_paths(img_path),
+                             pathex.get_image_paths(mask_path, subdirs=True) ).run()

     return

     paths_to_extract = []
-    for filename in io.progress_bar_generator(Path_utils.get_image_paths(img_path), desc="Processing"):
+    for filename in io.progress_bar_generator(pathex.get_image_paths(img_path), desc="Processing"):
         filepath = Path(filename)
         dflimg = DFLIMG.load(filepath)

@@ -381,7 +381,7 @@ def extract_fanseg(input_dir, device_args={} ):
         raise ValueError('Input directory not found. Please ensure it exists.')

     paths_to_extract = []
-    for filename in Path_utils.get_image_paths(input_path) :
+    for filename in pathex.get_image_paths(input_path) :
         filepath = Path(filename)
         dflimg = DFLIMG.load ( filepath )
         if dflimg is not None:
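
extract_fanseg keeps the existing pattern of loading per-image DFL metadata and skipping anything that is not a DFL-tagged image. A condensed sketch of that filter, using only the calls visible in this hunk and assuming the DFLIMG class is what the package's star import provides:

from pathlib import Path
from core import pathex
from DFLIMG import DFLIMG  # assumed export; the file above uses `from DFLIMG import *`

def iter_dfl_images(input_dir):
    # Yield only files whose embedded DFL metadata loads,
    # i.e. the `if dflimg is not None` guard from the hunk above.
    for filename in pathex.get_image_paths(Path(input_dir)):
        filepath = Path(filename)
        dflimg = DFLIMG.load(filepath)
        if dflimg is not None:
            yield filepath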

@@ -413,7 +413,7 @@ def extract_umd_csv(input_file_csv,
     io.log_info("Output dir is %s." % (str(output_path)) )

     if output_path.exists():
-        output_images_paths = Path_utils.get_image_paths(output_path)
+        output_images_paths = pathex.get_image_paths(output_path)
         if len(output_images_paths) > 0:
             io.input_bool("WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue." % (str(output_path)), False )
             for filename in output_images_paths:
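
The hunk cuts off inside the cleanup loop; presumably each listed file is deleted after the prompt. A hedged sketch of that clear-and-confirm step, where the prompt and the enumeration come straight from the hunk and the Path(filename).unlink() call is an assumption:

from pathlib import Path
from core import pathex
from core.interact import interact as io

def clear_existing_output(output_path):
    # Warn, then empty an already-populated output directory.
    output_images_paths = pathex.get_image_paths(output_path)
    if len(output_images_paths) > 0:
        io.input_bool("WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue." % (str(output_path)), False)
        for filename in output_images_paths:
            Path(filename).unlink()  # assumed deletion; the diff ends before this line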

@@ -443,12 +443,7 @@ def extract_umd_csv(input_file_csv,
     data = []
     for d in csv_data:
         filename = input_file_csv_root_path / d['FILE']

-        #pitch, yaw, roll = float(d['PITCH']), float(d['YAW']), float(d['ROLL'])
-        #if pitch < -90 or pitch > 90 or yaw < -90 or yaw > 90 or roll < -90 or roll > 90:
-        #    continue
-        #
-        #pitch_yaw_roll = pitch/90.0, yaw/90.0, roll/90.0

         x,y,w,h = float(d['FACE_X']), float(d['FACE_Y']), float(d['FACE_WIDTH']), float(d['FACE_HEIGHT'])
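
The removed comment block was the old pitch/yaw/roll filter (angles normalized by dividing by 90, out-of-range rows skipped); only the bounding-box columns remain in use. A hypothetical reader for rows shaped like the ones this loop consumes; the column names are the ones referenced in the hunk, while building csv_data with csv.DictReader is an assumption:

import csv
from pathlib import Path

def read_umd_rows(input_file_csv):
    # Yield (image path, face box) per CSV row, matching the fields used above.
    root = Path(input_file_csv).parent
    with open(input_file_csv, newline='') as f:
        for d in csv.DictReader(f):
            filename = root / d['FILE']
            x, y, w, h = (float(d['FACE_X']), float(d['FACE_Y']),
                          float(d['FACE_WIDTH']), float(d['FACE_HEIGHT']))
            yield filename, (x, y, w, h)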

@@ -473,11 +468,11 @@ def extract_umd_csv(input_file_csv,
 def dev_test(input_dir):
     input_path = Path(input_dir)

-    dir_names = Path_utils.get_all_dir_names(input_path)
+    dir_names = pathex.get_all_dir_names(input_path)

     for dir_name in io.progress_bar_generator(dir_names, desc="Processing"):

-        img_paths = Path_utils.get_image_paths (input_path / dir_name)
+        img_paths = pathex.get_image_paths (input_path / dir_name)
         for filename in img_paths:
             filepath = Path(filename)