Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-07-05 20:42:11 -07:00)

Commit: 54548afe1a
Parent: 9365e42f25

refactoring

6 changed files with 110 additions and 25 deletions
@@ -2,11 +2,7 @@ import numpy as np
 import cv2

 from core import randomex

-def gen_warp_params (source, flip, rotation_range=[-10,10], scale_range=[-0.5, 0.5], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05], rnd_state=None ):
-    h,w,c = source.shape
-    if (h != w):
-        raise ValueError ('gen_warp_params accepts only square images.')
-
+def gen_warp_params (w, flip, rotation_range=[-10,10], scale_range=[-0.5, 0.5], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05], rnd_state=None ):
     if rnd_state is None:
         rnd_state = np.random
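
The refactored gen_warp_params takes the target width as a plain integer instead of the source image itself, so the square-image check and shape unpacking move to the caller. A minimal usage sketch of the new signature, assuming core.imagelib exposes gen_warp_params and warp_by_params as the hunks below do; the sample array and ranges are illustrative placeholders, not values from this commit:

# Hypothetical caller of the refactored gen_warp_params.
import numpy as np
from core import imagelib

sample_bgr = np.zeros( (256,256,3), dtype=np.float32 )    # square BGR image
h,w,c = sample_bgr.shape                                  # the caller now owns the shape handling

params = imagelib.gen_warp_params(w, True, rotation_range=[-10,10], scale_range=[-0.5,0.5])
warped = imagelib.warp_by_params(params, sample_bgr, True, True, can_flip=True, border_replicate=False)
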
@@ -308,6 +308,15 @@ def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
     mat = cv2.getAffineTransform(l_t,pts2)
     return mat

+def get_rect_from_landmarks(image_landmarks):
+    mat = get_transform_mat(image_landmarks, 256, FaceType.FULL_NO_ALIGN)
+
+    g_p = transform_points ( np.float32([(0,0),(255,255) ]) , mat, True)
+
+    (l,t,r,b) = g_p[0][0], g_p[0][1], g_p[1][0], g_p[1][1]
+
+    return (l,t,r,b)
+
 def expand_eyebrows(lmrks, eyebrows_expand_mod=1.0):
     if len(lmrks) != 68:
         raise Exception('works only with 68 landmarks')
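
The new get_rect_from_landmarks helper builds a 256-pixel FULL_NO_ALIGN transform from the landmarks and maps the crop corners (0,0) and (255,255) back into source-image coordinates (the trailing True argument appears to request the inverse mapping), so the returned (l,t,r,b) is an axis-aligned face rectangle in the original image. A minimal, hypothetical call; the landmark array is a synthetic placeholder:

import numpy as np
from facelib import LandmarksProcessor

lmrks = np.float32( np.random.rand(68,2) * 256 )           # stand-in 68-point landmarks
l,t,r,b = LandmarksProcessor.get_rect_from_landmarks(lmrks)
print( int(l), int(t), int(r), int(b) )                    # face rect in source-image coordinates
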
@@ -649,7 +658,7 @@ def draw_landmarks (image, image_landmarks, color=(0,255,0), draw_circles=True,
         mask = get_image_hull_mask (image.shape, image_landmarks, ie_polys=ie_polys)
         image[...] = ( image * (1-mask) + image * mask / 2 )[...]

-def draw_rect_landmarks (image, rect, image_landmarks, face_size, face_type, transparent_mask=False, ie_polys=None, landmarks_color=(0,255,0)):
+def draw_rect_landmarks (image, rect, image_landmarks, face_type, face_size=256, transparent_mask=False, ie_polys=None, landmarks_color=(0,255,0)):
     draw_landmarks(image, image_landmarks, color=landmarks_color, transparent_mask=transparent_mask, ie_polys=ie_polys)
     imagelib.draw_rect (image, rect, (255,0,0), 2 )
@@ -248,7 +248,7 @@ class ExtractSubprocessor(Subprocessor):
                         continue

                     if output_debug_path is not None:
-                        LandmarksProcessor.draw_rect_landmarks (debug_image, rect, image_landmarks, image_size, face_type, transparent_mask=True)
+                        LandmarksProcessor.draw_rect_landmarks (debug_image, rect, image_landmarks, face_type, image_size, transparent_mask=True)

                     output_path = final_output_path
                     if data.force_output_path is not None:
@@ -601,7 +601,7 @@ class ExtractSubprocessor(Subprocessor):
             view_landmarks = LandmarksProcessor.transform_points (view_landmarks, mat)

             landmarks_color = (255,255,0) if self.rect_locked else (0,255,0)
-            LandmarksProcessor.draw_rect_landmarks (image, view_rect, view_landmarks, self.image_size, self.face_type, landmarks_color=landmarks_color)
+            LandmarksProcessor.draw_rect_landmarks (image, view_rect, view_landmarks, self.face_type, self.image_size, landmarks_color=landmarks_color)
             self.extract_needed = False

             io.show_image (self.wnd_name, image)
@@ -4,7 +4,7 @@ from pathlib import Path
 import cv2

 from DFLIMG import *
-from facelib import LandmarksProcessor
+from facelib import LandmarksProcessor, FaceType
 from core.imagelib import IEPolys
 from core.interact import interact as io
 from core import pathex
@@ -167,11 +167,19 @@ def add_landmarks_debug_images(input_path):
         if dflimg is None:
             io.log_err ("%s is not a dfl image file" % (filepath.name) )
             continue


         if img is not None:
             face_landmarks = dflimg.get_landmarks()
-            LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True, ie_polys=IEPolys.load(dflimg.get_ie_polys()) )
+
+            face_type = FaceType.fromString ( dflimg.get_face_type() )
+
+            if face_type == FaceType.MARK_ONLY:
+                rect = dflimg.get_source_rect()
+                LandmarksProcessor.draw_rect_landmarks(img, rect, face_landmarks, FaceType.FULL )
+            else:
+                LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True, ie_polys=IEPolys.load(dflimg.get_ie_polys()) )
+

             output_file = '{}{}'.format( str(Path(str(input_path)) / filepath.stem), '_debug.jpg')
             cv2_imwrite(output_file, img, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )
@@ -9,7 +9,7 @@ from DFLIMG import *
 from facelib import FaceType, LandmarksProcessor
 from core.interact import interact as io
 from core.joblib import Subprocessor
-from core import pathex
+from core import pathex, imagelib
 from core.cv2ex import *

 from . import Extractor, Sorter
@@ -392,6 +392,71 @@ def extract_fanseg(input_dir, device_args={} ):
         io.log_info ("Performing extract fanseg for %d files..." % (paths_to_extract_len) )
         data = ExtractSubprocessor ([ ExtractSubprocessor.Data(filename) for filename in paths_to_extract ], 'fanseg', multi_gpu=multi_gpu, cpu_only=cpu_only).run()

+#unused in end user workflow
+def dev_test(input_dir ):
+    input_path = Path(input_dir)
+    if not input_path.exists():
+        raise ValueError('input_dir not found. Please ensure it exists.')
+
+    output_path = input_path.parent / (input_path.name+'_aligned')
+
+    io.log_info(f'Output dir is % {output_path}')
+
+    if output_path.exists():
+        output_images_paths = pathex.get_image_paths(output_path)
+        if len(output_images_paths) > 0:
+            io.input_bool("WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue." % (str(output_path)), False )
+            for filename in output_images_paths:
+                Path(filename).unlink()
+    else:
+        output_path.mkdir(parents=True, exist_ok=True)
+
+    images_paths = pathex.get_image_paths(input_path)
+
+    for filepath in io.progress_bar_generator(images_paths, "Processing"):
+        filepath = Path(filepath)
+
+
+        pts_filepath = filepath.parent / (filepath.stem+'.pts')
+        if pts_filepath.exists():
+            pts = pts_filepath.read_text()
+            pts_lines = pts.split('\n')
+
+            lmrk_lines = None
+            for pts_line in pts_lines:
+                if pts_line == '{':
+                    lmrk_lines = []
+                elif pts_line == '}':
+                    break
+                else:
+                    if lmrk_lines is not None:
+                        lmrk_lines.append (pts_line)
+
+            if lmrk_lines is not None and len(lmrk_lines) == 68:
+                try:
+                    lmrks = [ np.array ( lmrk_line.strip().split(' ') ).astype(np.float32).tolist() for lmrk_line in lmrk_lines]
+                except Exception as e:
+                    print(e)
+                    print(filepath)
+                    continue
+
+                rect = LandmarksProcessor.get_rect_from_landmarks(lmrks)
+
+                output_filepath = output_path / (filepath.stem+'.jpg')
+
+                img = cv2_imread(filepath)
+                img = imagelib.normalize_channels(img, 3)
+                cv2_imwrite(output_filepath, img, [int(cv2.IMWRITE_JPEG_QUALITY), 95] )
+
+                DFLJPG.embed_data(output_filepath, face_type=FaceType.toString(FaceType.MARK_ONLY),
+                                                   landmarks=lmrks,
+                                                   source_filename=filepath.name,
+                                                   source_rect=rect,
+                                                   source_landmarks=lmrks
+                                  )
+
+    io.log_info("Done.")
+
 #unused in end user workflow
 def extract_umd_csv(input_file_csv,
                     face_type='full_face',
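
The new dev_test helper walks the input images, reads a companion .pts file next to each one, collects the 68 coordinate lines found between the '{' and '}' markers, and embeds them into a MARK_ONLY DFLJPG together with the rect returned by the new get_rect_from_landmarks. A standalone sketch of that brace-delimited parsing, assuming an IBUG-style .pts layout (header lines, then one 'x y' pair per line inside braces); read_pts_landmarks is a hypothetical helper, not part of this commit:

import numpy as np

def read_pts_landmarks(text):
    lmrk_lines = None
    for line in text.split('\n'):
        if line == '{':
            lmrk_lines = []              # start collecting after the opening brace
        elif line == '}':
            break                        # stop at the closing brace
        elif lmrk_lines is not None:
            lmrk_lines.append(line)

    if lmrk_lines is None or len(lmrk_lines) != 68:
        return None                      # not a 68-point annotation

    return [ np.array( line.strip().split(' ') ).astype(np.float32).tolist() for line in lmrk_lines ]
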
@@ -464,7 +529,7 @@ def extract_umd_csv(input_file_csv,
     io.log_info ('Faces detected: %d' % (faces_detected) )
     io.log_info ('-------------------------')

-def dev_test(input_dir):
+def dev_test1(input_dir):
     input_path = Path(input_dir)

     dir_names = pathex.get_all_dir_names(input_path)
@@ -51,6 +51,7 @@ class SampleProcessor(object):

         outputs = []
         for sample in samples:
+            sample_face_type = sample.face_type
             sample_bgr = sample.load_bgr()
             ct_sample_bgr = None
             h,w,c = sample_bgr.shape
@@ -59,8 +60,13 @@ class SampleProcessor(object):

             if debug and is_face_sample:
                 LandmarksProcessor.draw_landmarks (sample_bgr, sample.landmarks, (0, 1, 0))

-            params = imagelib.gen_warp_params(sample_bgr, sample_process_options.random_flip, rotation_range=sample_process_options.rotation_range, scale_range=sample_process_options.scale_range, tx_range=sample_process_options.tx_range, ty_range=sample_process_options.ty_range )
+            if sample_face_type == FaceType.MARK_ONLY:
+                warp_resolution = np.max( [ opts.get('resolution', 0) for opts in output_sample_types ] )
+            else:
+                warp_resolution = w
+
+            params = imagelib.gen_warp_params(warp_resolution, sample_process_options.random_flip, rotation_range=sample_process_options.rotation_range, scale_range=sample_process_options.scale_range, tx_range=sample_process_options.tx_range, ty_range=sample_process_options.ty_range )

             outputs_sample = []
             for opts in output_sample_types:
@@ -124,14 +130,15 @@ class SampleProcessor(object):
                         if sample.ie_polys is not None:
                             sample.ie_polys.overlay_mask(img)

-                        if sample.face_type == FaceType.MARK_ONLY:
-                            mat = LandmarksProcessor.get_transform_mat (sample.landmarks, sample.shape[0], face_type)
-                            img = cv2.warpAffine( img, mat, (sample.shape[0],sample.shape[0]), flags=cv2.INTER_LINEAR )
+                        if sample_face_type == FaceType.MARK_ONLY:
+                            mat = LandmarksProcessor.get_transform_mat (sample.landmarks, warp_resolution, face_type)
+                            img = cv2.warpAffine( img, mat, (warp_resolution, warp_resolution), flags=cv2.INTER_LINEAR )
+
                             img = imagelib.warp_by_params (params, img, warp, transform, can_flip=True, border_replicate=False, cv2_inter=cv2.INTER_LINEAR)
                             img = cv2.resize( img, (resolution,resolution), cv2.INTER_LINEAR )[...,None]
                         else:
-                            mat = LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, face_type)
                             img = imagelib.warp_by_params (params, img, warp, transform, can_flip=True, border_replicate=False, cv2_inter=cv2.INTER_LINEAR)
+                            mat = LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, face_type)
                             img = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=cv2.BORDER_CONSTANT, flags=cv2.INTER_LINEAR )[...,None]

                         if channel_type == SPCT.G:
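
Taken together with the gen_warp_params change, the two SampleProcessor hunks above give MARK_ONLY samples a new path: align the raw frame at warp_resolution (the largest resolution requested by any output type), apply the random warp there, then shrink to each output's resolution; already-aligned samples are warped first and mapped straight to the target size. A self-contained sketch of the MARK_ONLY path under those assumptions; all inputs below are synthetic placeholders, not values from this commit:

import cv2
import numpy as np
from core import imagelib
from facelib import FaceType, LandmarksProcessor

img             = np.random.rand(768, 768, 3).astype(np.float32)    # raw MARK_ONLY frame
landmarks       = np.float32( np.random.rand(68,2) * 768 )          # placeholder 68-point landmarks
face_type       = FaceType.FULL
resolution      = 128                                               # per-output resolution
warp_resolution = 256                                               # max resolution over all outputs

params = imagelib.gen_warp_params(warp_resolution, True)

# align at warp_resolution, randomly warp there, then shrink to the output size
mat = LandmarksProcessor.get_transform_mat(landmarks, warp_resolution, face_type)
img = cv2.warpAffine( img, mat, (warp_resolution, warp_resolution), flags=cv2.INTER_LINEAR )
img = imagelib.warp_by_params(params, img, True, True, can_flip=True, border_replicate=False, cv2_inter=cv2.INTER_LINEAR)
img = cv2.resize( img, (resolution, resolution), interpolation=cv2.INTER_LINEAR )[...,None]
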
@@ -164,14 +171,14 @@ class SampleProcessor(object):
                         if gblur_rnd_chance < chance:
                             img = cv2.GaussianBlur(img, (gblur_rnd_kernel,) *2 , 0)

-                        if sample.face_type == FaceType.MARK_ONLY:
-                            mat = LandmarksProcessor.get_transform_mat (sample.landmarks, sample.shape[0], face_type)
-                            img = cv2.warpAffine( img, mat, (sample.shape[0],sample.shape[0]), flags=cv2.INTER_CUBIC )
+                        if sample_face_type == FaceType.MARK_ONLY:
+                            mat = LandmarksProcessor.get_transform_mat (sample.landmarks, warp_resolution, face_type)
+                            img = cv2.warpAffine( img, mat, (warp_resolution,warp_resolution), flags=cv2.INTER_CUBIC )
                             img = imagelib.warp_by_params (params, img, warp, transform, can_flip=True, border_replicate=True)
                             img = cv2.resize( img, (resolution,resolution), cv2.INTER_CUBIC )
-                        else:
-                            mat = LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, face_type)
+                        else:
                             img = imagelib.warp_by_params (params, img, warp, transform, can_flip=True, border_replicate=True)
+                            mat = LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, face_type)
                             img = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=cv2.BORDER_REPLICATE, flags=cv2.INTER_CUBIC )

                         img = np.clip(img.astype(np.float32), 0, 1)