mirror of https://github.com/iperov/DeepFaceLab.git
Formatting
parent 62c7be73b6
commit 9a41965063

1 changed file with 81 additions and 70 deletions

@@ -7,7 +7,6 @@ import numpy as np
 import imagelib
 from facelib import FaceType, LandmarksProcessor
 
-
 """
 output_sample_types = [
         {} opts,
@@ -42,6 +41,7 @@ opts:
 
 """
 
+
 class SampleProcessor(object):
     class Types(IntEnum):
         NONE = 0
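
Types subclasses IntEnum, so its members are also plain integers and can be ordered and compared directly, the same pattern the ft > sample.face_type check later in this diff relies on (assuming FaceType is likewise an int-valued enum). A minimal sketch; every member other than NONE is hypothetical:

from enum import IntEnum

class TypesSketch(IntEnum):
    # Hypothetical members for illustration; only NONE = 0 appears in the hunk above.
    NONE = 0
    FACE_TYPE_HALF = 1
    FACE_TYPE_FULL = 2

# IntEnum members behave like ints, so ordered comparisons and int() conversion work:
assert TypesSketch.FACE_TYPE_FULL > TypesSketch.FACE_TYPE_HALF
assert int(TypesSketch.NONE) == 0
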
@@ -73,7 +73,8 @@ class SampleProcessor(object):
 
     class Options(object):
 
-        def __init__(self, random_flip = True, rotation_range=[-10,10], scale_range=[-0.05, 0.05], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05] ):
+        def __init__(self, random_flip=True, rotation_range=[-10, 10], scale_range=[-0.05, 0.05],
+                     tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05]):
            self.random_flip = random_flip
            self.rotation_range = rotation_range
            self.scale_range = scale_range
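
The Options constructor just stores the augmentation ranges. Below is a self-contained sketch mirroring the reformatted signature; the tx_range/ty_range assignments are the natural continuation and are not shown in the hunk, and the class name is changed to avoid implying this is the repository's code:

class OptionsSketch(object):
    def __init__(self, random_flip=True, rotation_range=[-10, 10], scale_range=[-0.05, 0.05],
                 tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05]):
        # Store augmentation parameters; rotation is presumably in degrees,
        # scale/tx/ty are fractional ranges.
        self.random_flip = random_flip
        self.rotation_range = rotation_range
        self.scale_range = scale_range
        self.tx_range = tx_range        # assumed continuation, not in the hunk
        self.ty_range = ty_range        # assumed continuation, not in the hunk

# Defaults match the reformatted signature; callers can override any range.
opts = OptionsSketch(rotation_range=[-5, 5])
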
@@ -94,7 +95,11 @@ class SampleProcessor(object):
        if debug and is_face_sample:
            LandmarksProcessor.draw_landmarks(sample_bgr, sample.landmarks, (0, 1, 0))
 
-        params = imagelib.gen_warp_params(sample_bgr, sample_process_options.random_flip, rotation_range=sample_process_options.rotation_range, scale_range=sample_process_options.scale_range, tx_range=sample_process_options.tx_range, ty_range=sample_process_options.ty_range )
+        params = imagelib.gen_warp_params(sample_bgr, sample_process_options.random_flip,
+                                          rotation_range=sample_process_options.rotation_range,
+                                          scale_range=sample_process_options.scale_range,
+                                          tx_range=sample_process_options.tx_range,
+                                          ty_range=sample_process_options.ty_range)
 
        cached_images = collections.defaultdict(dict)
 
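
The hunk above only re-wraps the call to imagelib.gen_warp_params; its body is not part of this diff. The sketch below is an assumption about what such a generator could return, sampling one value per augmentation range, and is not the actual imagelib implementation:

import numpy as np

def gen_warp_params_sketch(img, flip, rotation_range=[-10, 10], scale_range=[-0.05, 0.05],
                           tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05]):
    # Illustrative only: draw one random value from each augmentation range.
    h, w = img.shape[0:2]
    return {
        'rotation': np.random.uniform(*rotation_range),   # assumed degrees
        'scale': 1.0 + np.random.uniform(*scale_range),
        'tx': np.random.uniform(*tx_range) * w,           # assumed pixels
        'ty': np.random.uniform(*ty_range) * h,
        'flip': bool(flip) and np.random.randint(2) == 1,
    }
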
@@ -196,8 +201,11 @@ class SampleProcessor(object):
            if is_face_sample and target_face_type != SPTF.NONE:
                ft = SPTF_FACETYPE_TO_FACETYPE[target_face_type]
                if ft > sample.face_type:
-                    raise Exception ('sample %s type %s does not match model requirement %s. Consider extract necessary type of faces.' % (sample.filename, sample.face_type, ft) )
-                img = cv2.warpAffine( img, LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, ft), (resolution,resolution), flags=cv2.INTER_CUBIC )
+                    raise Exception(
+                        'sample %s type %s does not match model requirement %s. Consider extract necessary type of faces.' % (
+                            sample.filename, sample.face_type, ft))
+                img = cv2.warpAffine(img, LandmarksProcessor.get_transform_mat(sample.landmarks, resolution, ft),
+                                     (resolution, resolution), flags=cv2.INTER_CUBIC)
            else:
                img = cv2.resize(img, (resolution, resolution), cv2.INTER_CUBIC)
 
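
LandmarksProcessor.get_transform_mat is outside this diff, so the sketch below substitutes a plain OpenCV rotation matrix as a stand-in just to show the same warpAffine pattern the reformatted lines use: a 2x3 affine matrix mapping the sample onto a square resolution x resolution crop. The helper name is hypothetical:

import cv2
import numpy as np

def align_to_resolution_sketch(img, resolution, angle_deg=0.0, scale=1.0):
    # Stand-in for get_transform_mat: any 2x3 affine matrix works with warpAffine.
    h, w = img.shape[0:2]
    mat = cv2.getRotationMatrix2D((w * 0.5, h * 0.5), angle_deg, scale)
    return cv2.warpAffine(img, mat, (resolution, resolution), flags=cv2.INTER_CUBIC)

Note that in the unchanged else branch, cv2.INTER_CUBIC is passed as the third positional argument of cv2.resize, which is the dst parameter rather than interpolation; cubic resizing would require interpolation=cv2.INTER_CUBIC.
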
@@ -237,7 +245,9 @@ class SampleProcessor(object):
                elif mode_type == SPTF.MODE_G:
                    img = np.concatenate((np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1), img_mask), -1)
                elif mode_type == SPTF.MODE_GGG:
-                    img = np.concatenate ( ( np.repeat ( np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),-1), (3,), -1), img_mask), -1)
+                    img = np.concatenate(
+                        (np.repeat(np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1), (3,), -1), img_mask),
+                        -1)
                elif mode_type == SPTF.MODE_M and is_face_sample:
                    img = img_mask
 
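
A quick sketch of the channel layouts built by the MODE_G and MODE_GGG branches above: grayscale plus mask gives two channels, while repeating the grayscale plane three times before appending the mask gives four. The 64x64 shapes and zero-filled arrays are illustrative placeholders:

import cv2
import numpy as np

img_bgr = np.zeros((64, 64, 3), dtype=np.float32)    # placeholder BGR sample
img_mask = np.zeros((64, 64, 1), dtype=np.float32)   # placeholder single-channel mask

gray = np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1)    # (64, 64, 1)

mode_g = np.concatenate((gray, img_mask), -1)                           # (64, 64, 2)
mode_ggg = np.concatenate((np.repeat(gray, (3,), -1), img_mask), -1)    # (64, 64, 4)

assert mode_g.shape == (64, 64, 2) and mode_ggg.shape == (64, 64, 4)
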
@@ -262,6 +272,7 @@ class SampleProcessor(object):
        else:
            return outputs
 
+
 """
 close_sample = sample.close_target_list[ np.random.randint(0, len(sample.close_target_list)) ] if sample.close_target_list is not None else None
 close_sample_bgr = close_sample.load_bgr() if close_sample is not None else None