manual extractor: increased FPS; sort by final: now you can specify target number of images; converter: fix seamless mask and exception; huge refactoring

parent 7db469a1da
commit 438213e97c

30 changed files with 1834 additions and 1718 deletions
@@ -4,7 +4,6 @@ import traceback
from pathlib import Path
from utils import Path_utils
import cv2
from tqdm import tqdm
from utils.DFLPNG import DFLPNG
from utils.DFLJPG import DFLJPG
from utils.cv2_utils import *

@@ -13,219 +12,196 @@ import shutil
import numpy as np
import time
import multiprocessing
from models import ConverterBase
from converters import Converter
from joblib import Subprocessor, SubprocessFunctionCaller
from interact import interact as io
class model_process_predictor(object):
    def __init__(self, sq, cq, lock):
        self.sq = sq
        self.cq = cq
        self.lock = lock

class ConvertSubprocessor(Subprocessor):
    class Cli(Subprocessor.Cli):

    def __call__(self, face):
        self.lock.acquire()

        self.sq.put ( {'op': 'predict', 'face' : face} )
        while True:
            if not self.cq.empty():
                obj = self.cq.get()
                obj_op = obj['op']
                if obj_op == 'predict_result':
                    self.lock.release()
                    return obj['result']
            time.sleep(0.005)

def model_process(stdin_fd, model_name, model_dir, in_options, sq, cq):
    sys.stdin = os.fdopen(stdin_fd)

    try:
        model_path = Path(model_dir)

        import models
        model = models.import_model(model_name)(model_path, **in_options)
        converter = model.get_converter(**in_options)
        converter.dummy_predict()

        cq.put ( {'op':'init', 'converter' : converter.copy_and_set_predictor( None ) } )

        #override
        def on_initialize(self, client_dict):
            io.log_info ('Running on %s.' % (client_dict['device_name']) )
            self.device_idx = client_dict['device_idx']
            self.device_name = client_dict['device_name']
            self.converter = client_dict['converter']
            self.output_path = Path(client_dict['output_dir']) if 'output_dir' in client_dict.keys() else None
            self.alignments = client_dict['alignments']
            self.debug = client_dict['debug']

        while True:
            while not sq.empty():
                obj = sq.get()
                obj_op = obj['op']
                if obj_op == 'predict':
                    result = converter.predictor ( obj['face'] )
                    cq.put ( {'op':'predict_result', 'result':result} )
            time.sleep(0.005)
    except Exception as e:
        print ( 'Error: %s' % (str(e)))
        traceback.print_exc()

from utils.SubprocessorBase import SubprocessorBase
class ConvertSubprocessor(SubprocessorBase):
            from nnlib import nnlib
            #model process ate all GPU mem,
            #so we cannot use GPU for any TF operations in converter processes
            #therefore forcing active_DeviceConfig to CPU only
            nnlib.active_DeviceConfig = nnlib.DeviceConfig (cpu_only=True)

            return None

        #override
        def process_data(self, data):
            filename_path = Path(data)
            files_processed = 1
            faces_processed = 0

            output_filename_path = self.output_path / (filename_path.stem + '.png')

            if self.converter.type == Converter.TYPE_FACE and filename_path.stem not in self.alignments.keys():
                if not self.debug:
                    self.log_info ( 'no faces found for %s, copying without faces' % (filename_path.name) )
                    shutil.copy ( str(filename_path), str(output_filename_path) )
            else:
                image = (cv2_imread(str(filename_path)) / 255.0).astype(np.float32)

                if self.converter.type == Converter.TYPE_IMAGE:
                    image = self.converter.convert_image(image, None, self.debug)
                    if self.debug:
                        raise NotImplementedError
                        #for img in image:
                        # io.show_image ('Debug convert', img )
                        # cv2.waitKey(0)
                    faces_processed = 1
                elif self.converter.type == Converter.TYPE_IMAGE_WITH_LANDMARKS:
                    if filename_path.suffix == '.png':
                        dflimg = DFLPNG.load( str(filename_path) )
                    elif filename_path.suffix == '.jpg':
                        dflimg = DFLJPG.load ( str(filename_path) )
                    else:
                        dflimg = None

                    if dflimg is not None:
                        image_landmarks = dflimg.get_landmarks()

                        image = self.converter.convert_image(image, image_landmarks, self.debug)

                        if self.debug:
                            raise NotImplementedError
                            #for img in image:
                            # io.show_image ('Debug convert', img )
                            # cv2.waitKey(0)
                        faces_processed = 1
                    else:
                        self.log_err ("%s is not a dfl image file" % (filename_path.name) )

                elif self.converter.type == Converter.TYPE_FACE:
                    faces = self.alignments[filename_path.stem]

                    if self.debug:
                        debug_images = []

                    for face_num, image_landmarks in enumerate(faces):
                        try:
                            if self.debug:
                                self.log_info ( '\nConverting face_num [%d] in file [%s]' % (face_num, filename_path) )

                            if self.debug:
                                debug_images += self.converter.convert_face(image, image_landmarks, self.debug)
                            else:
                                image = self.converter.convert_face(image, image_landmarks, self.debug)

                        except Exception as e:
                            self.log_info ( 'Error while converting face_num [%d] in file [%s]: %s' % (face_num, filename_path, str(e)) )
                            traceback.print_exc()

                    if self.debug:
                        return (1, debug_images)

                    faces_processed = len(faces)

            if not self.debug:
                cv2_imwrite (str(output_filename_path), (image*255).astype(np.uint8) )

            return (0, files_processed, faces_processed)

        #overridable
        def get_data_name (self, data):
            #return string identifier of your data
            return data

    #override
    def __init__(self, converter, input_path_image_paths, output_path, alignments, debug = False, **in_options):
        super().__init__('Converter', 86400 if debug == True else 60)
        self.converter = converter
        self.input_path_image_paths = input_path_image_paths
    def __init__(self, converter, input_path_image_paths, output_path, alignments, debug = False):
        super().__init__('Converter', ConvertSubprocessor.Cli, 86400 if debug == True else 60)

        self.converter = converter
        self.host_processor, self.cli_func = SubprocessFunctionCaller.make_pair ( self.converter.predictor_func )
        self.process_converter = self.converter.copy_and_set_predictor(self.cli_func)
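        # Note: SubprocessFunctionCaller.make_pair appears to split predictor_func
        # into a host-side message pump and a picklable client-side proxy; workers
        # receive the proxy via process_converter, so every predict call is
        # marshalled back to this host process, where the model actually lives.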

        self.input_data = self.input_path_image_paths = input_path_image_paths
        self.output_path = output_path
        self.alignments = alignments
        self.debug = debug
        self.in_options = in_options
        self.input_data = self.input_path_image_paths

        self.files_processed = 0
        self.faces_processed = 0

    #override
    def process_info_generator(self):
        r = [0] if self.debug else range( min(multiprocessing.cpu_count(), 6) )
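        # a single worker in debug mode (presumably so debug previews stay in one
        # process); otherwise up to 6 CPU workers are spawned.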

        for i in r:
            yield 'CPU%d' % (i), {}, {'device_idx': i,
                                      'device_name': 'CPU%d' % (i),
                                      'converter' : self.converter,
                                      'converter' : self.process_converter,
                                      'output_dir' : str(self.output_path),
                                      'alignments' : self.alignments,
                                      'debug': self.debug,
                                      'in_options': self.in_options
                                      'debug': self.debug
                                      }

    #override
    def get_no_process_started_message(self):
        return 'Unable to start CPU processes.'

    #overridable optional
    def on_clients_initialized(self):
        if self.debug:
            io.named_window ("Debug convert")

        io.progress_bar ("Converting", len (self.input_data) )

    #overridable optional
    def on_clients_finalized(self):
        io.progress_bar_close()

        if self.debug:
            io.destroy_all_windows()

    #override
    def onHostGetProgressBarDesc(self):
        return "Converting"

    #override
    def onHostGetProgressBarLen(self):
        return len (self.input_data)

    #override
    def onHostGetData(self, host_dict):
    def get_data(self, host_dict):
        if len (self.input_data) > 0:
            return self.input_data.pop(0)
        return None

    #override
    def onHostDataReturn (self, host_dict, data):
    def on_data_return (self, host_dict, data):
        self.input_data.insert(0, data)

    #overridable
    def onClientGetDataName (self, data):
        #return string identifier of your data
        return data

    #override
    def onClientInitialize(self, client_dict):
        print ('Running on %s.' % (client_dict['device_name']) )
        self.device_idx = client_dict['device_idx']
        self.device_name = client_dict['device_name']
        self.converter = client_dict['converter']
        self.output_path = Path(client_dict['output_dir']) if 'output_dir' in client_dict.keys() else None
        self.alignments = client_dict['alignments']
        self.debug = client_dict['debug']

        from nnlib import nnlib
        #model process ate all GPU mem,
        #so we cannot use GPU for any TF operations in converter processes (for example image_utils.TFLabConverter)
        #therefore forcing active_DeviceConfig to CPU only
        nnlib.active_DeviceConfig = nnlib.DeviceConfig (cpu_only=True)

        return None

    #override
    def onClientFinalize(self):
        pass
    def on_result (self, host_dict, data, result):
        if result[0] == 0:
            self.files_processed += result[1]
            self.faces_processed += result[2]
        elif result[0] == 1:
            for img in result[1]:
                io.show_image ('Debug convert', (img*255).astype(np.uint8) )
                io.wait_any_key()
        io.progress_bar_inc(1)

    #override
    def on_tick(self):
        self.host_processor.process_messages()
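        # on_tick runs on the host between scheduling passes; pumping messages here
        # is what services the workers' queued predictor calls (see make_pair above).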

    #override
    def onClientProcessData(self, data):
        filename_path = Path(data)

        files_processed = 1
        faces_processed = 0

        output_filename_path = self.output_path / (filename_path.stem + '.png')

        if self.converter.get_mode() == ConverterBase.MODE_FACE and filename_path.stem not in self.alignments.keys():
            if not self.debug:
                print ( 'no faces found for %s, copying without faces' % (filename_path.name) )
            shutil.copy ( str(filename_path), str(output_filename_path) )
        else:
            image = (cv2_imread(str(filename_path)) / 255.0).astype(np.float32)

            if self.converter.get_mode() == ConverterBase.MODE_IMAGE:
                image = self.converter.convert_image(image, None, self.debug)
                if self.debug:
                    for img in image:
                        cv2.imshow ('Debug convert', img )
                        cv2.waitKey(0)
                faces_processed = 1
            elif self.converter.get_mode() == ConverterBase.MODE_IMAGE_WITH_LANDMARKS:
                if filename_path.suffix == '.png':
                    dflimg = DFLPNG.load( str(filename_path), throw_on_no_embedded_data=True )
                elif filename_path.suffix == '.jpg':
                    dflimg = DFLJPG.load ( str(filename_path), throw_on_no_embedded_data=True )
                else:
                    raise Exception ("%s is not a dfl image file" % (filename_path.name) )

                image_landmarks = dflimg.get_landmarks()

                image = self.converter.convert_image(image, image_landmarks, self.debug)
                if self.debug:
                    for img in image:
                        cv2.imshow ('Debug convert', img )
                        cv2.waitKey(0)
                faces_processed = 1
            elif self.converter.get_mode() == ConverterBase.MODE_FACE:
                faces = self.alignments[filename_path.stem]
                for face_num, image_landmarks in enumerate(faces):
                    try:
                        if self.debug:
                            print ( '\nConverting face_num [%d] in file [%s]' % (face_num, filename_path) )

                        image = self.converter.convert_face(image, image_landmarks, self.debug)
                        if self.debug:
                            for img in image:
                                cv2.imshow ('Debug convert', (img*255).astype(np.uint8) )
                                cv2.waitKey(0)
                    except Exception as e:
                        print ( 'Error while converting face_num [%d] in file [%s]: %s' % (face_num, filename_path, str(e)) )
                        traceback.print_exc()
                faces_processed = len(faces)

        if not self.debug:
            cv2_imwrite (str(output_filename_path), (image*255).astype(np.uint8) )

        return (files_processed, faces_processed)

    #override
    def onHostResult (self, host_dict, data, result):
        self.files_processed += result[0]
        self.faces_processed += result[1]
        return 1

    #override
    def onFinalizeAndGetResult(self):
    def get_result(self):
        return self.files_processed, self.faces_processed

def main (input_dir, output_dir, model_dir, model_name, aligned_dir=None, **in_options):
    print ("Running converter.\r\n")
def main (args, device_args):
    io.log_info ("Running converter.\r\n")

    debug = in_options['debug']
    aligned_dir = args.get('aligned_dir', None)

    try:
        input_path = Path(input_dir)
        output_path = Path(output_dir)
        model_path = Path(model_dir)
        input_path = Path(args['input_dir'])
        output_path = Path(args['output_dir'])
        model_path = Path(args['model_dir'])

        if not input_path.exists():
            print('Input directory not found. Please ensure it exists.')
            io.log_err('Input directory not found. Please ensure it exists.')
            return

        if output_path.exists():

@@ -233,126 +209,116 @@ def main (input_dir, output_dir, model_dir, model_name, aligned_dir=None, **in_options):
                Path(filename).unlink()
        else:
            output_path.mkdir(parents=True, exist_ok=True)

        if not model_path.exists():
            print('Model directory not found. Please ensure it exists.')
            io.log_err('Model directory not found. Please ensure it exists.')
            return

        model_sq = multiprocessing.Queue()
        model_cq = multiprocessing.Queue()
        model_lock = multiprocessing.Lock()
        model_p = multiprocessing.Process(target=model_process, args=( sys.stdin.fileno(), model_name, model_dir, in_options, model_sq, model_cq))
        model_p.start()

        while True:
            if not model_cq.empty():
                obj = model_cq.get()
                obj_op = obj['op']
                if obj_op == 'init':
                    converter = obj['converter']
                    break

        import models
        model = models.import_model( args['model_name'] )(model_path, device_args=device_args)
        converter = model.get_converter()
        converter.dummy_predict()
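        # dummy_predict presumably runs one warm-up pass so the prediction graph
        # is fully built before the CPU workers start submitting real frames.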

        alignments = None

        if converter.get_mode() == ConverterBase.MODE_FACE:
        if converter.type == Converter.TYPE_FACE:
            if aligned_dir is None:
                print('Aligned directory not found. Please ensure it exists.')
                io.log_err('Aligned directory not found. Please ensure it exists.')
                return

            aligned_path = Path(aligned_dir)
            if not aligned_path.exists():
                print('Aligned directory not found. Please ensure it exists.')
                io.log_err('Aligned directory not found. Please ensure it exists.')
                return

            alignments = {}

            aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)
            for filepath in tqdm(aligned_path_image_paths, desc="Collecting alignments", ascii=True ):
            for filepath in io.progress_bar_generator(aligned_path_image_paths, "Collecting alignments"):
                filepath = Path(filepath)

                if filepath.suffix == '.png':
                    dflimg = DFLPNG.load( str(filepath), print_on_no_embedded_data=True )
                    dflimg = DFLPNG.load( str(filepath) )
                elif filepath.suffix == '.jpg':
                    dflimg = DFLJPG.load ( str(filepath), print_on_no_embedded_data=True )
                    dflimg = DFLJPG.load ( str(filepath) )
                else:
                    print ("%s is not a dfl image file" % (filepath.name) )

                    dflimg = None

                if dflimg is None:
                    io.log_err ("%s is not a dfl image file" % (filepath.name) )
                    continue

                source_filename_stem = Path( dflimg.get_source_filename() ).stem
                if source_filename_stem not in alignments.keys():
                    alignments[ source_filename_stem ] = []

                alignments[ source_filename_stem ].append (dflimg.get_source_landmarks())

        #interpolate landmarks
        #from facelib import LandmarksProcessor
        #from facelib import FaceType
        #a = sorted(alignments.keys())
        #a_len = len(a)
        #
        #box_pts = 3
        #box = np.ones(box_pts)/box_pts
        #for i in range( a_len ):
        #    if i >= box_pts and i <= a_len-box_pts-1:
        #        af0 = alignments[ a[i] ][0] ##first face
        #        m0 = LandmarksProcessor.get_transform_mat (af0, 256, face_type=FaceType.FULL)
        #
        #        points = []
        #
        #        for j in range(-box_pts, box_pts+1):
        #            af = alignments[ a[i+j] ][0] ##first face
        #            m = LandmarksProcessor.get_transform_mat (af, 256, face_type=FaceType.FULL)
        #            p = LandmarksProcessor.transform_points (af, m)
        #            points.append (p)
        #
        #        points = np.array(points)
        #        points_len = len(points)
        #        t_points = np.transpose(points, [1,0,2])
        #
        #        p1 = np.array ( [ int(np.convolve(x[:,0], box, mode='same')[points_len//2]) for x in t_points ] )
        #        p2 = np.array ( [ int(np.convolve(x[:,1], box, mode='same')[points_len//2]) for x in t_points ] )
        #
        #        new_points = np.concatenate( [np.expand_dims(p1,-1),np.expand_dims(p2,-1)], -1 )
        #
        #        alignments[ a[i] ][0] = LandmarksProcessor.transform_points (new_points, m0, True).astype(np.int32)

        files_processed, faces_processed = ConvertSubprocessor (
                    converter = converter.copy_and_set_predictor( model_process_predictor(model_sq,model_cq,model_lock) ),
                    converter = converter,
                    input_path_image_paths = Path_utils.get_image_paths(input_path),
                    output_path = output_path,
                    alignments = alignments,
                    **in_options ).process()
                    alignments = alignments,
                    debug = args.get('debug',False)
                    ).run()

        model_p.terminate()

        '''
        if model_name == 'AVATAR':
            output_path_image_paths = Path_utils.get_image_paths(output_path)

            last_ok_frame = -1
            for filename in output_path_image_paths:
                filename_path = Path(filename)
                stem = Path(filename).stem
                try:
                    frame = int(stem)
                except:
                    raise Exception ('Aligned avatars must be created from indexed sequence files.')

                if frame-last_ok_frame > 1:
                    start = last_ok_frame + 1
                    end = frame - 1

                    print ("Filling gaps: [%d...%d]" % (start, end) )
                    for i in range (start, end+1):
                        shutil.copy ( str(filename), str( output_path / ('%.5d%s' % (i, filename_path.suffix )) ) )

                last_ok_frame = frame
        '''
        model.finalize()

    except Exception as e:
        print ( 'Error: %s' % (str(e)))
        traceback.print_exc()

'''
if model_name == 'AVATAR':
    output_path_image_paths = Path_utils.get_image_paths(output_path)

    last_ok_frame = -1
    for filename in output_path_image_paths:
        filename_path = Path(filename)
        stem = Path(filename).stem
        try:
            frame = int(stem)
        except:
            raise Exception ('Aligned avatars must be created from indexed sequence files.')

        if frame-last_ok_frame > 1:
            start = last_ok_frame + 1
            end = frame - 1

            print ("Filling gaps: [%d...%d]" % (start, end) )
            for i in range (start, end+1):
                shutil.copy ( str(filename), str( output_path / ('%.5d%s' % (i, filename_path.suffix )) ) )

        last_ok_frame = frame
'''
#interpolate landmarks
#from facelib import LandmarksProcessor
#from facelib import FaceType
#a = sorted(alignments.keys())
#a_len = len(a)
#
#box_pts = 3
#box = np.ones(box_pts)/box_pts
#for i in range( a_len ):
#    if i >= box_pts and i <= a_len-box_pts-1:
#        af0 = alignments[ a[i] ][0] ##first face
#        m0 = LandmarksProcessor.get_transform_mat (af0, 256, face_type=FaceType.FULL)
#
#        points = []
#
#        for j in range(-box_pts, box_pts+1):
#            af = alignments[ a[i+j] ][0] ##first face
#            m = LandmarksProcessor.get_transform_mat (af, 256, face_type=FaceType.FULL)
#            p = LandmarksProcessor.transform_points (af, m)
#            points.append (p)
#
#        points = np.array(points)
#        points_len = len(points)
#        t_points = np.transpose(points, [1,0,2])
#
#        p1 = np.array ( [ int(np.convolve(x[:,0], box, mode='same')[points_len//2]) for x in t_points ] )
#        p2 = np.array ( [ int(np.convolve(x[:,1], box, mode='same')[points_len//2]) for x in t_points ] )
#
#        new_points = np.concatenate( [np.expand_dims(p1,-1),np.expand_dims(p2,-1)], -1 )
#
#        alignments[ a[i] ][0] = LandmarksProcessor.transform_points (new_points, m0, True).astype(np.int32)

@@ -15,10 +15,137 @@ import facelib
from facelib import FaceType
from facelib import LandmarksProcessor
from nnlib import nnlib
from joblib import Subprocessor
from interact import interact as io

class ExtractSubprocessor(Subprocessor):

    class Cli(Subprocessor.Cli):

from utils.SubprocessorBase import SubprocessorBase
class ExtractSubprocessor(SubprocessorBase):
        #override
        def on_initialize(self, client_dict):
            self.log_info ('Running on %s.' % (client_dict['device_name']) )
            self.type = client_dict['type']
            self.image_size = client_dict['image_size']
            self.face_type = client_dict['face_type']
            self.device_idx = client_dict['device_idx']
            self.cpu_only = client_dict['device_type'] == 'CPU'
            self.output_path = Path(client_dict['output_dir']) if 'output_dir' in client_dict.keys() else None
            self.debug = client_dict['debug']
            self.detector = client_dict['detector']

            self.cached_image = (None, None)

            self.e = None
            device_config = nnlib.DeviceConfig ( cpu_only=self.cpu_only, force_gpu_idx=self.device_idx, allow_growth=True)
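            # one DeviceConfig per worker: force_gpu_idx pins this worker to its
            # own GPU, and allow_growth asks the backend not to grab all VRAM up
            # front (assuming the usual TensorFlow meaning of these options).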
            if self.type == 'rects':
                if self.detector is not None:
                    if self.detector == 'mt':
                        nnlib.import_all (device_config)
                        self.e = facelib.MTCExtractor()
                    elif self.detector == 'dlib':
                        nnlib.import_dlib (device_config)
                        self.e = facelib.DLIBExtractor(nnlib.dlib)
                    else:
                        raise ValueError ("Wrong detector type.")

                    if self.e is not None:
                        self.e.__enter__()

            elif self.type == 'landmarks':
                nnlib.import_all (device_config)
                self.e = facelib.LandmarksExtractor(nnlib.keras)
                self.e.__enter__()

            elif self.type == 'final':
                pass

        #override
        def on_finalize(self):
            if self.e is not None:
                self.e.__exit__()

        #override
        def process_data(self, data):
            filename_path = Path( data[0] )

            filename_path_str = str(filename_path)
            if self.cached_image[0] == filename_path_str:
                image = self.cached_image[1]
            else:
                image = cv2_imread( filename_path_str )
                self.cached_image = ( filename_path_str, image )
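                # cache the last decoded image: in manual mode the same frame is
                # resubmitted on every rect change, so repeat hits skip a fresh
                # imread and decode.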

            if image is None:
                self.log_err ( 'Failed to extract %s, reason: cv2_imread() fail.' % ( str(filename_path) ) )
            else:
                if self.type == 'rects':
                    rects = self.e.extract_from_bgr (image)
                    return [str(filename_path), rects]

                elif self.type == 'landmarks':
                    rects = data[1]
                    landmarks = self.e.extract_from_bgr (image, rects)
                    return [str(filename_path), landmarks]

                elif self.type == 'final':
                    src_dflimg = None
                    (h,w,c) = image.shape
                    if h == w:
                        #extracting from already extracted jpg image?
                        if filename_path.suffix == '.jpg':
                            src_dflimg = DFLJPG.load ( str(filename_path) )

                    result = []
                    faces = data[1]

                    if self.debug:
                        debug_output_file = '{}{}'.format( str(Path(str(self.output_path) + '_debug') / filename_path.stem), '.jpg')
                        debug_image = image.copy()

                    for (face_idx, face) in enumerate(faces):
                        output_file = '{}_{}{}'.format(str(self.output_path / filename_path.stem), str(face_idx), '.jpg')

                        rect = face[0]
                        image_landmarks = np.array(face[1])

                        if self.debug:
                            LandmarksProcessor.draw_rect_landmarks (debug_image, rect, image_landmarks, self.image_size, self.face_type)

                        if self.face_type == FaceType.MARK_ONLY:
                            face_image = image
                            face_image_landmarks = image_landmarks
                        else:
                            image_to_face_mat = LandmarksProcessor.get_transform_mat (image_landmarks, self.image_size, self.face_type)
                            face_image = cv2.warpAffine(image, image_to_face_mat, (self.image_size, self.image_size), cv2.INTER_LANCZOS4)
                            face_image_landmarks = LandmarksProcessor.transform_points (image_landmarks, image_to_face_mat)

                        if src_dflimg is not None:
                            #if extracting from dflimg just copy it in order not to lose quality
                            shutil.copy ( str(filename_path), str(output_file) )
                        else:
                            cv2_imwrite(output_file, face_image, [int(cv2.IMWRITE_JPEG_QUALITY), 85] )

                        DFLJPG.embed_data(output_file, face_type = FaceType.toString(self.face_type),
                                                       landmarks = face_image_landmarks.tolist(),
                                                       source_filename = filename_path.name,
                                                       source_rect= rect,
                                                       source_landmarks = image_landmarks.tolist()
                                          )
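                        # the alignment metadata is embedded straight into the
                        # output JPEG, so each aligned face carries its landmarks
                        # and source-frame provenance with it.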

                        result.append (output_file)

                    if self.debug:
                        cv2_imwrite(debug_output_file, debug_image, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )

                    return result
            return None

        #overridable
        def get_data_name (self, data):
            #return string identifier of your data
            return data[0]

    #override
    def __init__(self, input_data, type, image_size, face_type, debug, multi_gpu=False, cpu_only=False, manual=False, manual_window_size=0, detector=None, output_path=None ):
        self.input_data = input_data

@@ -35,34 +162,36 @@ class ExtractSubprocessor(SubprocessorBase):
        self.result = []

        no_response_time_sec = 60 if not self.manual else 999999
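        # manual mode effectively disables the no-response watchdog (999999 s),
        # since the operator may legitimately sit on one frame for a long time.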
        super().__init__('Extractor', no_response_time_sec)
        super().__init__('Extractor', ExtractSubprocessor.Cli, no_response_time_sec)

    #override
    def onHostClientsInitialized(self):
    def on_clients_initialized(self):
        if self.manual == True:
            self.wnd_name = 'Manual pass'
            cv2.namedWindow(self.wnd_name)

            self.landmarks = None
            self.param_x = -1
            self.param_y = -1
            self.param_rect_size = -1
            self.param = {'x': 0, 'y': 0, 'rect_size' : 100, 'rect_locked' : False, 'redraw_needed' : False }
            io.named_window(self.wnd_name)
            io.capture_mouse(self.wnd_name)
            io.capture_keys(self.wnd_name)

            def onMouse(event, x, y, flags, param):
                if event == cv2.EVENT_MOUSEWHEEL:
                    mod = 1 if flags > 0 else -1
                    diff = 1 if param['rect_size'] <= 40 else np.clip(param['rect_size'] / 10, 1, 10)
                    param['rect_size'] = max (5, param['rect_size'] + diff*mod)
                elif event == cv2.EVENT_LBUTTONDOWN:
                    param['rect_locked'] = not param['rect_locked']
                    param['redraw_needed'] = True
                elif not param['rect_locked']:
                    param['x'] = x
                    param['y'] = y

            cv2.setMouseCallback(self.wnd_name, onMouse, self.param)

            self.cache_original_image = (None, None)
            self.cache_image = (None, None)
            self.cache_text_lines_img = (None, None)

            self.landmarks = None
            self.x = 0
            self.y = 0
            self.rect_size = 100
            self.rect_locked = False
            self.redraw_needed = True

        io.progress_bar (None, len (self.input_data))

    #override
    def on_clients_finalized(self):
        if self.manual == True:
            io.destroy_all_windows()

        io.progress_bar_close()

    def get_devices_for_type (self, type, multi_gpu, cpu_only):
        if not cpu_only and (type == 'rects' or type == 'landmarks'):
            if type == 'rects' and self.detector == 'mt' and nnlib.device.backend == "plaidML":

@@ -86,8 +215,11 @@ class ExtractSubprocessor(SubprocessorBase):
                yield (idx, 'GPU', dev_name, dev_vram)

        if cpu_only and (type == 'rects' or type == 'landmarks'):
            for i in range( min(8, multiprocessing.cpu_count() // 2) ):
                yield (i, 'CPU', 'CPU%d' % (i), 0 )
            if self.manual:
                yield (0, 'CPU', 'CPU', 0 )
            else:
                for i in range( min(8, multiprocessing.cpu_count() // 2) ):
                    yield (i, 'CPU', 'CPU%d' % (i), 0 )

        if type == 'final':
            for i in range( min(8, multiprocessing.cpu_count()) ):

@@ -108,25 +240,9 @@ class ExtractSubprocessor(SubprocessorBase):
            client_dict['device_name'] = device_name
            client_dict['device_type'] = device_type
            yield client_dict['device_name'], {}, client_dict

    #override
    def get_no_process_started_message(self):
        if (self.type == 'rects' or self.type == 'landmarks'):
            print ( 'You have no capable GPUs. Try to close programs which can consume VRAM, and run again.')
        elif self.type == 'final':
            print ( 'Unable to start CPU processes.')

    #override
    def onHostGetProgressBarDesc(self):
        return None

    #override
    def onHostGetProgressBarLen(self):
        return len (self.input_data)

    #override
    def onHostGetData(self, host_dict):
    def get_data(self, host_dict):
        if not self.manual:
            if len (self.input_data) > 0:
                return self.input_data.pop(0)

@@ -146,33 +262,68 @@ class ExtractSubprocessor(SubprocessorBase):
            if len(faces) > 0:
                self.rect, self.landmarks = faces.pop()

                self.param['rect_locked'] = True
                self.rect_locked = True
                self.redraw_needed = True
                faces.clear()
                self.param['rect_size'] = ( self.rect[2] - self.rect[0] ) / 2
                self.param['x'] = ( ( self.rect[0] + self.rect[2] ) / 2 ) * self.view_scale
                self.param['y'] = ( ( self.rect[1] + self.rect[3] ) / 2 ) * self.view_scale

                self.rect_size = ( self.rect[2] - self.rect[0] ) / 2
                self.x = ( self.rect[0] + self.rect[2] ) / 2
                self.y = ( self.rect[1] + self.rect[3] ) / 2

            if len(faces) == 0:
                self.original_image = cv2_imread(filename)
                if self.cache_original_image[0] == filename:
                    self.original_image = self.cache_original_image[1]
                else:
                    self.original_image = cv2_imread( filename )
                    self.cache_original_image = (filename, self.original_image )

                (h,w,c) = self.original_image.shape
                self.view_scale = 1.0 if self.manual_window_size == 0 else self.manual_window_size / ( h * (16.0/9.0) )
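                # view_scale fits the frame to manual_window_size; the h * (16/9)
                # term suggests a 16:9 window is assumed.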
                self.original_image = cv2.resize (self.original_image, ( int(w*self.view_scale), int(h*self.view_scale) ), interpolation=cv2.INTER_LINEAR)
                (h,w,c) = self.original_image.shape
                self.view_scale = 1.0 if self.manual_window_size == 0 else self.manual_window_size / ( h * (16.0/9.0) )

                self.text_lines_img = (image_utils.get_draw_text_lines ( self.original_image, (0,0, self.original_image.shape[1], min(100, self.original_image.shape[0]) ),
                                                [ 'Match landmarks with face exactly. Click to confirm/unconfirm selection',
                                                  '[Enter] - confirm face landmarks and continue',
                                                  '[Space] - confirm as unmarked frame and continue',
                                                  '[Mouse wheel] - change rect',
                                                  '[,] [.] - prev frame, next frame',
                                                  '[Q] - skip remaining frames'
                                                ], (1, 1, 1) )*255).astype(np.uint8)
                if self.cache_image[0] == (h,w,c) + (self.view_scale,filename):
                    self.image = self.cache_image[1]
                else:
                    self.image = cv2.resize (self.original_image, ( int(w*self.view_scale), int(h*self.view_scale) ), interpolation=cv2.INTER_LINEAR)
                    self.cache_image = ( (h,w,c) + (self.view_scale,filename), self.image )
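                    # the scaled view is cached under (shape, view_scale, filename),
                    # so redraws of the same frame skip the cv2.resize call.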

                (h,w,c) = self.image.shape

                sh = (0,0, w, min(100, h) )
                if self.cache_text_lines_img[0] == sh:
                    self.text_lines_img = self.cache_text_lines_img[1]
                else:
                    self.text_lines_img = (image_utils.get_draw_text_lines ( self.image, sh,
                                                    [ 'Match landmarks with face exactly. Click to confirm/unconfirm selection',
                                                      '[Enter] - confirm face landmarks and continue',
                                                      '[Space] - confirm as unmarked frame and continue',
                                                      '[Mouse wheel] - change rect',
                                                      '[,] [.] - prev frame, next frame',
                                                      '[Q] - skip remaining frames'
                                                    ], (1, 1, 1) )*255).astype(np.uint8)

                    self.cache_text_lines_img = (sh, self.text_lines_img)

                while True:
                    key = cv2.waitKey(1) & 0xFF
                    new_x = self.x
                    new_y = self.y
                    new_rect_size = self.rect_size

                    mouse_events = io.get_mouse_events(self.wnd_name)
                    for ev in mouse_events:
                        (x, y, ev, flags) = ev
                        if ev == io.EVENT_MOUSEWHEEL and not self.rect_locked:
                            mod = 1 if flags > 0 else -1
                            diff = 1 if new_rect_size <= 40 else np.clip(new_rect_size / 10, 1, 10)
                            new_rect_size = max (5, new_rect_size + diff*mod)
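                            # wheel zoom: a fixed 1 px step for small rects, then
                            # about rect_size/10 clamped to [1, 10]; the rect
                            # never shrinks below 5 px.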
                        elif ev == io.EVENT_LBUTTONDOWN:
                            self.rect_locked = not self.rect_locked
                            self.redraw_needed = True
                        elif not self.rect_locked:
                            new_x = np.clip (x, 0, w-1) / self.view_scale
                            new_y = np.clip (y, 0, h-1) / self.view_scale

                    key_events = io.get_key_events(self.wnd_name)
                    key, = key_events[-1] if len(key_events) > 0 else (0,)
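                    # only the most recent queued key event is consumed per tick;
                    # 0 means no key was pressed.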

                    if key == ord('\r') or key == ord('\n'):
                        faces.append ( [(self.rect), self.landmarks] )
                        is_frame_done = True

@@ -183,200 +334,81 @@ class ExtractSubprocessor(SubprocessorBase):
                    elif key == ord('.'):
                        allow_remark_faces = True
                        # Only save the face if the rect is still locked
                        if self.param['rect_locked']:
                        if self.rect_locked:
                            faces.append ( [(self.rect), self.landmarks] )
                        is_frame_done = True
                        break
                    elif key == ord(',') and len(self.result) > 0:
                        # Only save the face if the rect is still locked
                        if self.param['rect_locked']:
                        if self.rect_locked:
                            faces.append ( [(self.rect), self.landmarks] )
                        go_to_prev_frame = True
                        break
                    elif key == ord('q'):
                        skip_remaining = True
                        break

                    new_param_x = np.clip (self.param['x'], 0, w-1) / self.view_scale
                    new_param_y = np.clip (self.param['y'], 0, h-1) / self.view_scale
                    new_param_rect_size = self.param['rect_size']

                    if self.param_x != new_param_x or \
                       self.param_y != new_param_y or \
                       self.param_rect_size != new_param_rect_size or \
                       self.param['redraw_needed']:

                        self.param_x = new_param_x
                        self.param_y = new_param_y
                        self.param_rect_size = new_param_rect_size

                    if self.x != new_x or \
                       self.y != new_y or \
                       self.rect_size != new_rect_size or \
                       self.redraw_needed:
                        self.x = new_x
                        self.y = new_y
                        self.rect_size = new_rect_size

                        self.rect = ( int(self.param_x-self.param_rect_size),
                                      int(self.param_y-self.param_rect_size),
                                      int(self.param_x+self.param_rect_size),
                                      int(self.param_y+self.param_rect_size) )
                        self.rect = ( int(self.x-self.rect_size),
                                      int(self.y-self.rect_size),
                                      int(self.x+self.rect_size),
                                      int(self.y+self.rect_size) )

                        return [filename, [self.rect]]

                    io.process_messages(0.0001)
            else:
                is_frame_done = True

            if is_frame_done:
                self.result.append ( data )
                self.input_data.pop(0)
                self.inc_progress_bar(1)
                self.param['redraw_needed'] = True
                self.param['rect_locked'] = False
                io.progress_bar_inc(1)
                self.redraw_needed = True
                self.rect_locked = False
            elif go_to_prev_frame:
                self.input_data.insert(0, self.result.pop() )
                self.inc_progress_bar(-1)
                io.progress_bar_inc(-1)
                allow_remark_faces = True
                self.param['redraw_needed'] = True
                self.param['rect_locked'] = False
                self.redraw_needed = True
                self.rect_locked = False
            elif skip_remaining:
                if self.param['rect_locked']:
                if self.rect_locked:
                    faces.append ( [(self.rect), self.landmarks] )
                while len(self.input_data) > 0:
                    self.result.append( self.input_data.pop(0) )
                    self.inc_progress_bar(1)
                    io.progress_bar_inc(1)

        return None

    #override
    def onHostDataReturn (self, host_dict, data):
    def on_data_return (self, host_dict, data):
        if not self.manual:
            self.input_data.insert(0, data)

    #override
    def onClientInitialize(self, client_dict):
        self.safe_print ('Running on %s.' % (client_dict['device_name']) )
        self.type = client_dict['type']
        self.image_size = client_dict['image_size']
        self.face_type = client_dict['face_type']
        self.device_idx = client_dict['device_idx']
        self.cpu_only = client_dict['device_type'] == 'CPU'
        self.output_path = Path(client_dict['output_dir']) if 'output_dir' in client_dict.keys() else None
        self.debug = client_dict['debug']
        self.detector = client_dict['detector']

        self.e = None
        device_config = nnlib.DeviceConfig ( cpu_only=self.cpu_only, force_gpu_idx=self.device_idx, allow_growth=True)
        if self.type == 'rects':
            if self.detector is not None:
                if self.detector == 'mt':
                    nnlib.import_all (device_config)
                    self.e = facelib.MTCExtractor()
                elif self.detector == 'dlib':
                    nnlib.import_dlib (device_config)
                    self.e = facelib.DLIBExtractor(nnlib.dlib)
                self.e.__enter__()

        elif self.type == 'landmarks':
            nnlib.import_all (device_config)
            self.e = facelib.LandmarksExtractor(nnlib.keras)
            self.e.__enter__()

        elif self.type == 'final':
            pass

        return None

    #override
    def onClientFinalize(self):
        if self.e is not None:
            self.e.__exit__()

    #override
    def onClientProcessData(self, data):
        filename_path = Path( data[0] )

        image = cv2_imread( str(filename_path) )

        if image is None:
            print ( 'Failed to extract %s, reason: cv2_imread() fail.' % ( str(filename_path) ) )
        else:
            if self.type == 'rects':
                rects = self.e.extract_from_bgr (image)
                return [str(filename_path), rects]

            elif self.type == 'landmarks':
                rects = data[1]
                landmarks = self.e.extract_from_bgr (image, rects)
                return [str(filename_path), landmarks]

            elif self.type == 'final':
                src_dflimg = None
                (h,w,c) = image.shape
                if h == w:
                    #extracting from already extracted jpg image?
                    if filename_path.suffix == '.jpg':
                        src_dflimg = DFLJPG.load ( str(filename_path) )

                result = []
                faces = data[1]

                if self.debug:
                    debug_output_file = '{}{}'.format( str(Path(str(self.output_path) + '_debug') / filename_path.stem), '.jpg')
                    debug_image = image.copy()

                for (face_idx, face) in enumerate(faces):
                    output_file = '{}_{}{}'.format(str(self.output_path / filename_path.stem), str(face_idx), '.jpg')

                    rect = face[0]
                    image_landmarks = np.array(face[1])

                    if self.debug:
                        LandmarksProcessor.draw_rect_landmarks (debug_image, rect, image_landmarks, self.image_size, self.face_type)

                    if self.face_type == FaceType.MARK_ONLY:
                        face_image = image
                        face_image_landmarks = image_landmarks
                    else:
                        image_to_face_mat = LandmarksProcessor.get_transform_mat (image_landmarks, self.image_size, self.face_type)
                        face_image = cv2.warpAffine(image, image_to_face_mat, (self.image_size, self.image_size), cv2.INTER_LANCZOS4)
                        face_image_landmarks = LandmarksProcessor.transform_points (image_landmarks, image_to_face_mat)

                    if src_dflimg is not None:
                        #if extracting from dflimg just copy it in order not to lose quality
                        shutil.copy ( str(filename_path), str(output_file) )
                    else:
                        cv2_imwrite(output_file, face_image, [int(cv2.IMWRITE_JPEG_QUALITY), 85] )

                    DFLJPG.embed_data(output_file, face_type = FaceType.toString(self.face_type),
                                                   landmarks = face_image_landmarks.tolist(),
                                                   source_filename = filename_path.name,
                                                   source_rect= rect,
                                                   source_landmarks = image_landmarks.tolist()
                                      )

                    result.append (output_file)

                if self.debug:
                    cv2_imwrite(debug_output_file, debug_image, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )

                return result
        return None

    #overridable
    def onClientGetDataName (self, data):
        #return string identifier of your data
        return data[0]

    #override
    def onHostResult (self, host_dict, data, result):
    def on_result (self, host_dict, data, result):
        if self.manual == True:
            self.landmarks = result[1][0][1]

            (h,w,c) = self.original_image.shape
            image = cv2.addWeighted (self.original_image,1.0,self.text_lines_img,1.0,0)
            (h,w,c) = self.image.shape
            image = cv2.addWeighted (self.image,1.0,self.text_lines_img,1.0,0)
            view_rect = (np.array(self.rect) * self.view_scale).astype(np.int).tolist()
            view_landmarks = (np.array(self.landmarks) * self.view_scale).astype(np.int).tolist()

            if self.param_rect_size <= 40:
            if self.rect_size <= 40:
                scaled_rect_size = h // 3 if w > h else w // 3

                p1 = (self.param_x - self.param_rect_size, self.param_y - self.param_rect_size)
                p2 = (self.param_x + self.param_rect_size, self.param_y - self.param_rect_size)
                p3 = (self.param_x - self.param_rect_size, self.param_y + self.param_rect_size)
                p1 = (self.x - self.rect_size, self.y - self.rect_size)
                p2 = (self.x + self.rect_size, self.y - self.rect_size)
                p3 = (self.x - self.rect_size, self.y + self.rect_size)

                wh = h if h < w else w
                np1 = (w / 2 - wh / 4, h / 2 - wh / 4)

@@ -389,12 +421,11 @@ class ExtractSubprocessor(SubprocessorBase):

            LandmarksProcessor.draw_rect_landmarks (image, view_rect, view_landmarks, self.image_size, self.face_type)

            if self.param['rect_locked']:
            if self.rect_locked:
                LandmarksProcessor.draw_landmarks(image, view_landmarks, (255,255,0) )
            self.param['redraw_needed'] = False
            self.redraw_needed = False

            cv2.imshow (self.wnd_name, image)
            return 0
            io.show_image (self.wnd_name, image)
        else:
            if self.type == 'rects':
                self.result.append ( result )

@@ -403,98 +434,91 @@ class ExtractSubprocessor(SubprocessorBase):
            elif self.type == 'final':
                self.result += result

        return 1
        io.progress_bar_inc(1)

    #override
    def onFinalizeAndGetResult(self):
        if self.manual == True:
            cv2.destroyAllWindows()
    def get_result(self):
        return self.result

class DeletedFilesSearcherSubprocessor(SubprocessorBase):

class DeletedFilesSearcherSubprocessor(Subprocessor):
    class Cli(Subprocessor.Cli):
        #override
        def on_initialize(self, client_dict):
            self.debug_paths_stems = client_dict['debug_paths_stems']
            return None

        #override
        def process_data(self, data):
            input_path_stem = Path(data[0]).stem
            return any ( [ input_path_stem == d_stem for d_stem in self.debug_paths_stems] )
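            # True means this input frame still has a counterpart in the _debug
            # folder; the host keeps only the False ones, i.e. frames whose debug
            # image was deleted and which therefore need re-extraction.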

        #override
        def get_data_name (self, data):
            #return string identifier of your data
            return data[0]

    #override
    def __init__(self, input_paths, debug_paths ):
        self.input_paths = input_paths
        self.debug_paths_stems = [ Path(d).stem for d in debug_paths]
        self.result = []
        super().__init__('DeletedFilesSearcherSubprocessor', 60)
        super().__init__('DeletedFilesSearcherSubprocessor', DeletedFilesSearcherSubprocessor.Cli, 60)

    #override
    def process_info_generator(self):
        for i in range(0, min(multiprocessing.cpu_count(), 8) ):
            yield 'CPU%d' % (i), {}, {'device_idx': i,
                                      'device_name': 'CPU%d' % (i),
                                      'debug_paths_stems' : self.debug_paths_stems
                                      }
        for i in range(min(multiprocessing.cpu_count(), 8)):
            yield 'CPU%d' % (i), {}, {'debug_paths_stems' : self.debug_paths_stems}

    #override
    def get_no_process_started_message(self):
        print ( 'Unable to start CPU processes.')

    def on_clients_initialized(self):
        io.progress_bar ("Searching deleted files", len (self.input_paths))

    #override
    def onHostGetProgressBarDesc(self):
        return "Searching deleted files"

    def on_clients_finalized(self):
        io.progress_bar_close()

    #override
    def onHostGetProgressBarLen(self):
        return len (self.input_paths)

    #override
    def onHostGetData(self, host_dict):
    def get_data(self, host_dict):
        if len (self.input_paths) > 0:
            return [self.input_paths.pop(0)]
        return None

    #override
    def onHostDataReturn (self, host_dict, data):
    def on_data_return (self, host_dict, data):
        self.input_paths.insert(0, data[0])

    #override
    def onClientInitialize(self, client_dict):
        self.debug_paths_stems = client_dict['debug_paths_stems']
        return None

    #override
    def onClientProcessData(self, data):
        input_path_stem = Path(data[0]).stem
        return any ( [ input_path_stem == d_stem for d_stem in self.debug_paths_stems] )

    #override
    def onClientGetDataName (self, data):
        #return string identifier of your data
        return data[0]

    #override
    def onHostResult (self, host_dict, data, result):
    def on_result (self, host_dict, data, result):
        if result == False:
            self.result.append( data[0] )
        return 1
        io.progress_bar_inc(1)

    #override
    def onFinalizeAndGetResult(self):
    def get_result(self):
        return self.result

'''
detector
    'dlib'
    'mt'
    'manual'

face_type
    'full_face'
    'avatar'
'''
def main (input_dir, output_dir, debug, detector='mt', multi_gpu=True, cpu_only=False, manual_fix=False, manual_output_debug_fix=False, manual_window_size=1368, image_size=256, face_type='full_face'):
    print ("Running extractor.\r\n")

def main(input_dir,
         output_dir,
         debug=False,
         detector='mt',
         manual_fix=False,
         manual_output_debug_fix=False,
         manual_window_size=1368,
         image_size=256,
         face_type='full_face',
         device_args={}):

    input_path = Path(input_dir)
    output_path = Path(output_dir)
    face_type = FaceType.fromString(face_type)

    multi_gpu = device_args.get('multi_gpu', False)
    cpu_only = device_args.get('cpu_only', False)
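    # device selection now travels in the device_args dict (with safe defaults)
    # rather than as separate keyword arguments.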

    if not input_path.exists():
        print('Input directory not found. Please ensure it exists.')
        return

        raise ValueError('Input directory not found. Please ensure it exists.')

    if output_path.exists():
        if not manual_output_debug_fix:
            for filename in Path_utils.get_image_paths(output_path):

@@ -505,19 +529,17 @@ def main (input_dir, output_dir, debug, detector='mt', multi_gpu=True, cpu_only=
    if manual_output_debug_fix:
        debug = True
        detector = 'manual'
        print('Performing re-extract frames which were deleted from _debug directory.')
        io.log_info('Performing re-extract frames which were deleted from _debug directory.')

    input_path_image_paths = Path_utils.get_image_unique_filestem_paths(input_path, verbose=True)

    input_path_image_paths = Path_utils.get_image_unique_filestem_paths(input_path, verbose_print_func=io.log_info)
    if debug:
        debug_output_path = Path(str(output_path) + '_debug')

        if manual_output_debug_fix:
            if not debug_output_path.exists():
                print ("%s not found " % ( str(debug_output_path) ))
                return

            input_path_image_paths = DeletedFilesSearcherSubprocessor ( input_path_image_paths, Path_utils.get_image_paths(debug_output_path) ).process()
                raise ValueError("%s not found " % ( str(debug_output_path) ))

            input_path_image_paths = DeletedFilesSearcherSubprocessor (input_path_image_paths, Path_utils.get_image_paths(debug_output_path) ).run()
            input_path_image_paths = sorted (input_path_image_paths)
        else:
            if debug_output_path.exists():

@@ -530,29 +552,29 @@ def main (input_dir, output_dir, debug, detector='mt', multi_gpu=True, cpu_only=
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            print ('Performing manual extract...')
            extracted_faces = ExtractSubprocessor ([ (filename,[]) for filename in input_path_image_paths ], 'landmarks', image_size, face_type, debug, cpu_only=cpu_only, manual=True, manual_window_size=manual_window_size).process()
            io.log_info ('Performing manual extract...')
            extracted_faces = ExtractSubprocessor ([ (filename,[]) for filename in input_path_image_paths ], 'landmarks', image_size, face_type, debug, cpu_only=cpu_only, manual=True, manual_window_size=manual_window_size).run()
        else:
            print ('Performing 1st pass...')
            extracted_rects = ExtractSubprocessor ([ (x,) for x in input_path_image_paths ], 'rects', image_size, face_type, debug, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, detector=detector).process()
            io.log_info ('Performing 1st pass...')
            extracted_rects = ExtractSubprocessor ([ (x,) for x in input_path_image_paths ], 'rects', image_size, face_type, debug, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, detector=detector).run()

            print ('Performing 2nd pass...')
            extracted_faces = ExtractSubprocessor (extracted_rects, 'landmarks', image_size, face_type, debug, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False).process()
            io.log_info ('Performing 2nd pass...')
            extracted_faces = ExtractSubprocessor (extracted_rects, 'landmarks', image_size, face_type, debug, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False).run()

            if manual_fix:
                print ('Performing manual fix...')
                io.log_info ('Performing manual fix...')

                if all ( np.array ( [ len(data[1]) > 0 for data in extracted_faces] ) == True ):
                    print ('All faces are detected, manual fix not needed.')
                    io.log_info ('All faces are detected, manual fix not needed.')
                else:
                    extracted_faces = ExtractSubprocessor (extracted_faces, 'landmarks', image_size, face_type, debug, manual=True, manual_window_size=manual_window_size).process()
                    extracted_faces = ExtractSubprocessor (extracted_faces, 'landmarks', image_size, face_type, debug, manual=True, manual_window_size=manual_window_size).run()

        if len(extracted_faces) > 0:
            print ('Performing 3rd pass...')
            final_imgs_paths = ExtractSubprocessor (extracted_faces, 'final', image_size, face_type, debug, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, output_path=output_path).process()
            io.log_info ('Performing 3rd pass...')
            final_imgs_paths = ExtractSubprocessor (extracted_faces, 'final', image_size, face_type, debug, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, output_path=output_path).run()
            faces_detected = len(final_imgs_paths)

    print('-------------------------')
    print('Images found: %d' % (images_found) )
    print('Faces detected: %d' % (faces_detected) )
    print('-------------------------')
    io.log_info ('-------------------------')
    io.log_info ('Images found: %d' % (images_found) )
    io.log_info ('Faces detected: %d' % (faces_detected) )
    io.log_info ('-------------------------')
File diff suppressed because it is too large
@ -1,32 +1,34 @@
|
|||
import sys
|
||||
import traceback
|
||||
import queue
|
||||
import colorsys
|
||||
import threading
|
||||
import time
|
||||
import numpy as np
|
||||
import itertools
|
||||
|
||||
from pathlib import Path
|
||||
from utils import Path_utils
|
||||
from utils import image_utils
|
||||
import cv2
|
||||
import models
|
||||
from interact import interact as io
|
||||
|
||||
def trainerThread (input_queue, output_queue, training_data_src_dir, training_data_dst_dir, model_path, model_name, save_interval_min=15, debug=False, **in_options):
|
||||
|
||||
def trainerThread (s2c, c2s, args, device_args):
|
||||
while True:
|
||||
try:
|
||||
training_data_src_path = Path(training_data_src_dir)
|
||||
training_data_dst_path = Path(training_data_dst_dir)
|
||||
model_path = Path(model_path)
|
||||
training_data_src_path = Path( args.get('training_data_src_dir', '') )
|
||||
training_data_dst_path = Path( args.get('training_data_dst_dir', '') )
|
||||
model_path = Path( args.get('model_path', '') )
|
||||
model_name = args.get('model_name', '')
|
||||
save_interval_min = 15
|
||||
debug = args.get('debug', '')
|
||||
|
||||
if not training_data_src_path.exists():
|
||||
print( 'Training data src directory does not exist.')
|
||||
return
|
||||
io.log_err('Training data src directory does not exist.')
|
||||
break
|
||||
|
||||
if not training_data_dst_path.exists():
|
||||
print( 'Training data dst directory does not exist.')
|
||||
return
|
||||
io.log_err('Training data dst directory does not exist.')
|
||||
break
|
||||
|
||||
if not model_path.exists():
|
||||
model_path.mkdir(exist_ok=True)
|
||||
|
@@ -36,7 +38,7 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
                        training_data_src_path=training_data_src_path,
                        training_data_dst_path=training_data_dst_path,
                        debug=debug,
                        **in_options)
                        device_args=device_args)

            is_reached_goal = model.is_reached_epoch_goal()
            is_upd_save_time_after_train = False

@@ -48,10 +50,10 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
            def send_preview():
                if not debug:
                    previews = model.get_previews()
                    output_queue.put ( {'op':'show', 'previews': previews, 'epoch':model.get_epoch(), 'loss_history': model.get_loss_history().copy() } )
                    c2s.put ( {'op':'show', 'previews': previews, 'epoch':model.get_epoch(), 'loss_history': model.get_loss_history().copy() } )
                else:
                    previews = [( 'debug, press update for new', model.debug_one_epoch())]
                    output_queue.put ( {'op':'show', 'previews': previews} )
                    c2s.put ( {'op':'show', 'previews': previews} )

            if model.is_first_run():

@@ -59,11 +61,11 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
            if model.get_target_epoch() != 0:
                if is_reached_goal:
                    print ('Model already trained to target epoch. You can use preview.')
                    io.log_info('Model already trained to target epoch. You can use preview.')
                else:
                    print('Starting. Target epoch: %d. Press "Enter" to stop training and save model.' % ( model.get_target_epoch() ) )
                    io.log_info('Starting. Target epoch: %d. Press "Enter" to stop training and save model.' % ( model.get_target_epoch() ) )
            else:
                print('Starting. Press "Enter" to stop training and save model.')
                io.log_info('Starting. Press "Enter" to stop training and save model.')

            last_save_time = time.time()

@@ -75,12 +77,12 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
                    #save resets plaidML programs, so upd last_save_time only after plaidML rebuild them
                    last_save_time = time.time()

                print (loss_string, end='\r')
                io.log_info (loss_string, end='\r')
                if model.get_target_epoch() != 0 and model.is_reached_epoch_goal():
                    print ('Reached target epoch.')
                    io.log_info ('Reached target epoch.')
                    model_save()
                    is_reached_goal = True
                    print ('You can use preview now.')
                    io.log_info ('You can use preview now.')

                if not is_reached_goal and (time.time() - last_save_time) >= save_interval_min*60:
                    last_save_time = time.time()

@@ -95,8 +97,8 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
            if debug:
                time.sleep(0.005)

            while not input_queue.empty():
                input = input_queue.get()
            while not s2c.empty():
                input = s2c.get()
                op = input['op']
                if op == 'save':
                    model_save()
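Only the 'save' branch is visible in this hunk; elsewhere in the file the UI loop also sends 'preview' and 'close'. A sketch of the full dispatch this implies (the 'preview' and 'close' handling below is assumed, not shown in the hunk):

            while not s2c.empty():
                input = s2c.get()
                op = input['op']
                if op == 'save':
                    model_save()
                elif op == 'preview':
                    send_preview()   # assumed: answers the UI's {'op': 'preview'} request
                elif op == 'close':
                    model_save()     # assumed: save once more before shutdown
                    break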

@@ -120,131 +122,142 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
            print ('Error: %s' % (str(e)))
            traceback.print_exc()
        break
    output_queue.put ( {'op':'close'} )
    c2s.put ( {'op':'close'} )

def previewThread (input_queue, output_queue):

    previews = None
    loss_history = None
    selected_preview = 0
    update_preview = False
    is_showing = False
    is_waiting_preview = False
    show_last_history_epochs_count = 0
    epoch = 0
    while True:
        if not input_queue.empty():
            input = input_queue.get()
            op = input['op']
            if op == 'show':
                is_waiting_preview = False
                loss_history = input['loss_history'] if 'loss_history' in input.keys() else None
                previews = input['previews'] if 'previews' in input.keys() else None
                epoch = input['epoch'] if 'epoch' in input.keys() else 0
                if previews is not None:
                    max_w = 0
                    max_h = 0
                    for (preview_name, preview_rgb) in previews:
                        (h, w, c) = preview_rgb.shape
                        max_h = max (max_h, h)
                        max_w = max (max_w, w)

                    max_size = 800
                    if max_h > max_size:
                        max_w = int( max_w / (max_h / max_size) )
                        max_h = max_size

                    #make all previews size equal
                    for preview in previews[:]:
                        (preview_name, preview_rgb) = preview
                        (h, w, c) = preview_rgb.shape
                        if h != max_h or w != max_w:
                            previews.remove(preview)
                            previews.append ( (preview_name, cv2.resize(preview_rgb, (max_w, max_h))) )
                    selected_preview = selected_preview % len(previews)
                    update_preview = True
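                    # Worked example of the scaling above (sizes hypothetical):
                    # previews of 768x768 and 512x512 give max_h = max_w = 768,
                    # under max_size = 800, so only the smaller one is upscaled
                    # to 768x768. A 1600-high, 1200-wide preview instead gives
                    # max_h = 1600 > 800, so max_w = int(1200 / (1600/800)) = 600
                    # and every preview is resized to 600 wide, 800 high.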
            elif op == 'close':
                break

        if update_preview:
            update_preview = False

            selected_preview_name = previews[selected_preview][0]
            selected_preview_rgb = previews[selected_preview][1]
            (h,w,c) = selected_preview_rgb.shape

            # HEAD
            head_lines = [
                '[s]:save [enter]:exit',
                '[p]:update [space]:next preview [l]:change history range',
                'Preview: "%s" [%d/%d]' % (selected_preview_name,selected_preview+1, len(previews) )
                ]
            head_line_height = 15
            head_height = len(head_lines) * head_line_height
            head = np.ones ( (head_height,w,c) ) * 0.1

            for i in range(0, len(head_lines)):
                t = i*head_line_height
                b = (i+1)*head_line_height
                head[t:b, 0:w] += image_utils.get_text_image ( (w,head_line_height,c) , head_lines[i], color=[0.8]*c )

            final = head

            if loss_history is not None:
                if show_last_history_epochs_count == 0:
                    loss_history_to_show = loss_history
                else:
                    loss_history_to_show = loss_history[-show_last_history_epochs_count:]

                lh_img = models.ModelBase.get_loss_history_preview(loss_history_to_show, epoch, w, c)
                final = np.concatenate ( [final, lh_img], axis=0 )

            final = np.concatenate ( [final, selected_preview_rgb], axis=0 )
            final = np.clip(final, 0, 1)

            cv2.imshow ( 'Training preview', (final*255).astype(np.uint8) )
            is_showing = True

        if is_showing:
            key = cv2.waitKey(100)
        else:
            time.sleep(0.1)
            key = 0

        if key == ord('\n') or key == ord('\r'):
            output_queue.put ( {'op': 'close'} )
        elif key == ord('s'):
            output_queue.put ( {'op': 'save'} )
        elif key == ord('p'):
            if not is_waiting_preview:
                is_waiting_preview = True
                output_queue.put ( {'op': 'preview'} )
        elif key == ord('l'):
            if show_last_history_epochs_count == 0:
                show_last_history_epochs_count = 5000
            elif show_last_history_epochs_count == 5000:
                show_last_history_epochs_count = 10000
            elif show_last_history_epochs_count == 10000:
                show_last_history_epochs_count = 50000
            elif show_last_history_epochs_count == 50000:
                show_last_history_epochs_count = 100000
            elif show_last_history_epochs_count == 100000:
                show_last_history_epochs_count = 0
            update_preview = True
        elif key == ord(' '):
            selected_preview = (selected_preview + 1) % len(previews)
            update_preview = True

    cv2.destroyAllWindows()
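The 'l' handler above steps through fixed loss-history ranges with a chain of elifs; an equivalent table-driven form would be (a sketch, not part of the commit):

    history_ranges = [0, 5000, 10000, 50000, 100000]   # 0 = show full history
    idx = history_ranges.index(show_last_history_epochs_count)
    show_last_history_epochs_count = history_ranges[(idx + 1) % len(history_ranges)]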

def main(args, device_args):
    io.log_info ("Running trainer.\r\n")

def main (training_data_src_dir, training_data_dst_dir, model_path, model_name, **in_options):
    print ("Running trainer.\r\n")
    no_preview = args.get('no_preview', False)

    output_queue = queue.Queue()
    input_queue = queue.Queue()
    import threading
    thread = threading.Thread(target=trainerThread, args=(output_queue, input_queue, training_data_src_dir, training_data_dst_dir, model_path, model_name), kwargs=in_options )
    s2c = queue.Queue()
    c2s = queue.Queue()

    thread = threading.Thread(target=trainerThread, args=(s2c, c2s, args, device_args) )
    thread.start()

    previewThread (input_queue, output_queue)
    if no_preview:
        while True:
            if not c2s.empty():
                input = c2s.get()
                op = input.get('op','')
                if op == 'close':
                    break
            io.process_messages(0.1)
    else:
        wnd_name = "Training preview"
        io.named_window(wnd_name)
        io.capture_keys(wnd_name)

        previews = None
        loss_history = None
        selected_preview = 0
        update_preview = False
        is_showing = False
        is_waiting_preview = False
        show_last_history_epochs_count = 0
        epoch = 0
        while True:
            if not c2s.empty():
                input = c2s.get()
                op = input['op']
                if op == 'show':
                    is_waiting_preview = False
                    loss_history = input['loss_history'] if 'loss_history' in input.keys() else None
                    previews = input['previews'] if 'previews' in input.keys() else None
                    epoch = input['epoch'] if 'epoch' in input.keys() else 0
                    if previews is not None:
                        max_w = 0
                        max_h = 0
                        for (preview_name, preview_rgb) in previews:
                            (h, w, c) = preview_rgb.shape
                            max_h = max (max_h, h)
                            max_w = max (max_w, w)

                        max_size = 800
                        if max_h > max_size:
                            max_w = int( max_w / (max_h / max_size) )
                            max_h = max_size

                        #make all previews size equal
                        for preview in previews[:]:
                            (preview_name, preview_rgb) = preview
                            (h, w, c) = preview_rgb.shape
                            if h != max_h or w != max_w:
                                previews.remove(preview)
                                previews.append ( (preview_name, cv2.resize(preview_rgb, (max_w, max_h))) )
                        selected_preview = selected_preview % len(previews)
                        update_preview = True
                elif op == 'close':
                    break

            if update_preview:
                update_preview = False

                selected_preview_name = previews[selected_preview][0]
                selected_preview_rgb = previews[selected_preview][1]
                (h,w,c) = selected_preview_rgb.shape

                # HEAD
                head_lines = [
                    '[s]:save [enter]:exit',
                    '[p]:update [space]:next preview [l]:change history range',
                    'Preview: "%s" [%d/%d]' % (selected_preview_name,selected_preview+1, len(previews) )
                    ]
                head_line_height = 15
                head_height = len(head_lines) * head_line_height
                head = np.ones ( (head_height,w,c) ) * 0.1

                for i in range(0, len(head_lines)):
                    t = i*head_line_height
                    b = (i+1)*head_line_height
                    head[t:b, 0:w] += image_utils.get_text_image ( (w,head_line_height,c) , head_lines[i], color=[0.8]*c )

                final = head

                if loss_history is not None:
                    if show_last_history_epochs_count == 0:
                        loss_history_to_show = loss_history
                    else:
                        loss_history_to_show = loss_history[-show_last_history_epochs_count:]

                    lh_img = models.ModelBase.get_loss_history_preview(loss_history_to_show, epoch, w, c)
                    final = np.concatenate ( [final, lh_img], axis=0 )

                final = np.concatenate ( [final, selected_preview_rgb], axis=0 )
                final = np.clip(final, 0, 1)

                io.show_image( wnd_name, (final*255).astype(np.uint8) )
                is_showing = True

            key_events = io.get_key_events(wnd_name)
            key, = key_events[-1] if len(key_events) > 0 else (0,)

            if key == ord('\n') or key == ord('\r'):
                s2c.put ( {'op': 'close'} )
            elif key == ord('s'):
                s2c.put ( {'op': 'save'} )
            elif key == ord('p'):
                if not is_waiting_preview:
                    is_waiting_preview = True
                    s2c.put ( {'op': 'preview'} )
            elif key == ord('l'):
                if show_last_history_epochs_count == 0:
                    show_last_history_epochs_count = 5000
                elif show_last_history_epochs_count == 5000:
                    show_last_history_epochs_count = 10000
                elif show_last_history_epochs_count == 10000:
                    show_last_history_epochs_count = 50000
                elif show_last_history_epochs_count == 50000:
                    show_last_history_epochs_count = 100000
                elif show_last_history_epochs_count == 100000:
                    show_last_history_epochs_count = 0
                update_preview = True
            elif key == ord(' '):
                selected_preview = (selected_preview + 1) % len(previews)
                update_preview = True

            io.process_messages(0.1)

        io.destroy_all_windows()
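Stripped of the preview plumbing, the interact-based UI loop above reduces to this skeleton; only io calls that already appear in the diff are used (a sketch, not part of the commit):

    wnd_name = "Training preview"
    io.named_window(wnd_name)
    io.capture_keys(wnd_name)
    while True:
        key_events = io.get_key_events(wnd_name)
        key, = key_events[-1] if len(key_events) > 0 else (0,)
        if key == ord('\n') or key == ord('\r'):   # Enter ends the session
            break
        io.process_messages(0.1)                   # pump the io event loop
    io.destroy_all_windows()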

@@ -1,28 +1,21 @@
import os
import sys
import operator
import numpy as np
import cv2
from tqdm import tqdm
from shutil import copyfile

import cv2
from pathlib import Path
from utils import Path_utils
from utils import image_utils
from utils.DFLPNG import DFLPNG
from utils.DFLJPG import DFLJPG
from utils.cv2_utils import *
from facelib import LandmarksProcessor
from utils.SubprocessorBase import SubprocessorBase
import multiprocessing
from interact import interact as io

def convert_png_to_jpg_file (filepath):
    filepath = Path(filepath)

    if filepath.suffix != '.png':
        return
    dflpng = DFLPNG.load (str(filepath), print_on_no_embedded_data=True)
    if dflpng is None:
        return

    dflpng = DFLPNG.load (str(filepath) )
    if dflpng is None:
        print ("%s is not a dfl image file" % (filepath.name) )
        return

    dfl_dict = dflpng.getDFLDictData()
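The hunk above moves the "not a dfl image file" report out of the loader: DFLPNG.load() simply returns None when a file carries no embedded DFL metadata, and the caller prints the message itself. A usage sketch of that contract (the filename is hypothetical):

    dflpng = DFLPNG.load('aligned/00001.png')   # hypothetical file
    if dflpng is None:
        print ('00001.png is not a dfl image file')
    else:
        dfl_dict = dflpng.getDFLDictData()      # embedded face metadata dict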

@@ -45,27 +38,30 @@ def convert_png_to_jpg_folder (input_path):
    print ("Converting PNG to JPG...\r\n")

    for filepath in tqdm( Path_utils.get_image_paths(input_path), desc="Converting", ascii=True):
    for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Converting"):
        filepath = Path(filepath)
        convert_png_to_jpg_file(filepath)

def add_landmarks_debug_images(input_path):
    print ("Adding landmarks debug images...")

    for filepath in tqdm( Path_utils.get_image_paths(input_path), desc="Processing", ascii=True):
    for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)

        img = cv2_imread(str(filepath))

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load( str(filepath), print_on_no_embedded_data=True )
            dflimg = DFLPNG.load( str(filepath) )
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load ( str(filepath), print_on_no_embedded_data=True )
            dflimg = DFLJPG.load ( str(filepath) )
        else:
            dflimg = None

        if dflimg is None:
            print ("%s is not a dfl image file" % (filepath.name) )
            continue

        if not (dflimg is None or img is None):
        if img is not None:
            face_landmarks = dflimg.get_landmarks()
            LandmarksProcessor.draw_landmarks(img, face_landmarks)
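Both loops above swap tqdm for io.progress_bar_generator, which keeps the same iterate-and-report shape while routing progress output through the interact layer. A usage sketch (the directory is hypothetical):

    # iterate image paths with a labeled progress bar, as the loops above do
    for filepath in io.progress_bar_generator( Path_utils.get_image_paths('workspace/aligned'), "Processing"):
        filepath = Path(filepath)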