Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-08-20 13:33:24 -07:00)

removing trailing spaces

This commit is contained in:
parent fa4e579b95
commit a3df04999c

61 changed files with 2110 additions and 2103 deletions
@@ -18,39 +18,39 @@ from interact import interact as io

class ConvertSubprocessor(Subprocessor):

    class Cli(Subprocessor.Cli):

        #override
        def on_initialize(self, client_dict):
            io.log_info ('Running on %s.' % (client_dict['device_name']) )
            self.device_idx = client_dict['device_idx']
            self.device_name = client_dict['device_name']
            self.converter = client_dict['converter']
            self.output_path = Path(client_dict['output_dir']) if 'output_dir' in client_dict.keys() else None
            self.alignments = client_dict['alignments']
            self.debug = client_dict['debug']

            #transfer and set stdin so that code.interact works in the debug subprocess
            stdin_fd = client_dict['stdin_fd']
            if stdin_fd is not None:
                sys.stdin = os.fdopen(stdin_fd)

            from nnlib import nnlib
            #model process ate all GPU mem,
            #so we cannot use GPU for any TF operations in converter processes
            #therefore forcing active_DeviceConfig to CPU only
            nnlib.active_DeviceConfig = nnlib.DeviceConfig (cpu_only=True)

            return None

        #override
        def process_data(self, data):
            filename_path = Path(data)
            files_processed = 1
            faces_processed = 0

            output_filename_path = self.output_path / (filename_path.stem + '.png')

            if self.converter.type == Converter.TYPE_FACE and filename_path.stem not in self.alignments.keys():
                if not self.debug:
                    self.log_info ( 'no faces found for %s, copying without faces' % (filename_path.name) )
                    shutil.copy ( str(filename_path), str(output_filename_path) )
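Note: the on_initialize above forces nnlib's active_DeviceConfig to CPU because the parent model process already owns all GPU memory. A minimal generic sketch of the same idea (plain multiprocessing, not DeepFaceLab's nnlib; hiding GPUs via an empty CUDA_VISIBLE_DEVICES is standard CUDA behavior):

import os
import multiprocessing

def cpu_only_worker(data):
    # Must run before any CUDA-using framework initializes in this process:
    # an empty CUDA_VISIBLE_DEVICES hides every GPU from the child.
    os.environ['CUDA_VISIBLE_DEVICES'] = ''
    return sum(data)  # placeholder for the real CPU-side work

if __name__ == '__main__':
    with multiprocessing.Pool(2) as pool:
        print(pool.map(cpu_only_worker, [[1, 2], [3, 4]]))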
@@ -72,12 +72,12 @@ class ConvertSubprocessor(Subprocessor):
                    dflimg = DFLJPG.load ( str(filename_path) )
                else:
                    dflimg = None

                if dflimg is not None:
                    image_landmarks = dflimg.get_landmarks()

                    image = self.converter.convert_image(image, image_landmarks, self.debug)

                    if self.debug:
                        raise NotImplementedError
                        #for img in image:
@@ -85,14 +85,14 @@ class ConvertSubprocessor(Subprocessor):
                        #    cv2.waitKey(0)
                    faces_processed = 1
                else:
                    self.log_err ("%s is not a dfl image file" % (filename_path.name) )

            elif self.converter.type == Converter.TYPE_FACE:
                faces = self.alignments[filename_path.stem]

                if self.debug:
                    debug_images = []

                for face_num, image_landmarks in enumerate(faces):
                    try:
                        if self.debug:
@@ -101,56 +101,56 @@ class ConvertSubprocessor(Subprocessor):
                        if self.debug:
                            debug_images += self.converter.convert_face(image, image_landmarks, self.debug)
                        else:
                            image = self.converter.convert_face(image, image_landmarks, self.debug)

                    except Exception as e:
                        e_str = traceback.format_exc()
                        if 'MemoryError' in e_str:
                            raise Subprocessor.SilenceException
                        else:
                            raise Exception( 'Error while converting face_num [%d] in file [%s]: %s' % (face_num, filename_path, e_str) )

                if self.debug:
                    return (1, debug_images)

                faces_processed = len(faces)

            if not self.debug:
                cv2_imwrite (str(output_filename_path), (image*255).astype(np.uint8) )

            return (0, files_processed, faces_processed)

        #overridable
        def get_data_name (self, data):
            #return string identifier of your data
            return data

    #override
    def __init__(self, converter, input_path_image_paths, output_path, alignments, debug = False):
        super().__init__('Converter', ConvertSubprocessor.Cli, 86400 if debug == True else 60)

        self.converter = converter
        self.host_processor, self.cli_func = SubprocessFunctionCaller.make_pair ( self.converter.predictor_func )
        self.process_converter = self.converter.copy_and_set_predictor(self.cli_func)

        self.input_data = self.input_path_image_paths = input_path_image_paths
        self.output_path = output_path
        self.alignments = alignments
        self.debug = debug

        self.files_processed = 0
        self.faces_processed = 0

    #override
    def process_info_generator(self):
        r = [0] if self.debug else range(multiprocessing.cpu_count())

        for i in r:
            yield 'CPU%d' % (i), {}, {'device_idx': i,
                                      'device_name': 'CPU%d' % (i),
                                      'converter' : self.process_converter,
                                      'output_dir' : str(self.output_path),
                                      'alignments' : self.alignments,
                                      'debug': self.debug,
                                      'stdin_fd': sys.stdin.fileno() if self.debug else None
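Note: SubprocessFunctionCaller.make_pair (used in __init__ above) lets the CPU-only workers call the GPU-resident predictor living in the host process. A minimal sketch of that call-proxy pattern with multiprocessing.Pipe (illustrative names and layout, not DeepFaceLab's actual API; real worker processes would need the connection inherited at fork time rather than captured in a closure):

import multiprocessing

def make_pair(func):
    host_conn, worker_conn = multiprocessing.Pipe()

    class HostProcessor:
        def process_messages(self):
            # Host side: answer any pending worker requests (cf. on_tick below).
            while host_conn.poll():
                args = host_conn.recv()
                host_conn.send(func(*args))

    def cli_func(*args):
        # Worker side: forward the call to the host and block for the result.
        worker_conn.send(args)
        return worker_conn.recv()

    return HostProcessor(), cli_func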
@@ -160,25 +160,25 @@ class ConvertSubprocessor(Subprocessor):
    def on_clients_initialized(self):
        if self.debug:
            io.named_window ("Debug convert")

        io.progress_bar ("Converting", len (self.input_data) )

    #overridable optional
    def on_clients_finalized(self):
        io.progress_bar_close()

        if self.debug:
            io.destroy_all_windows()

    #override
    def get_data(self, host_dict):
        if len (self.input_data) > 0:
            return self.input_data.pop(0)
        return None

    #override
    def on_data_return (self, host_dict, data):
        self.input_data.insert(0, data)

    #override
    def on_result (self, host_dict, data, result):
@@ -190,25 +190,25 @@ class ConvertSubprocessor(Subprocessor):
                io.show_image ('Debug convert', (img*255).astype(np.uint8) )
                io.wait_any_key()
        io.progress_bar_inc(1)

    #override
    def on_tick(self):
        self.host_processor.process_messages()

    #override
    def get_result(self):
        return self.files_processed, self.faces_processed

def main (args, device_args):
    io.log_info ("Running converter.\r\n")

    aligned_dir = args.get('aligned_dir', None)

    try:
        input_path = Path(args['input_dir'])
        output_path = Path(args['output_dir'])
        model_path = Path(args['model_dir'])

        if not input_path.exists():
            io.log_err('Input directory not found. Please ensure it exists.')
            return
@@ -218,69 +218,69 @@ def main (args, device_args):
                    Path(filename).unlink()
        else:
            output_path.mkdir(parents=True, exist_ok=True)

        if not model_path.exists():
            io.log_err('Model directory not found. Please ensure it exists.')
            return

        import models
        model = models.import_model( args['model_name'] )(model_path, device_args=device_args)
        converter = model.get_converter()
        converter.dummy_predict()

        alignments = None

        if converter.type == Converter.TYPE_FACE:
            if aligned_dir is None:
                io.log_err('Aligned directory not found. Please ensure it exists.')
                return

            aligned_path = Path(aligned_dir)
            if not aligned_path.exists():
                io.log_err('Aligned directory not found. Please ensure it exists.')
                return

            alignments = {}

            aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)
            for filepath in io.progress_bar_generator(aligned_path_image_paths, "Collecting alignments"):
                filepath = Path(filepath)

                if filepath.suffix == '.png':
                    dflimg = DFLPNG.load( str(filepath) )
                elif filepath.suffix == '.jpg':
                    dflimg = DFLJPG.load ( str(filepath) )
                else:
                    dflimg = None

                if dflimg is None:
                    io.log_err ("%s is not a dfl image file" % (filepath.name) )
                    continue

                source_filename_stem = Path( dflimg.get_source_filename() ).stem
                if source_filename_stem not in alignments.keys():
                    alignments[ source_filename_stem ] = []

                alignments[ source_filename_stem ].append (dflimg.get_source_landmarks())

        files_processed, faces_processed = ConvertSubprocessor (
                    converter = converter,
                    input_path_image_paths = Path_utils.get_image_paths(input_path),
                    output_path = output_path,
                    alignments = alignments,
                    debug = args.get('debug',False)
                    ).run()

        model.finalize()

    except Exception as e:
        print ( 'Error: %s' % (str(e)))
        traceback.print_exc()

'''
if model_name == 'AVATAR':
    output_path_image_paths = Path_utils.get_image_paths(output_path)

    last_ok_frame = -1
    for filename in output_path_image_paths:
        filename_path = Path(filename)
@@ -289,15 +289,15 @@ if model_name == 'AVATAR':
            frame = int(stem)
        except:
            raise Exception ('Aligned avatars must be created from indexed sequence files.')

        if frame-last_ok_frame > 1:
            start = last_ok_frame + 1
            end = frame - 1

            print ("Filling gaps: [%d...%d]" % (start, end) )
            for i in range (start, end+1):
                shutil.copy ( str(filename), str( output_path / ('%.5d%s' % (i, filename_path.suffix )) ) )

        last_ok_frame = frame
'''
#interpolate landmarks
@@ -306,28 +306,28 @@ if model_name == 'AVATAR':
#a = sorted(alignments.keys())
#a_len = len(a)
#
#box_pts = 3
#box = np.ones(box_pts)/box_pts
#for i in range( a_len ):
#    if i >= box_pts and i <= a_len-box_pts-1:
#        af0 = alignments[ a[i] ][0] ##first face
#        m0 = LandmarksProcessor.get_transform_mat (af0, 256, face_type=FaceType.FULL)
#
#        points = []
#
#        for j in range(-box_pts, box_pts+1):
#            af = alignments[ a[i+j] ][0] ##first face
#            m = LandmarksProcessor.get_transform_mat (af, 256, face_type=FaceType.FULL)
#            p = LandmarksProcessor.transform_points (af, m)
#            points.append (p)
#
#        points = np.array(points)
#        points_len = len(points)
#        t_points = np.transpose(points, [1,0,2])
#
#        p1 = np.array ( [ int(np.convolve(x[:,0], box, mode='same')[points_len//2]) for x in t_points ] )
#        p2 = np.array ( [ int(np.convolve(x[:,1], box, mode='same')[points_len//2]) for x in t_points ] )
#
#        new_points = np.concatenate( [np.expand_dims(p1,-1),np.expand_dims(p2,-1)], -1 )
#
#        alignments[ a[i] ][0] = LandmarksProcessor.transform_points (new_points, m0, True).astype(np.int32)
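Note: the commented-out block above sketches temporal smoothing of the first face's landmarks with a box filter. A runnable distillation of the core idea (this version smooths raw image-space coordinates and omits the warp through get_transform_mat that the original applies first):

import numpy as np

def smooth_landmarks(seq, box_pts=3):
    #seq: (frames, 68, 2) array of per-frame landmarks
    box = np.ones(box_pts) / box_pts
    out = seq.astype(np.float64).copy()
    for p in range(seq.shape[1]):      #each landmark point
        for c in range(2):             #x and y
            out[:, p, c] = np.convolve(seq[:, p, c], box, mode='same')
    return out

frames = np.random.rand(10, 68, 2) * 256
print(smooth_landmarks(frames).shape)  # (10, 68, 2)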
@@ -18,9 +18,9 @@ from facelib import LandmarksProcessor
from nnlib import nnlib
from joblib import Subprocessor
from interact import interact as io

class ExtractSubprocessor(Subprocessor):

    class Cli(Subprocessor.Cli):

        #override
@@ -32,19 +32,19 @@ class ExtractSubprocessor(Subprocessor):
            self.face_type = client_dict['face_type']
            self.device_idx = client_dict['device_idx']
            self.cpu_only = client_dict['device_type'] == 'CPU'
            self.output_path = Path(client_dict['output_dir']) if 'output_dir' in client_dict.keys() else None
            self.debug_dir = client_dict['debug_dir']
            self.detector = client_dict['detector']

            self.cached_image = (None, None)

            self.e = None
            device_config = nnlib.DeviceConfig ( cpu_only=self.cpu_only, force_gpu_idx=self.device_idx, allow_growth=True)
            if self.type == 'rects':
                if self.detector is not None:
                    if self.detector == 'mt':
                        nnlib.import_all (device_config)
                        self.e = facelib.MTCExtractor()
                    elif self.detector == 'dlib':
                        nnlib.import_dlib (device_config)
                        self.e = facelib.DLIBExtractor(nnlib.dlib)
@@ -53,10 +53,10 @@ class ExtractSubprocessor(Subprocessor):
                        self.e = facelib.S3FDExtractor()
                    else:
                        raise ValueError ("Wrong detector type.")

                    if self.e is not None:
                        self.e.__enter__()

            elif self.type == 'landmarks':
                nnlib.import_all (device_config)
                self.e = facelib.LandmarksExtractor(nnlib.keras)
@@ -66,15 +66,15 @@ class ExtractSubprocessor(Subprocessor):
                    self.second_pass_e.__enter__()
                else:
                    self.second_pass_e = None

            elif self.type == 'final':
                pass

        #override
        def on_finalize(self):
            if self.e is not None:
                self.e.__exit__()

        #override
        def process_data(self, data):
            filename_path = Path( data[0] )
@@ -84,64 +84,64 @@ class ExtractSubprocessor(Subprocessor):
                image = self.cached_image[1] #cached image for manual extractor
            else:
                image = cv2_imread( filename_path_str )

                if image is None:
                    self.log_err ( 'Failed to extract %s, reason: cv2_imread() fail.' % ( str(filename_path) ) )
                    return None

                image_shape = image.shape
                if len(image_shape) == 2:
                    h, w = image.shape
                    ch = 1
                else:
                    h, w, ch = image.shape

                if ch == 1:
                    image = np.repeat ( image [:,:,np.newaxis], 3, -1 )
                elif ch == 4:
                    image = image[:,:,0:3]

                wm = w % 2
                hm = h % 2
                if wm + hm != 0: #fix odd image
                    image = image[0:h-hm,0:w-wm,:]
                self.cached_image = ( filename_path_str, image )

            src_dflimg = None
            h, w, ch = image.shape
            if h == w:
                #extracting from an already extracted jpg image?
                if filename_path.suffix == '.jpg':
                    src_dflimg = DFLJPG.load ( str(filename_path) )

            if self.type == 'rects':
                if min(w,h) < 128:
                    self.log_err ( 'Image is too small %s : [%d, %d]' % ( str(filename_path), w, h ) )
                    rects = []
                else:
                    rects = self.e.extract_from_bgr (image)

                return [str(filename_path), rects]

            elif self.type == 'landmarks':
                rects = data[1]
                if rects is None:
                    landmarks = None
                else:
                    landmarks = self.e.extract_from_bgr (image, rects, self.second_pass_e if src_dflimg is None else None)

                return [str(filename_path), landmarks]

            elif self.type == 'final':
                result = []
                faces = data[1]

                if self.debug_dir is not None:
                    debug_output_file = str( Path(self.debug_dir) / (filename_path.stem+'.jpg') )
                    debug_image = image.copy()

                if src_dflimg is not None and len(faces) != 1:
                    #if re-extracting from a dflimg and zero or more than one face was detected - don't process, just copy it
                    print("src_dflimg is not None and len(faces) != 1", str(filename_path) )
@@ -151,26 +151,26 @@ class ExtractSubprocessor(Subprocessor):
                    result.append (output_file)
                else:
                    face_idx = 0
                    for face in faces:
                        rect = np.array(face[0])
                        image_landmarks = face[1]
                        if image_landmarks is None:
                            continue
                        image_landmarks = np.array(image_landmarks)

                        if self.face_type == FaceType.MARK_ONLY:
                            face_image = image
                            face_image_landmarks = image_landmarks
                        else:
                            image_to_face_mat = LandmarksProcessor.get_transform_mat (image_landmarks, self.image_size, self.face_type)
                            face_image = cv2.warpAffine(image, image_to_face_mat, (self.image_size, self.image_size), cv2.INTER_LANCZOS4)
                            face_image_landmarks = LandmarksProcessor.transform_points (image_landmarks, image_to_face_mat)

                            landmarks_bbox = LandmarksProcessor.transform_points ( [ (0,0), (0,self.image_size-1), (self.image_size-1, self.image_size-1), (self.image_size-1,0) ], image_to_face_mat, True)

                            rect_area = mathlib.polygon_area(np.array(rect[[0,2,2,0]]), np.array(rect[[1,1,3,3]]))
                            landmarks_area = mathlib.polygon_area(landmarks_bbox[:,0], landmarks_bbox[:,1] )

                            if landmarks_area > 4*rect_area: #discard faces whose umeyama landmark area exceeds 4x the detector rect area
                                continue
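Note: mathlib.polygon_area's body is not part of this diff; the shoelace formula is the usual implementation of such a helper (an assumption, shown here for clarity):

import numpy as np

def polygon_area(x, y):
    #x, y: vertex coordinates in order; returns the absolute area
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

#the filter above compares the detector box against the warped landmark bbox;
#for reference, a unit square yields 1.0:
print(polygon_area(np.array([0, 1, 1, 0]), np.array([0, 0, 1, 1])))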
@@ -192,24 +192,24 @@ class ExtractSubprocessor(Subprocessor):
                                               source_rect=rect,
                                               source_landmarks=image_landmarks.tolist(),
                                               image_to_face_mat=image_to_face_mat
                                               )

                        result.append (output_file)
                        face_idx += 1

                if self.debug_dir is not None:
                    cv2_imwrite(debug_output_file, debug_image, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )

                return result

        #overridable
        def get_data_name (self, data):
            #return string identifier of your data
            return data[0]

    #override
    def __init__(self, input_data, type, image_size, face_type, debug_dir, multi_gpu=False, cpu_only=False, manual=False, manual_window_size=0, detector=None, output_path=None):
        self.input_data = input_data
        self.type = type
        self.image_size = image_size
@@ -218,8 +218,8 @@ class ExtractSubprocessor(Subprocessor):
        self.multi_gpu = multi_gpu
        self.cpu_only = cpu_only
        self.detector = detector
        self.output_path = output_path
        self.manual = manual
        self.manual_window_size = manual_window_size
        self.result = []
@@ -233,32 +233,32 @@ class ExtractSubprocessor(Subprocessor):
            io.named_window(self.wnd_name)
            io.capture_mouse(self.wnd_name)
            io.capture_keys(self.wnd_name)

            self.cache_original_image = (None, None)
            self.cache_image = (None, None)
            self.cache_text_lines_img = (None, None)
            self.hide_help = False

            self.landmarks = None
            self.x = 0
            self.y = 0
            self.rect_size = 100
            self.rect_locked = False
            self.extract_needed = True

        io.progress_bar (None, len (self.input_data))

    #override
    def on_clients_finalized(self):
        if self.manual == True:
            io.destroy_all_windows()

        io.progress_bar_close()

    def get_devices_for_type (self, type, multi_gpu, cpu_only):
        if 'cpu' in nnlib.device.backend:
            cpu_only = True

        if not cpu_only and (type == 'rects' or type == 'landmarks'):
            if type == 'rects' and (self.detector == 'mt') and nnlib.device.backend == "plaidML":
                cpu_only = True
@@ -269,11 +269,11 @@ class ExtractSubprocessor(Subprocessor):
                devices = [nnlib.device.getBestValidDeviceIdx()]
            if len(devices) == 0:
                devices = [0]

            for idx in devices:
                dev_name = nnlib.device.getDeviceName(idx)
                dev_vram = nnlib.device.getDeviceVRAMTotalGb(idx)

                if not self.manual and ( self.type == 'rects' and self.detector != 's3fd' ):
                    for i in range ( int (max (1, dev_vram / 2) ) ):
                        yield (idx, 'GPU', '%s #%d' % (dev_name,i) , dev_vram)
@@ -286,21 +286,21 @@ class ExtractSubprocessor(Subprocessor):
            else:
                for i in range( min(8, multiprocessing.cpu_count() // 2) ):
                    yield (i, 'CPU', 'CPU%d' % (i), 0 )

        if type == 'final':
            for i in range( min(8, multiprocessing.cpu_count()) ):
                yield (i, 'CPU', 'CPU%d' % (i), 0 )

    #override
    def process_info_generator(self):
        base_dict = {'type' : self.type,
                     'image_size': self.image_size,
                     'face_type': self.face_type,
                     'debug_dir': self.debug_dir,
                     'output_dir': str(self.output_path),
                     'detector': self.detector}

        for (device_idx, device_type, device_name, device_total_vram_gb) in self.get_devices_for_type(self.type, self.multi_gpu, self.cpu_only):
            client_dict = base_dict.copy()
            client_dict['device_idx'] = device_idx
            client_dict['device_name'] = device_name
@@ -311,7 +311,7 @@ class ExtractSubprocessor(Subprocessor):
    def get_data(self, host_dict):
        if not self.manual:
            if len (self.input_data) > 0:
                return self.input_data.pop(0)
        else:
            need_remark_face = False
@@ -327,7 +327,7 @@ class ExtractSubprocessor(Subprocessor):
                    self.rect, self.landmarks = faces.pop()
                    faces.clear()
                    redraw_needed = True
                    self.rect_locked = True
                    self.rect_size = ( self.rect[2] - self.rect[0] ) / 2
                    self.x = ( self.rect[0] + self.rect[2] ) / 2
                    self.y = ( self.rect[1] + self.rect[3] ) / 2
@@ -338,19 +338,19 @@ class ExtractSubprocessor(Subprocessor):
                else:
                    self.original_image = cv2_imread( filename )
                    self.cache_original_image = (filename, self.original_image )

                (h,w,c) = self.original_image.shape
                self.view_scale = 1.0 if self.manual_window_size == 0 else self.manual_window_size / ( h * (16.0/9.0) )

                if self.cache_image[0] == (h,w,c) + (self.view_scale,filename):
                    self.image = self.cache_image[1]
                else:
                    self.image = cv2.resize (self.original_image, ( int(w*self.view_scale), int(h*self.view_scale) ), interpolation=cv2.INTER_LINEAR)
                    self.cache_image = ( (h,w,c) + (self.view_scale,filename), self.image )

                (h,w,c) = self.image.shape

                sh = (0,0, w, min(100, h) )
                if self.cache_text_lines_img[0] == sh:
                    self.text_lines_img = self.cache_text_lines_img[1]
                else:
|
|||
'[,] [.]- prev frame, next frame. [Q] - skip remaining frames',
|
||||
'[h] - hide this help'
|
||||
], (1, 1, 1) )*255).astype(np.uint8)
|
||||
|
||||
|
||||
self.cache_text_lines_img = (sh, self.text_lines_img)
|
||||
|
||||
while True:
|
||||
io.process_messages(0.0001)
|
||||
|
||||
|
||||
new_x = self.x
|
||||
new_y = self.y
|
||||
new_rect_size = self.rect_size
|
||||
|
||||
|
||||
mouse_events = io.get_mouse_events(self.wnd_name)
|
||||
for ev in mouse_events:
|
||||
(x, y, ev, flags) = ev
|
||||
if ev == io.EVENT_MOUSEWHEEL and not self.rect_locked:
|
||||
mod = 1 if flags > 0 else -1
|
||||
mod = 1 if flags > 0 else -1
|
||||
diff = 1 if new_rect_size <= 40 else np.clip(new_rect_size / 10, 1, 10)
|
||||
new_rect_size = max (5, new_rect_size + diff*mod)
|
||||
new_rect_size = max (5, new_rect_size + diff*mod)
|
||||
elif ev == io.EVENT_LBUTTONDOWN:
|
||||
self.rect_locked = not self.rect_locked
|
||||
self.extract_needed = True
|
||||
elif not self.rect_locked:
|
||||
new_x = np.clip (x, 0, w-1) / self.view_scale
|
||||
new_y = np.clip (y, 0, h-1) / self.view_scale
|
||||
|
||||
|
||||
key_events = io.get_key_events(self.wnd_name)
|
||||
key, = key_events[-1] if len(key_events) > 0 else (0,)
|
||||
|
||||
|
@@ -393,48 +393,48 @@ class ExtractSubprocessor(Subprocessor):
                        #confirm frame
                        is_frame_done = True
                        faces.append ( [(self.rect), self.landmarks] )
                        break
                    elif key == ord(' '):
                        #confirm skip frame
                        is_frame_done = True
                        break
                    elif key == ord(',') and len(self.result) > 0:
                        #go to the previous frame

                        if self.rect_locked:
                            # Only save the face if the rect is still locked
                            faces.append ( [(self.rect), self.landmarks] )

                        self.input_data.insert(0, self.result.pop() )
                        io.progress_bar_inc(-1)
                        need_remark_face = True

                        break
                    elif key == ord('.'):
                        #go to the next frame

                        if self.rect_locked:
                            # Only save the face if the rect is still locked
                            faces.append ( [(self.rect), self.landmarks] )
                            need_remark_face = True
                        is_frame_done = True
                        break
                    elif key == ord('q'):
                        #skip the remaining frames

                        if self.rect_locked:
                            faces.append ( [(self.rect), self.landmarks] )
                        while len(self.input_data) > 0:
                            self.result.append( self.input_data.pop(0) )
                            io.progress_bar_inc(1)

                        break

                    elif key == ord('h'):
                        self.hide_help = not self.hide_help
                        break

                if self.x != new_x or \
                   self.y != new_y or \
                   self.rect_size != new_rect_size or \
@@ -443,33 +443,33 @@ class ExtractSubprocessor(Subprocessor):
                    self.x = new_x
                    self.y = new_y
                    self.rect_size = new_rect_size
                    self.rect = ( int(self.x-self.rect_size),
                                  int(self.y-self.rect_size),
                                  int(self.x+self.rect_size),
                                  int(self.y+self.rect_size) )

                    if redraw_needed:
                        redraw_needed = False
                        return [filename, None]
                    else:
                        return [filename, [self.rect]]

            else:
                is_frame_done = True

            if is_frame_done:
                self.result.append ( data )
                self.input_data.pop(0)
                io.progress_bar_inc(1)
                self.extract_needed = True
                self.rect_locked = False

        return None

    #override
    def on_data_return (self, host_dict, data):
        if not self.manual:
            self.input_data.insert(0, data)

    #override
    def on_result (self, host_dict, data, result):
@@ -477,33 +477,33 @@ class ExtractSubprocessor(Subprocessor):
            filename, landmarks = result
            if landmarks is not None:
                self.landmarks = landmarks[0][1]

            (h,w,c) = self.image.shape

            if not self.hide_help:
                image = cv2.addWeighted (self.image,1.0,self.text_lines_img,1.0,0)
            else:
                image = self.image.copy()

            view_rect = (np.array(self.rect) * self.view_scale).astype(np.int).tolist()
            view_landmarks = (np.array(self.landmarks) * self.view_scale).astype(np.int).tolist()

            if self.rect_size <= 40:
                scaled_rect_size = h // 3 if w > h else w // 3

                p1 = (self.x - self.rect_size, self.y - self.rect_size)
                p2 = (self.x + self.rect_size, self.y - self.rect_size)
                p3 = (self.x - self.rect_size, self.y + self.rect_size)

                wh = h if h < w else w
                np1 = (w / 2 - wh / 4, h / 2 - wh / 4)
                np2 = (w / 2 + wh / 4, h / 2 - wh / 4)
                np3 = (w / 2 - wh / 4, h / 2 + wh / 4)

                mat = cv2.getAffineTransform( np.float32([p1,p2,p3])*self.view_scale, np.float32([np1,np2,np3]) )
                image = cv2.warpAffine(image, mat,(w,h) )
                view_landmarks = LandmarksProcessor.transform_points (view_landmarks, mat)

            landmarks_color = (255,255,0) if self.rect_locked else (0,255,0)
            LandmarksProcessor.draw_rect_landmarks (image, view_rect, view_landmarks, self.image_size, self.face_type, landmarks_color=landmarks_color)
            self.extract_needed = False
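Note: the zoom applied above when the rect is small relies on cv2.getAffineTransform, which takes three source points and three destination points and returns the 2x3 matrix mapping one triangle onto the other; warpAffine then applies it to the whole preview. A self-contained sketch:

import numpy as np
import cv2

img = np.zeros((360, 640, 3), np.uint8)
h, w = img.shape[:2]

src = np.float32([[100, 100], [180, 100], [100, 180]])   #three corners of the small rect
wh = min(h, w)
dst = np.float32([[w/2 - wh/4, h/2 - wh/4],
                  [w/2 + wh/4, h/2 - wh/4],
                  [w/2 - wh/4, h/2 + wh/4]])             #centered square
mat = cv2.getAffineTransform(src, dst)
zoomed = cv2.warpAffine(img, mat, (w, h))
print(zoomed.shape)  # (360, 640, 3)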
@@ -513,10 +513,10 @@ class ExtractSubprocessor(Subprocessor):
        if self.type == 'rects':
            self.result.append ( result )
        elif self.type == 'landmarks':
            self.result.append ( result )
        elif self.type == 'final':
            self.result += result

        io.progress_bar_inc(1)

    #override
@@ -530,47 +530,47 @@ class DeletedFilesSearcherSubprocessor(Subprocessor):
        def on_initialize(self, client_dict):
            self.debug_paths_stems = client_dict['debug_paths_stems']
            return None

        #override
        def process_data(self, data):
            input_path_stem = Path(data[0]).stem
            return any ( [ input_path_stem == d_stem for d_stem in self.debug_paths_stems] )

        #override
        def get_data_name (self, data):
            #return string identifier of your data
            return data[0]

    #override
    def __init__(self, input_paths, debug_paths ):
        self.input_paths = input_paths
        self.debug_paths_stems = [ Path(d).stem for d in debug_paths]
        self.result = []
        super().__init__('DeletedFilesSearcherSubprocessor', DeletedFilesSearcherSubprocessor.Cli, 60)

    #override
    def process_info_generator(self):
        for i in range(min(multiprocessing.cpu_count(), 8)):
            yield 'CPU%d' % (i), {}, {'debug_paths_stems' : self.debug_paths_stems}

    #override
    def on_clients_initialized(self):
        io.progress_bar ("Searching deleted files", len (self.input_paths))

    #override
    def on_clients_finalized(self):
        io.progress_bar_close()

    #override
    def get_data(self, host_dict):
        if len (self.input_paths) > 0:
            return [self.input_paths.pop(0)]
        return None

    #override
    def on_data_return (self, host_dict, data):
        self.input_paths.insert(0, data[0])

    #override
    def on_result (self, host_dict, data, result):
        if result == False:
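Note: process_data above does a linear scan of debug_paths_stems for every input file, which is O(n*m) overall. A set gives O(1) membership tests; a sketch of that alternative (illustrative data, same stem-matching logic):

from pathlib import Path

debug_paths = ['d/00001.jpg', 'd/00003.jpg']
input_paths = ['i/00001.png', 'i/00002.png', 'i/00003.png']

debug_stems = { Path(p).stem for p in debug_paths }
deleted = [p for p in input_paths if Path(p).stem not in debug_stems]
print(deleted)  # ['i/00002.png'] - frames whose debug image was removed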
@@ -591,40 +591,40 @@ def main(input_dir,
         image_size=256,
         face_type='full_face',
         device_args={}):

    input_path = Path(input_dir)
    output_path = Path(output_dir)
    face_type = FaceType.fromString(face_type)

    multi_gpu = device_args.get('multi_gpu', False)
    cpu_only = device_args.get('cpu_only', False)

    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    if output_path.exists():
        if not manual_output_debug_fix and input_path != output_path:
            for filename in Path_utils.get_image_paths(output_path):
                Path(filename).unlink()
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    if manual_output_debug_fix:
        if debug_dir is None:
            raise ValueError('debug-dir must be specified')
        detector = 'manual'
        io.log_info('Performing re-extraction of frames which were deleted from the _debug directory.')

    input_path_image_paths = Path_utils.get_image_unique_filestem_paths(input_path, verbose_print_func=io.log_info)
    if debug_dir is not None:
        debug_output_path = Path(debug_dir)

        if manual_output_debug_fix:
            if not debug_output_path.exists():
                raise ValueError("%s not found " % ( str(debug_output_path) ))

            input_path_image_paths = DeletedFilesSearcherSubprocessor (input_path_image_paths, Path_utils.get_image_paths(debug_output_path) ).run()
            input_path_image_paths = sorted (input_path_image_paths)
        else:
            if debug_output_path.exists():
                for filename in Path_utils.get_image_paths(debug_output_path):
@@ -634,20 +634,20 @@ def main(input_dir,

    images_found = len(input_path_image_paths)
    faces_detected = 0
    if images_found != 0:
        if detector == 'manual':
            io.log_info ('Performing manual extract...')
            extracted_faces = ExtractSubprocessor ([ (filename,[]) for filename in input_path_image_paths ], 'landmarks', image_size, face_type, debug_dir, cpu_only=cpu_only, manual=True, manual_window_size=manual_window_size).run()
        else:
            io.log_info ('Performing 1st pass...')
            extracted_rects = ExtractSubprocessor ([ (x,) for x in input_path_image_paths ], 'rects', image_size, face_type, debug_dir, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, detector=detector).run()

            io.log_info ('Performing 2nd pass...')
            extracted_faces = ExtractSubprocessor (extracted_rects, 'landmarks', image_size, face_type, debug_dir, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False).run()

        if manual_fix:
            io.log_info ('Performing manual fix...')

            if all ( np.array ( [ len(data[1]) > 0 for data in extracted_faces] ) == True ):
                io.log_info ('All faces are detected, manual fix not needed.')
            else:
@@ -657,8 +657,8 @@ def main(input_dir,
        io.log_info ('Performing 3rd pass...')
        final_imgs_paths = ExtractSubprocessor (extracted_faces, 'final', image_size, face_type, debug_dir, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, output_path=output_path).run()
        faces_detected = len(final_imgs_paths)

    io.log_info ('-------------------------')
    io.log_info ('Images found: %d' % (images_found) )
    io.log_info ('Faces detected: %d' % (faces_detected) )
    io.log_info ('-------------------------')
@@ -17,18 +17,18 @@ from facelib import LandmarksProcessor
def main(input_dir, output_dir):
    input_path = Path(input_dir)
    output_path = Path(output_dir)

    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    if not output_path.exists():
        output_path.mkdir(parents=True)

    wnd_name = "Labeling tool"
    io.named_window (wnd_name)
    io.capture_mouse(wnd_name)
    io.capture_keys(wnd_name)

    #for filename in io.progress_bar_generator (Path_utils.get_image_paths(input_path), desc="Labeling"):
    for filename in Path_utils.get_image_paths(input_path):
        filepath = Path(filename)
@@ -39,165 +39,165 @@ def main(input_dir, output_dir):
            dflimg = DFLJPG.load ( str(filepath) )
        else:
            dflimg = None

        if dflimg is None:
            io.log_err ("%s is not a dfl image file" % (filepath.name) )
            continue

        lmrks = dflimg.get_landmarks()
        lmrks_list = lmrks.tolist()
        orig_img = cv2_imread(str(filepath))
        h,w,c = orig_img.shape

        mask_orig = LandmarksProcessor.get_image_hull_mask( orig_img.shape, lmrks).astype(np.uint8)[:,:,0]
        ero_dil_rate = w // 8
        mask_ero = cv2.erode (mask_orig, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ero_dil_rate,ero_dil_rate)), iterations = 1 )
        mask_dil = cv2.dilate(mask_orig, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ero_dil_rate,ero_dil_rate)), iterations = 1 )

        #mask_bg = np.zeros(orig_img.shape[:2],np.uint8)
        mask_bg = 1-mask_dil
        mask_bgp = np.ones(orig_img.shape[:2],np.uint8) #default - all background possible
        mask_fg = np.zeros(orig_img.shape[:2],np.uint8)
        mask_fgp = np.zeros(orig_img.shape[:2],np.uint8)

        img = orig_img.copy()

        l_thick=2

        def draw_4_lines (masks_out, pts, thickness=1):
            fgp,fg,bg,bgp = masks_out
            h,w = fg.shape

            fgp_pts = []
            fg_pts = np.array([ pts[i:i+2] for i in range(len(pts)-1)])
            bg_pts = []
            bgp_pts = []

            for i in range(len(fg_pts)):
                a, b = line = fg_pts[i]

                ba = b-a
                v = ba / npl.norm(ba)

                ccpv = np.array([v[1],-v[0]])
                cpv = np.array([-v[1],v[0]])
                step = 1 / max(np.abs(cpv))

                fgp_pts.append ( np.clip (line + ccpv * step * thickness, 0, w-1 ).astype(np.int) )
                bg_pts.append ( np.clip (line + cpv * step * thickness, 0, w-1 ).astype(np.int) )
                bgp_pts.append ( np.clip (line + cpv * step * thickness * 2, 0, w-1 ).astype(np.int) )

            fgp_pts = np.array(fgp_pts)
            bg_pts = np.array(bg_pts)
            bgp_pts = np.array(bgp_pts)

            cv2.polylines(fgp, fgp_pts, False, (1,), thickness=thickness)
            cv2.polylines(fg, fg_pts, False, (1,), thickness=thickness)
            cv2.polylines(bg, bg_pts, False, (1,), thickness=thickness)
            cv2.polylines(bgp, bgp_pts, False, (1,), thickness=thickness)

        def draw_lines ( masks_steps, pts, thickness=1):
            lines = np.array([ pts[i:i+2] for i in range(len(pts)-1)])

            for mask, step in masks_steps:
                h,w = mask.shape

                mask_lines = []
                for i in range(len(lines)):
                    a, b = line = lines[i]
                    ba = b-a
                    ba_len = npl.norm(ba)
                    if ba_len != 0:
                        v = ba / ba_len
                        pv = np.array([-v[1],v[0]])
                        pv_inv_max = 1 / max(np.abs(pv))
                        mask_lines.append ( np.clip (line + pv * pv_inv_max * thickness * step, 0, w-1 ).astype(np.int) )
                    else:
                        mask_lines.append ( np.array(line, dtype=np.int) )
                cv2.polylines(mask, mask_lines, False, (1,), thickness=thickness)

        def draw_fill_convex( mask_out, pts, scale=1.0 ):
            hull = cv2.convexHull(np.array(pts))

            if scale !=1.0:
                pts_count = hull.shape[0]

                sum_x = np.sum(hull[:, 0, 0])
                sum_y = np.sum(hull[:, 0, 1])

                hull_center = np.array([sum_x/pts_count, sum_y/pts_count])
                hull = hull_center+(hull-hull_center)*scale
                hull = hull.astype(pts.dtype)
            cv2.fillConvexPoly( mask_out, hull, (1,) )

        def get_gc_mask_bgr(gc_mask):
            h, w = gc_mask.shape
            bgr = np.zeros( (h,w,3), dtype=np.uint8 )

            bgr [ gc_mask == 0 ] = (0,0,0)
            bgr [ gc_mask == 1 ] = (255,255,255)
            bgr [ gc_mask == 2 ] = (0,0,255) #RED
            bgr [ gc_mask == 3 ] = (0,255,0) #GREEN
            return bgr

        def get_gc_mask_result(gc_mask):
            return np.where((gc_mask==1) + (gc_mask==3),1,0).astype(np.int)

        #convex inner of right chin to end of right eyebrow
        #draw_fill_convex ( mask_fgp, lmrks_list[8:17]+lmrks_list[26:27] )

        #convex inner of start of right chin to right eyebrow
        #draw_fill_convex ( mask_fgp, lmrks_list[8:9]+lmrks_list[22:27] )

        #convex inner of nose
        draw_fill_convex ( mask_fgp, lmrks[27:36] )

        #convex inner of nose half
        draw_fill_convex ( mask_fg, lmrks[27:36], scale=0.5 )

        #left corner of mouth to left corner of nose
        #draw_lines ( [ (mask_fg,0), ], lmrks_list[49:50]+lmrks_list[32:33], l_thick)

        #convex inner: right corner of nose to centers of eyebrows
        #draw_fill_convex ( mask_fgp, lmrks_list[35:36]+lmrks_list[19:20]+lmrks_list[24:25])

        #right corner of mouth to right corner of nose
        #draw_lines ( [ (mask_fg,0), ], lmrks_list[54:55]+lmrks_list[35:36], l_thick)

        #left eye
        #draw_fill_convex ( mask_fg, lmrks_list[36:40] )
        #right eye
        #draw_fill_convex ( mask_fg, lmrks_list[42:48] )

        #right chin
        draw_lines ( [ (mask_bg,0), (mask_fg,-1), ], lmrks[8:17], l_thick)

        #left eyebrow center to right eyebrow center
        draw_lines ( [ (mask_bg,-1), (mask_fg,0), ], lmrks_list[19:20] + lmrks_list[24:25], l_thick)
        # #draw_lines ( [ (mask_bg,-1), (mask_fg,0), ], lmrks_list[24:25] + lmrks_list[19:17:-1], l_thick)

        #half of right eyebrow to end of right chin
        draw_lines ( [ (mask_bg,-1), (mask_fg,0), ], lmrks_list[24:27] + lmrks_list[16:17], l_thick)

        #import code
        #code.interact(local=dict(globals(), **locals()))

        #compose mask layers
        gc_mask = np.zeros(orig_img.shape[:2],np.uint8)
        gc_mask [ mask_bgp==1 ] = 2
        gc_mask [ mask_fgp==1 ] = 3
        gc_mask [ mask_bg==1 ] = 0
        gc_mask [ mask_fg==1 ] = 1

        gc_bgr_before = get_gc_mask_bgr (gc_mask)

        #io.show_image (wnd_name, gc_mask )

        ##points, hierarchy = cv2.findContours(original_mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        ##gc_mask = ( (1-erode_mask)*2 + erode_mask )# * dilate_mask
        #gc_mask = (1-erode_mask)*2 + erode_mask
@@ -211,34 +211,34 @@ def main(input_dir, output_dir):
        #
        #
        cv2.grabCut(img,gc_mask,None,np.zeros((1,65),np.float64),np.zeros((1,65),np.float64),5, cv2.GC_INIT_WITH_MASK)

        gc_bgr = get_gc_mask_bgr (gc_mask)
        gc_mask_result = get_gc_mask_result(gc_mask)
        gc_mask_result_1 = gc_mask_result[:,:,np.newaxis]

        #import code
        #code.interact(local=dict(globals(), **locals()))
        orig_img_gc_layers_masked = (0.5*orig_img + 0.5*gc_bgr).astype(np.uint8)
        orig_img_gc_before_layers_masked = (0.5*orig_img + 0.5*gc_bgr_before).astype(np.uint8)

        pink_bg = np.full ( orig_img.shape, (255,0,255), dtype=np.uint8 )

        orig_img_result = orig_img * gc_mask_result_1
        orig_img_result_pinked = orig_img_result + pink_bg * (1-gc_mask_result_1)

        #io.show_image (wnd_name, blended_img)

        ##gc_mask, bgdModel, fgdModel =
        #
        #mask2 = np.where((gc_mask==1) + (gc_mask==3),255,0).astype('uint8')[:,:,np.newaxis]
        #mask2 = np.repeat(mask2, (3,), -1)
        #
        ##mask2 = np.where(gc_mask!=0,255,0).astype('uint8')
        #blended_img = orig_img #-\
        #    #0.3 * np.full(original_img.shape, (50,50,50)) * (1-mask_0_27)[:,:,np.newaxis]
        #    #0.3 * np.full(original_img.shape, (50,50,50)) * (1-dilate_mask)[:,:,np.newaxis] +\
        #    #0.3 * np.full(original_img.shape, (50,50,50)) * (erode_mask)[:,:,np.newaxis]
        #blended_img = np.clip(blended_img, 0, 255).astype(np.uint8)
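Note: the labels composed into gc_mask above are OpenCV's standard grabCut classes: 0 = cv2.GC_BGD (sure background), 1 = cv2.GC_FGD (sure foreground), 2 = cv2.GC_PR_BGD (probable background), 3 = cv2.GC_PR_FGD (probable foreground); get_gc_mask_result keeps 1 and 3. A self-contained run of the same call:

import numpy as np
import cv2

img = np.full((64, 64, 3), 255, np.uint8)
cv2.circle(img, (32, 32), 12, (0, 0, 0), -1)        #dark "object" on white

mask = np.full((64, 64), cv2.GC_PR_BGD, np.uint8)   #everything probably background
mask[20:44, 20:44] = cv2.GC_PR_FGD                  #object region probably foreground

bgd = np.zeros((1, 65), np.float64)
fgd = np.zeros((1, 65), np.float64)
cv2.grabCut(img, mask, None, bgd, fgd, 5, cv2.GC_INIT_WITH_MASK)

result = np.where((mask == cv2.GC_FGD) + (mask == cv2.GC_PR_FGD), 1, 0)
print(result.sum(), "foreground pixels")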
@@ -246,25 +246,25 @@ def main(input_dir, output_dir):
        ##code.interact(local=dict(globals(), **locals()))
        orig_img_lmrked = orig_img.copy()
        LandmarksProcessor.draw_landmarks(orig_img_lmrked, lmrks, transparent_mask=True)

        screen = np.concatenate ([orig_img_gc_before_layers_masked,
                                  orig_img_gc_layers_masked,
                                  orig_img,
                                  orig_img_lmrked,
                                  orig_img_result_pinked,
                                  orig_img_result,
                                  ], axis=1)

        io.show_image (wnd_name, screen.astype(np.uint8) )

        while True:
            io.process_messages()

            for (x,y,ev,flags) in io.get_mouse_events(wnd_name):
                pass
                #print (x,y,ev,flags)

            key_events = [ ev for ev, in io.get_key_events(wnd_name) ]
            for key in key_events:
                if key == ord('1'):
@@ -273,15 +273,15 @@ def main(input_dir, output_dir):
                    pass
                if key == ord('3'):
                    pass

            if ord(' ') in key_events:
                break

    import code
    code.interact(local=dict(globals(), **locals()))

#original_mask = np.ones(original_img.shape[:2],np.uint8)*2
#cv2.drawContours(original_mask, points, -1, (1,), 1)
@@ -15,10 +15,10 @@ from joblib import Subprocessor
import multiprocessing
from interact import interact as io
from imagelib import estimate_sharpness

class BlurEstimatorSubprocessor(Subprocessor):
    class Cli(Subprocessor.Cli):

        #override
        def on_initialize(self, client_dict):
            self.log_info('Running on %s.' % (client_dict['device_name']) )
@@ -26,58 +26,58 @@ class BlurEstimatorSubprocessor(Subprocessor):
        #override
        def process_data(self, data):
            filepath = Path( data[0] )

            if filepath.suffix == '.png':
                dflimg = DFLPNG.load( str(filepath) )
            elif filepath.suffix == '.jpg':
                dflimg = DFLJPG.load ( str(filepath) )
            else:
                dflimg = None

            if dflimg is not None:
                image = cv2_imread( str(filepath) )
                return [ str(filepath), estimate_sharpness(image) ]
            else:
                self.log_err ("%s is not a dfl image file" % (filepath.name) )
                return [ str(filepath), 0 ]

        #override
        def get_data_name (self, data):
            #return string identifier of your data
            return data[0]

    #override
    def __init__(self, input_data ):
        self.input_data = input_data
        self.img_list = []
        self.trash_img_list = []
        super().__init__('BlurEstimator', BlurEstimatorSubprocessor.Cli, 60)

    #override
    def on_clients_initialized(self):
        io.progress_bar ("", len (self.input_data))

    #override
    def on_clients_finalized(self):
        io.progress_bar_close ()

    #override
    def process_info_generator(self):
        for i in range(0, multiprocessing.cpu_count() ):
            yield 'CPU%d' % (i), {}, {'device_idx': i,
                                      'device_name': 'CPU%d' % (i),
                                      }

    #override
    def get_data(self, host_dict):
        if len (self.input_data) > 0:
            return self.input_data.pop(0)

        return None

    #override
    def on_data_return (self, host_dict, data):
        self.input_data.insert(0, data)

    #override
    def on_result (self, host_dict, data, result):
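Note: imagelib.estimate_sharpness's implementation is not part of this diff; variance of the Laplacian is a common stand-in for such a score (higher = sharper), so a hypothetical equivalent might look like:

import numpy as np
import cv2

def estimate_sharpness(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return cv2.Laplacian(gray, cv2.CV_64F).var()

img = np.random.randint(0, 256, (128, 128, 3), np.uint8)
print(estimate_sharpness(img))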
@@ -85,20 +85,20 @@ class BlurEstimatorSubprocessor(Subprocessor):
            self.trash_img_list.append ( result )
        else:
            self.img_list.append ( result )

        io.progress_bar_inc(1)

    #override
    def get_result(self):
        return self.img_list, self.trash_img_list

def sort_by_blur(input_path):
    io.log_info ("Sorting by blur...")

    img_list = [ (filename,[]) for filename in Path_utils.get_image_paths(input_path) ]
    img_list, trash_img_list = BlurEstimatorSubprocessor (img_list).run()

    io.log_info ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)
@@ -111,21 +111,21 @@ def sort_by_face(input_path):
    trash_img_list = []
    for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load( str(filepath) )
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load ( str(filepath) )
        else:
            dflimg = None

        if dflimg is None:
            io.log_err ("%s is not a dfl image file" % (filepath.name) )
            trash_img_list.append ( [str(filepath)] )
            continue

        img_list.append( [str(filepath), dflimg.get_landmarks()] )

    img_list_len = len(img_list)
    for i in io.progress_bar_generator ( range(0, img_list_len-1), "Sorting"):
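Note: the sorting loop entered above (and the one in HistSsimSubprocessor further down) is a greedy nearest-neighbour chaining: for each position i, the most similar remaining item is swapped into slot i+1, so similar images end up adjacent. A distilled sketch with plain numbers standing in for landmark distances:

def greedy_chain(items, dist):
    for i in range(len(items) - 1):
        #find the remaining item closest to items[i] and pull it next to it
        j_best = min(range(i + 1, len(items)), key=lambda j: dist(items[i], items[j]))
        items[i + 1], items[j_best] = items[j_best], items[i + 1]
    return items

print(greedy_chain([5, 1, 9, 2, 8], lambda a, b: abs(a - b)))  # [5, 2, 1, 8, 9]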
@@ -152,21 +152,21 @@ def sort_by_face_dissim(input_path):
    trash_img_list = []
    for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load( str(filepath) )
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load ( str(filepath) )
        else:
            dflimg = None

        if dflimg is None:
            io.log_err ("%s is not a dfl image file" % (filepath.name) )
            trash_img_list.append ( [str(filepath)] )
            continue

        img_list.append( [str(filepath), dflimg.get_landmarks(), 0 ] )

    img_list_len = len(img_list)
    for i in io.progress_bar_generator( range(img_list_len-1), "Sorting"):
        score_total = 0
@@ -183,79 +183,79 @@ def sort_by_face_dissim(input_path):
    img_list = sorted(img_list, key=operator.itemgetter(2), reverse=True)

    return img_list, trash_img_list

def sort_by_face_yaw(input_path):
    io.log_info ("Sorting by face yaw...")
    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load( str(filepath) )
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load ( str(filepath) )
        else:
            dflimg = None

        if dflimg is None:
            io.log_err ("%s is not a dfl image file" % (filepath.name) )
            trash_img_list.append ( [str(filepath)] )
            continue

        pitch, yaw = LandmarksProcessor.estimate_pitch_yaw ( dflimg.get_landmarks() )

        img_list.append( [str(filepath), yaw ] )

    io.log_info ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

    return img_list, trash_img_list

def sort_by_face_pitch(input_path):
    io.log_info ("Sorting by face pitch...")
    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load( str(filepath) )
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load ( str(filepath) )
        else:
            dflimg = None

        if dflimg is None:
            io.log_err ("%s is not a dfl image file" % (filepath.name) )
            trash_img_list.append ( [str(filepath)] )
            continue

        pitch, yaw = LandmarksProcessor.estimate_pitch_yaw ( dflimg.get_landmarks() )

        img_list.append( [str(filepath), pitch ] )

    io.log_info ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

    return img_list, trash_img_list

class HistSsimSubprocessor(Subprocessor):
    class Cli(Subprocessor.Cli):
        #override
        def on_initialize(self, client_dict):
            self.log_info ('Running on %s.' % (client_dict['device_name']) )

        #override
        def process_data(self, data):
            img_list = []
            for x in data:
                img = cv2_imread(x)
                img_list.append ([x, cv2.calcHist([img], [0], None, [256], [0, 256]),
                                     cv2.calcHist([img], [1], None, [256], [0, 256]),
                                     cv2.calcHist([img], [2], None, [256], [0, 256])
                                 ])

            img_list_len = len(img_list)
            for i in range(img_list_len-1):
                min_score = float("inf")
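Note: the per-pair score computed in the elided lines of this loop is a distance between the per-channel histograms built above; cv2.compareHist is the standard tool for that (the exact metric is not visible in this hunk, so Bhattacharyya below is an assumption):

import numpy as np
import cv2

a = np.random.randint(0, 256, (64, 64, 3), np.uint8)
b = np.random.randint(0, 256, (64, 64, 3), np.uint8)

score = sum(cv2.compareHist(cv2.calcHist([a], [ch], None, [256], [0, 256]),
                            cv2.calcHist([b], [ch], None, [256], [0, 256]),
                            cv2.HISTCMP_BHATTACHARYYA)
            for ch in range(3))
print(score)  # 0.0 for identical images; larger = more dissimilar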
@@ -268,23 +268,23 @@ class HistSsimSubprocessor(Subprocessor):
                        min_score = score
                        j_min_score = j
                img_list[i+1], img_list[j_min_score] = img_list[j_min_score], img_list[i+1]

                self.progress_bar_inc(1)

            return img_list

        #override
        def get_data_name (self, data):
            return "Bunch of images"

    #override
    def __init__(self, img_list ):
        self.img_list = img_list
        self.img_list_len = len(img_list)

        slice_count = 20000
        sliced_count = self.img_list_len // slice_count

        if sliced_count > 12:
            sliced_count = 11.9
            slice_count = int(self.img_list_len / sliced_count)
@ -294,10 +294,10 @@ class HistSsimSubprocessor(Subprocessor):
|
|||
[ self.img_list[sliced_count*slice_count:] ]
|
||||
|
||||
self.result = []
|
||||
super().__init__('HistSsim', HistSsimSubprocessor.Cli, 0)
|
||||
super().__init__('HistSsim', HistSsimSubprocessor.Cli, 0)
|
||||
|
||||
#override
|
||||
def process_info_generator(self):
|
||||
def process_info_generator(self):
|
||||
for i in range( len(self.img_chunks_list) ):
|
||||
yield 'CPU%d' % (i), {'i':i}, {'device_idx': i,
|
||||
'device_name': 'CPU%d' % (i)
|
||||
|
@ -306,21 +306,21 @@ class HistSsimSubprocessor(Subprocessor):
|
|||
def on_clients_initialized(self):
|
||||
io.progress_bar ("Sorting", len(self.img_list))
|
||||
io.progress_bar_inc(len(self.img_chunks_list))
|
||||
|
||||
|
||||
#override
|
||||
def on_clients_finalized(self):
|
||||
io.progress_bar_close()
|
||||
|
||||
|
||||
#override
|
||||
def get_data(self, host_dict):
|
||||
if len (self.img_chunks_list) > 0:
|
||||
return self.img_chunks_list.pop(0)
|
||||
def get_data(self, host_dict):
|
||||
if len (self.img_chunks_list) > 0:
|
||||
return self.img_chunks_list.pop(0)
|
||||
return None
|
||||
|
||||
|
||||
#override
|
||||
def on_data_return (self, host_dict, data):
|
||||
raise Exception("Fail to process data. Decrease number of images and try again.")
|
||||
|
||||
|
||||
#override
|
||||
def on_result (self, host_dict, data, result):
|
||||
self.result += result
|
||||
|
@ -329,10 +329,10 @@ class HistSsimSubprocessor(Subprocessor):
|
|||
#override
|
||||
def get_result(self):
|
||||
return self.result
|
||||
|
||||
|
||||
def sort_by_hist(input_path):
|
||||
io.log_info ("Sorting by histogram similarity...")
|
||||
img_list = HistSsimSubprocessor(Path_utils.get_image_paths(input_path)).run()
|
||||
img_list = HistSsimSubprocessor(Path_utils.get_image_paths(input_path)).run()
|
||||
return img_list
|
||||
|
||||
class HistDissimSubprocessor(Subprocessor):
|
||||
|
@ -344,7 +344,7 @@ class HistDissimSubprocessor(Subprocessor):
|
|||
self.img_list_len = len(self.img_list)
|
||||
|
||||
#override
|
||||
def process_data(self, data):
|
||||
def process_data(self, data):
|
||||
i = data[0]
|
||||
score_total = 0
|
||||
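            # The elided loop accumulates this image's histogram distance to every
            # other image; a large total marks an outlier unlike the rest of the set.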
            for j in range( 0, self.img_list_len):

@@ -358,40 +358,40 @@ class HistDissimSubprocessor(Subprocessor):
        def get_data_name (self, data):
            #return string identifier of your data
            return self.img_list[data[0]][0]

    #override
    def __init__(self, img_list ):
        self.img_list = img_list
        self.img_list_range = [i for i in range(0, len(img_list) )]
        self.result = []
        super().__init__('HistDissim', HistDissimSubprocessor.Cli, 60)

    #override
    def on_clients_initialized(self):
        io.progress_bar ("Sorting", len (self.img_list) )

    #override
    def on_clients_finalized(self):
        io.progress_bar_close()

    #override
    def process_info_generator(self):
        for i in range(0, min(multiprocessing.cpu_count(), 8) ):
            yield 'CPU%d' % (i), {}, {'device_idx': i,
                                      'device_name': 'CPU%d' % (i),
                                      'img_list' : self.img_list
                                     }

    #override
    def get_data(self, host_dict):
        if len (self.img_list_range) > 0:
            return [self.img_list_range.pop(0)]

        return None

    #override
    def on_data_return (self, host_dict, data):
        self.img_list_range.insert(0, data[0])

    #override
    def on_result (self, host_dict, data, result):
        self.img_list[data[0]][2] = result

@@ -400,7 +400,7 @@ class HistDissimSubprocessor(Subprocessor):
    #override
    def get_result(self):
        return self.img_list


def sort_by_hist_dissim(input_path):
    io.log_info ("Sorting by histogram dissimilarity...")

@@ -408,19 +408,19 @@ def sort_by_hist_dissim(input_path):
    trash_img_list = []
    for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load( str(filepath) )
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load ( str(filepath) )
        else:
            dflimg = None

        if dflimg is None:
            io.log_err ("%s is not a dfl image file" % (filepath.name) )
            trash_img_list.append ([str(filepath)])
            continue

        image = cv2_imread(str(filepath))
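        # Compare only the face region: build a convex-hull mask from the
        # landmarks and zero out the background before taking the histogram.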
        face_mask = LandmarksProcessor.get_image_hull_mask (image.shape, dflimg.get_landmarks())
        image = (image*face_mask).astype(np.uint8)

@@ -428,26 +428,26 @@ def sort_by_hist_dissim(input_path):
        img_list.append ([str(filepath), cv2.calcHist([cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)], [0], None, [256], [0, 256]), 0 ])

    img_list = HistDissimSubprocessor(img_list).run()

    io.log_info ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(2), reverse=True)

    return img_list, trash_img_list

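# The brightness and hue sorts reduce each image to a single scalar key:
# the mean of the HSV value channel ([...,2]) or hue channel ([...,0]).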
def sort_by_brightness(input_path):
    io.log_info ("Sorting by brightness...")
    img_list = [ [x, np.mean ( cv2.cvtColor(cv2_imread(x), cv2.COLOR_BGR2HSV)[...,2].flatten() )] for x in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Loading") ]
    io.log_info ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)
    return img_list


def sort_by_hue(input_path):
    io.log_info ("Sorting by hue...")
    img_list = [ [x, np.mean ( cv2.cvtColor(cv2_imread(x), cv2.COLOR_BGR2HSV)[...,0].flatten() )] for x in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Loading") ]
    io.log_info ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)
    return img_list


def sort_by_black(input_path):
    io.log_info ("Sorting by amount of black pixels...")

@@ -460,22 +460,22 @@ def sort_by_black(input_path):
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=False)

    return img_list


def sort_by_origname(input_path):
    io.log_info ("Sort by original filename...")

    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load( str(filepath) )
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load( str(filepath) )
        else:
            dflimg = None

        if dflimg is None:
            io.log_err ("%s is not a dfl image file" % (filepath.name) )
            trash_img_list.append( [str(filepath)] )

@@ -486,7 +486,7 @@ def sort_by_origname(input_path):
    io.log_info ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1))
    return img_list, trash_img_list


def sort_by_oneface_in_image(input_path):
    io.log_info ("Sort by one face in images...")
    image_paths = Path_utils.get_image_paths(input_path)

@@ -503,17 +503,17 @@ def sort_by_oneface_in_image(input_path):
            trash_img_list = [ (image_paths[x],) for x in idxs ]
            return img_list, trash_img_list
    return [], []


class FinalLoaderSubprocessor(Subprocessor):
    class Cli(Subprocessor.Cli):
        #override
        def on_initialize(self, client_dict):
            self.log_info ('Running on %s.' % (client_dict['device_name']) )
            self.include_by_blur = client_dict['include_by_blur']

        #override
        def process_data(self, data):
            filepath = Path(data[0])

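            # Results follow a [status, payload] convention: status 1 sends the
            # file to trash, status 0 carries [path, sharpness, histogram, yaw].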
            try:
                if filepath.suffix == '.png':

@@ -522,40 +522,40 @@ class FinalLoaderSubprocessor(Subprocessor):
                    dflimg = DFLJPG.load( str(filepath) )
                else:
                    dflimg = None

                if dflimg is None:
                    self.log_err("%s is not a dfl image file" % (filepath.name))
                    return [ 1, [str(filepath)] ]

                bgr = cv2_imread(str(filepath))
                if bgr is None:
                    raise Exception ("Unable to load %s" % (filepath.name) )

                gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
                sharpness = estimate_sharpness(gray) if self.include_by_blur else 0
                pitch, yaw = LandmarksProcessor.estimate_pitch_yaw ( dflimg.get_landmarks() )

                hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
            except Exception as e:
                self.log_err (e)
                return [ 1, [str(filepath)] ]

            return [ 0, [str(filepath), sharpness, hist, yaw ] ]

        #override
        def get_data_name (self, data):
            #return string identifier of your data
            return data[0]

    #override
    def __init__(self, img_list, include_by_blur ):
        self.img_list = img_list

        self.include_by_blur = include_by_blur
        self.result = []
        self.result_trash = []

        super().__init__('FinalLoader', FinalLoaderSubprocessor.Cli, 60)

    #override
    def on_clients_initialized(self):

@@ -564,9 +564,9 @@ class FinalLoaderSubprocessor(Subprocessor):
    #override
    def on_clients_finalized(self):
        io.progress_bar_close()

    #override
    def process_info_generator(self):
        for i in range(0, min(multiprocessing.cpu_count(), 8) ):
            yield 'CPU%d' % (i), {}, {'device_idx': i,
                                      'device_name': 'CPU%d' % (i),

@@ -575,15 +575,15 @@ class FinalLoaderSubprocessor(Subprocessor):
    #override
    def get_data(self, host_dict):
        if len (self.img_list) > 0:
            return [self.img_list.pop(0)]

        return None

    #override
    def on_data_return (self, host_dict, data):
        self.img_list.insert(0, data[0])

    #override
    def on_result (self, host_dict, data, result):
        if result[0] == 0:

@@ -599,7 +599,7 @@ class FinalLoaderSubprocessor(Subprocessor):
class FinalHistDissimSubprocessor(Subprocessor):
    class Cli(Subprocessor.Cli):
        #override
        def on_initialize(self, client_dict):
            self.log_info ('Running on %s.' % (client_dict['device_name']) )

        #override

@@ -611,25 +611,25 @@ class FinalHistDissimSubprocessor(Subprocessor):
                    if i == j:
                        continue
                    score_total += cv2.compareHist(img_list[i][2], img_list[j][2], cv2.HISTCMP_BHATTACHARYYA)
                img_list[i][3] = score_total
            img_list = sorted(img_list, key=operator.itemgetter(3), reverse=True)
            return idx, img_list

        #override
        def get_data_name (self, data):
            return "Bunch of images"

    #override
    def __init__(self, yaws_sample_list ):
        self.yaws_sample_list = yaws_sample_list
        self.yaws_sample_list_len = len(yaws_sample_list)

        self.yaws_sample_list_idxs = [ i for i in range(self.yaws_sample_list_len) if self.yaws_sample_list[i] is not None ]
        self.result = [ None for _ in range(self.yaws_sample_list_len) ]
        super().__init__('FinalHistDissimSubprocessor', FinalHistDissimSubprocessor.Cli)

    #override
    def process_info_generator(self):
        for i in range(min(multiprocessing.cpu_count(), 8) ):
            yield 'CPU%d' % (i), {'i':i}, {'device_idx': i,
                                           'device_name': 'CPU%d' % (i)

@@ -637,38 +637,38 @@ class FinalHistDissimSubprocessor(Subprocessor):
    #override
    def on_clients_initialized(self):
        io.progress_bar ("Sort by hist-dissim", self.yaws_sample_list_len)

    #override
    def on_clients_finalized(self):
        io.progress_bar_close()

    #override
    def get_data(self, host_dict):
        if len (self.yaws_sample_list_idxs) > 0:
            idx = self.yaws_sample_list_idxs.pop(0)

            return idx, self.yaws_sample_list[idx]
        return None

    #override
    def on_data_return (self, host_dict, data):
        self.yaws_sample_list_idxs.insert(0, data[0])

    #override
    def on_result (self, host_dict, data, result):
        idx, yaws_sample_list = result
        self.result[idx] = yaws_sample_list
        io.progress_bar_inc(1)

    #override
    def get_result(self):
        return self.result


def sort_final(input_path, include_by_blur=True):
    io.log_info ("Performing final sort.")

    target_count = io.input_int ("Target number of images? (default:2000) : ", 2000)

    img_list, trash_img_list = FinalLoaderSubprocessor( Path_utils.get_image_paths(input_path), include_by_blur ).run()
    final_img_list = []

@@ -676,12 +676,12 @@ def sort_final(input_path, include_by_blur=True):
    imgs_per_grad = round (target_count / grads)

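    # Yaw space [-1, 1] is split into `grads` equal bins; each bin is filled
    # and trimmed independently so the final selection covers all head poses.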
    grads_space = np.linspace (-1.0,1.0,grads)

    yaws_sample_list = [None]*grads
    for g in io.progress_bar_generator ( range(grads), "Sort by yaw"):
        yaw = grads_space[g]
        next_yaw = grads_space[g+1] if g < grads-1 else yaw

        yaw_samples = []
        for img in img_list:
            s_yaw = -img[3]

@@ -691,17 +691,17 @@ def sort_final(input_path, include_by_blur=True):
                yaw_samples += [ img ]
        if len(yaw_samples) > 0:
            yaws_sample_list[g] = yaw_samples

    total_lack = 0
    for g in io.progress_bar_generator ( range(grads), ""):
        img_list = yaws_sample_list[g]
        img_list_len = len(img_list) if img_list is not None else 0

        lack = imgs_per_grad - img_list_len
        total_lack += max(lack, 0)

    imgs_per_grad += total_lack // grads

    if include_by_blur:
        sharpned_imgs_per_grad = imgs_per_grad*10
        for g in io.progress_bar_generator ( range (grads), "Sort by blur"):

@@ -709,47 +709,47 @@ def sort_final(input_path, include_by_blur=True):
            if img_list is None:
                continue

            img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

            if len(img_list) > sharpned_imgs_per_grad:
                trash_img_list += img_list[sharpned_imgs_per_grad:]
                img_list = img_list[0:sharpned_imgs_per_grad]

            yaws_sample_list[g] = img_list

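    # Within each yaw bin the remaining candidates are re-ranked by histogram
    # dissimilarity, pushing near-duplicate frames past the per-bin cut-off.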
    yaws_sample_list = FinalHistDissimSubprocessor(yaws_sample_list).run()

    for g in io.progress_bar_generator ( range (grads), "Fetching best"):
        img_list = yaws_sample_list[g]
        if img_list is None:
            continue

        final_img_list += img_list[0:imgs_per_grad]
        trash_img_list += img_list[imgs_per_grad:]

    return final_img_list, trash_img_list


def final_process(input_path, img_list, trash_img_list):
    if len(trash_img_list) != 0:
        parent_input_path = input_path.parent
        trash_path = parent_input_path / (input_path.stem + '_trash')
        trash_path.mkdir (exist_ok=True)

        io.log_info ("Trashing %d items to %s" % ( len(trash_img_list), str(trash_path) ) )

        for filename in Path_utils.get_image_paths(trash_path):
            Path(filename).unlink()

        for i in io.progress_bar_generator( range(len(trash_img_list)), "Moving trash", leave=False):
            src = Path (trash_img_list[i][0])
            dst = trash_path / src.name
            try:
                src.rename (dst)
            except:
                io.log_info ('failed to trash %s' % (src.name) )

        io.log_info ("")

    if len(img_list) != 0:
        for i in io.progress_bar_generator( [*range(len(img_list))], "Renaming", leave=False):
            src = Path (img_list[i][0])

@@ -758,24 +758,24 @@ def final_process(input_path, img_list, trash_img_list):
                src.rename (dst)
            except:
                io.log_info ('failed to rename %s' % (src.name) )

        for i in io.progress_bar_generator( [*range(len(img_list))], "Renaming"):
            src = Path (img_list[i][0])
            src = input_path / ('%.5d_%s' % (i, src.name))
            dst = input_path / ('%.5d%s' % (i, src.suffix))
            try:
                src.rename (dst)
            except:
                io.log_info ('failed to rename %s' % (src.name) )


def main (input_path, sort_by_method):
    input_path = Path(input_path)
    sort_by_method = sort_by_method.lower()

    io.log_info ("Running sort tool.\r\n")

    img_list = []
    trash_img_list = []
    if sort_by_method == 'blur':            img_list, trash_img_list = sort_by_blur (input_path)

@@ -787,10 +787,10 @@ def main (input_path, sort_by_method):
    elif sort_by_method == 'hist-dissim':   img_list, trash_img_list = sort_by_hist_dissim (input_path)
    elif sort_by_method == 'brightness':    img_list = sort_by_brightness (input_path)
    elif sort_by_method == 'hue':           img_list = sort_by_hue (input_path)
    elif sort_by_method == 'black':         img_list = sort_by_black (input_path)
    elif sort_by_method == 'origname':      img_list, trash_img_list = sort_by_origname (input_path)
    elif sort_by_method == 'oneface':       img_list, trash_img_list = sort_by_oneface_in_image (input_path)
    elif sort_by_method == 'final':         img_list, trash_img_list = sort_final (input_path)
    elif sort_by_method == 'final-no-blur': img_list, trash_img_list = sort_final (input_path, include_by_blur=False)

    final_process (input_path, img_list, trash_img_list)


@@ -7,39 +7,39 @@ import numpy as np
import itertools
from pathlib import Path
from utils import Path_utils
from utils import image_utils
import cv2
import models
from interact import interact as io

def trainerThread (s2c, c2s, args, device_args):
    while True:
        try:
            training_data_src_path = Path( args.get('training_data_src_dir', '') )
            training_data_dst_path = Path( args.get('training_data_dst_dir', '') )
            model_path = Path( args.get('model_path', '') )
            model_name = args.get('model_name', '')
            save_interval_min = 15
            debug = args.get('debug', '')

            if not training_data_src_path.exists():
                io.log_err('Training data src directory does not exist.')
                break

            if not training_data_dst_path.exists():
                io.log_err('Training data dst directory does not exist.')
                break

            if not model_path.exists():
                model_path.mkdir(exist_ok=True)

            model = models.import_model(model_name)(
                        model_path,
                        training_data_src_path=training_data_src_path,
                        training_data_dst_path=training_data_dst_path,
                        debug=debug,
                        device_args=device_args)

            is_reached_goal = model.is_reached_iter_goal()
            is_upd_save_time_after_train = False
            loss_string = ""

@@ -49,37 +49,37 @@ def trainerThread (s2c, c2s, args, device_args):
            model.save()
            io.log_info(loss_string)
            is_upd_save_time_after_train = True

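            # Previews are pushed to the UI thread through the c2s queue; in
            # debug mode a single debug iteration is rendered on demand instead.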
            def send_preview():
                if not debug:
                    previews = model.get_previews()
                    c2s.put ( {'op':'show', 'previews': previews, 'iter':model.get_iter(), 'loss_history': model.get_loss_history().copy() } )
                else:
                    previews = [( 'debug, press update for new', model.debug_one_iter())]
                    c2s.put ( {'op':'show', 'previews': previews} )

            if model.is_first_run():
                model_save()

            if model.get_target_iter() != 0:
                if is_reached_goal:
                    io.log_info('Model already trained to target iteration. You can use preview.')
                else:
                    io.log_info('Starting. Target iteration: %d. Press "Enter" to stop training and save model.' % ( model.get_target_iter() ) )
            else:
                io.log_info('Starting. Press "Enter" to stop training and save model.')

            last_save_time = time.time()

            for i in itertools.count(0,1):
                if not debug:
                    if not is_reached_goal:
                        loss_string = model.train_one_iter()
                        if is_upd_save_time_after_train:
                            #saving resets plaidML programs, so update last_save_time only after plaidML has rebuilt them
                            last_save_time = time.time()

                        io.log_info (loss_string, end='\r')
                        if model.get_target_iter() != 0 and model.is_reached_iter_goal():
                            io.log_info ('Reached target iteration.')

@@ -91,77 +91,77 @@ def trainerThread (s2c, c2s, args, device_args):
                    last_save_time = time.time()
                    model_save()
                    send_preview()

                if i==0:
                    if is_reached_goal:
                        model.pass_one_iter()
                    send_preview()

                if debug:
                    time.sleep(0.005)

                while not s2c.empty():
                    input = s2c.get()
                    op = input['op']
                    if op == 'save':
                        model_save()
                    elif op == 'preview':
                        if is_reached_goal:
                            model.pass_one_iter()
                        send_preview()
                    elif op == 'close':
                        model_save()
                        i = -1
                        break

                if i == -1:
                    break

            model.finalize()

        except Exception as e:
            print ('Error: %s' % (str(e)))
            traceback.print_exc()
            break
    c2s.put ( {'op':'close'} )


def main(args, device_args):
    io.log_info ("Running trainer.\r\n")

    no_preview = args.get('no_preview', False)

    s2c = queue.Queue()
    c2s = queue.Queue()

    thread = threading.Thread(target=trainerThread, args=(s2c, c2s, args, device_args) )
    thread.start()

    if no_preview:
        while True:
            if not c2s.empty():
                input = c2s.get()
                op = input.get('op','')
                if op == 'close':
                    break
            io.process_messages(0.1)
    else:
        wnd_name = "Training preview"
        io.named_window(wnd_name)
        io.capture_keys(wnd_name)

        previews = None
        loss_history = None
        selected_preview = 0
        update_preview = False
        is_showing = False
        is_waiting_preview = False
        show_last_history_iters_count = 0
        iter = 0
        while True:
            if not c2s.empty():
                input = c2s.get()
                op = input['op']

@@ -177,7 +177,7 @@ def main(args, device_args):
                        (h, w, c) = preview_rgb.shape
                        max_h = max (max_h, h)
                        max_w = max (max_w, w)

                    max_size = 800
                    if max_h > max_size:
                        max_w = int( max_w / (max_h / max_size) )

@@ -194,49 +194,49 @@ def main(args, device_args):
                    update_preview = True
                elif op == 'close':
                    break

            if update_preview:
                update_preview = False

                selected_preview_name = previews[selected_preview][0]
                selected_preview_rgb = previews[selected_preview][1]
                (h,w,c) = selected_preview_rgb.shape

                # HEAD
                head_lines = [
                    '[s]:save [enter]:exit',
                    '[p]:update [space]:next preview [l]:change history range',
                    'Preview: "%s" [%d/%d]' % (selected_preview_name,selected_preview+1, len(previews) )
                    ]
                head_line_height = 15
                head_height = len(head_lines) * head_line_height
                head = np.ones ( (head_height,w,c) ) * 0.1

                for i in range(0, len(head_lines)):
                    t = i*head_line_height
                    b = (i+1)*head_line_height
                    head[t:b, 0:w] += image_utils.get_text_image ( (w,head_line_height,c) , head_lines[i], color=[0.8]*c )

                final = head

                if loss_history is not None:
                    if show_last_history_iters_count == 0:
                        loss_history_to_show = loss_history
                    else:
                        loss_history_to_show = loss_history[-show_last_history_iters_count:]

                    lh_img = models.ModelBase.get_loss_history_preview(loss_history_to_show, iter, w, c)
                    final = np.concatenate ( [final, lh_img], axis=0 )

                final = np.concatenate ( [final, selected_preview_rgb], axis=0 )
                final = np.clip(final, 0, 1)

                io.show_image( wnd_name, (final*255).astype(np.uint8) )
                is_showing = True

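            # Keyboard handling mirrors the on-screen help: Enter stops and
            # saves, 's' saves, 'p' redraws the preview, 'l' cycles the
            # loss-history window, space selects the next preview.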
            key_events = io.get_key_events(wnd_name)
            key, = key_events[-1] if len(key_events) > 0 else (0,)

            if key == ord('\n') or key == ord('\r'):
                s2c.put ( {'op': 'close'} )
            elif key == ord('s'):

@@ -253,14 +253,14 @@ def main(args, device_args):
                elif show_last_history_iters_count == 10000:
                    show_last_history_iters_count = 50000
                elif show_last_history_iters_count == 50000:
                    show_last_history_iters_count = 100000
                elif show_last_history_iters_count == 100000:
                    show_last_history_iters_count = 0
                update_preview = True
            elif key == ord(' '):
                selected_preview = (selected_preview + 1) % len(previews)
                update_preview = True

            io.process_messages(0.1)

    io.destroy_all_windows()


@@ -9,30 +9,30 @@ from interact import interact as io

def convert_png_to_jpg_file (filepath):
    filepath = Path(filepath)

    if filepath.suffix != '.png':
        return

    dflpng = DFLPNG.load (str(filepath) )
    if dflpng is None:
        io.log_err ("%s is not a dfl image file" % (filepath.name) )
        return

    dfl_dict = dflpng.getDFLDictData()

    img = cv2_imread (str(filepath))
    new_filepath = str(filepath.parent / (filepath.stem + '.jpg'))
    cv2_imwrite ( new_filepath, img, [int(cv2.IMWRITE_JPEG_QUALITY), 85])

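    # Re-embed the DFL metadata (face type, landmarks, source info) into the
    # new JPEG so the alignment data survives the PNG-to-JPEG conversion.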
    DFLJPG.embed_data( new_filepath,
                       face_type=dfl_dict.get('face_type', None),
                       landmarks=dfl_dict.get('landmarks', None),
                       source_filename=dfl_dict.get('source_filename', None),
                       source_rect=dfl_dict.get('source_rect', None),
                       source_landmarks=dfl_dict.get('source_landmarks', None) )

    filepath.unlink()


def convert_png_to_jpg_folder (input_path):
    input_path = Path(input_path)

@@ -41,73 +41,73 @@ def convert_png_to_jpg_folder (input_path):
    for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Converting"):
        filepath = Path(filepath)
        convert_png_to_jpg_file(filepath)


def add_landmarks_debug_images(input_path):
    io.log_info ("Adding landmarks debug images...")

    for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)

        img = cv2_imread(str(filepath))

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load( str(filepath) )
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load ( str(filepath) )
        else:
            dflimg = None

        if dflimg is None:
            io.log_err ("%s is not a dfl image file" % (filepath.name) )
            continue

        if img is not None:
            face_landmarks = dflimg.get_landmarks()
            LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True)

            output_file = '{}{}'.format( str(Path(str(input_path)) / filepath.stem), '_debug.jpg')
            cv2_imwrite(output_file, img, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )


def recover_original_aligned_filename(input_path):
    io.log_info ("Recovering original aligned filename...")

    files = []
    for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load( str(filepath) )
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load ( str(filepath) )
        else:
            dflimg = None

        if dflimg is None:
            io.log_err ("%s is not a dfl image file" % (filepath.name) )
            continue

        files += [ [filepath, None, dflimg.get_source_filename(), False] ]

    files_len = len(files)
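    # Pass 1: group aligned faces by their source frame name and assign target
    # names of the form <source_stem>_<face_index><suffix>.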
    for i in io.progress_bar_generator( range(files_len), "Sorting" ):
        fp, _, sf, converted = files[i]

        if converted:
            continue

        sf_stem = Path(sf).stem

        files[i][1] = fp.parent / ( sf_stem + '_0' + fp.suffix )
        files[i][3] = True
        c = 1

        for j in range(i+1, files_len):
            fp_j, _, sf_j, converted_j = files[j]
            if converted_j:
                continue

            if sf_j == sf:
                files[j][1] = fp_j.parent / ( sf_stem + ('_%d' % (c)) + fp_j.suffix )
                files[j][3] = True
                c += 1

@@ -118,11 +118,11 @@ def recover_original_aligned_filename(input_path):
            fs.rename (dst)
        except:
            io.log_err ('failed to rename %s' % (fs.name) )

    for file in io.progress_bar_generator( files, "Renaming" ):
        fs, fd, _, _ = file
        fs = fs.parent / ( fs.stem + '_tmp' + fs.suffix )
        try:
            fs.rename (fd)
        except:
            io.log_err ('failed to rename %s' % (fs.name) )


@@ -8,38 +8,38 @@ from interact import interact as io
def extract_video(input_file, output_dir, output_ext=None, fps=None):
    input_file_path = Path(input_file)
    output_path = Path(output_dir)

    if not output_path.exists():
        output_path.mkdir(exist_ok=True)

    if input_file_path.suffix == '.*':
        input_file_path = Path_utils.get_first_file_by_stem (input_file_path.parent, input_file_path.stem)
    else:
        if not input_file_path.exists():
            input_file_path = None

    if input_file_path is None:
        io.log_err("input_file not found.")
        return

    if output_ext is None:
        output_ext = io.input_str ("Output image format (extension)? ( default:png ) : ", "png")

    if fps is None:
        fps = io.input_int ("Enter FPS ( ?:help skip:fullfps ) : ", 0, help_message="How many frames of every second of the video will be extracted.")

    for filename in Path_utils.get_image_paths (output_path, ['.'+output_ext]):
        Path(filename).unlink()

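    # ffmpeg-python assembles the command lazily; nothing runs until job.run().
    # The 'r' kwarg maps to ffmpeg's -r option and is only set when a specific
    # FPS was requested, otherwise every frame is extracted.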
    job = ffmpeg.input(str(input_file_path))

    kwargs = {}
    if fps != 0:
        kwargs.update ({'r':str(fps)})

    job = job.output( str (output_path / ('%5d.'+output_ext)), **kwargs )

    try:
        job = job.run()
    except:

@@ -50,18 +50,18 @@ def cut_video ( input_file, from_time=None, to_time=None, audio_track_id=None, bitrate=None ):
    if input_file_path is None:
        io.log_err("input_file not found.")
        return

    output_file_path = input_file_path.parent / (input_file_path.stem + "_cut" + input_file_path.suffix)

    if from_time is None:
        from_time = io.input_str ("From time (skip: 00:00:00.000) : ", "00:00:00.000")

    if to_time is None:
        to_time = io.input_str ("To time (skip: 00:00:00.000) : ", "00:00:00.000")

    if audio_track_id is None:
        audio_track_id = io.input_int ("Specify audio track id. ( skip:0 ) : ", 0)

    if bitrate is None:
        bitrate = max (1, io.input_int ("Bitrate of output file in MB/s ? (default:25) : ", 25) )

@@ -69,64 +69,64 @@ def cut_video ( input_file, from_time=None, to_time=None, audio_track_id=None, bitrate=None ):
              "b:v": "%dM" %(bitrate),
              "pix_fmt": "yuv420p",
            }

    job = ffmpeg.input(str(input_file_path), ss=from_time, to=to_time)

    job_v = job['v:0']
    job_a = job['a:' + str(audio_track_id) + '?' ]

    job = ffmpeg.output(job_v, job_a, str(output_file_path), **kwargs).overwrite_output()

    try:
        job = job.run()
    except:
        io.log_err ("ffmpeg fail, job commandline:" + str(job.compile()) )


def denoise_image_sequence( input_dir, ext=None, factor=None ):
    input_path = Path(input_dir)

    if not input_path.exists():
        io.log_err("input_dir not found.")
        return

    if ext is None:
        ext = io.input_str ("Input image format (extension)? ( default:png ) : ", "png")

    if factor is None:
        factor = np.clip ( io.input_int ("Denoise factor? (1-20 default:5) : ", 5), 1, 20 )

    job = ( ffmpeg
            .input(str ( input_path / ('%5d.'+ext) ) )
            .filter("hqdn3d", factor, factor, 5,5)
            .output(str ( input_path / ('%5d.'+ext) ) )
          )

    try:
        job = job.run()
    except:
        io.log_err ("ffmpeg fail, job commandline:" + str(job.compile()) )


def video_from_sequence( input_dir, output_file, reference_file=None, ext=None, fps=None, bitrate=None, lossless=None ):
    input_path = Path(input_dir)
    output_file_path = Path(output_file)
    reference_file_path = Path(reference_file) if reference_file is not None else None

    if not input_path.exists():
        io.log_err("input_dir not found.")
        return

    if not output_file_path.parent.exists():
        output_file_path.parent.mkdir(parents=True, exist_ok=True)
        return

    out_ext = output_file_path.suffix

    if ext is None:
        ext = io.input_str ("Input image format (extension)? ( default:png ) : ", "png")

    if lossless is None:
        lossless = io.input_bool ("Use lossless codec ? ( default:no ) : ", False)

    video_id = None
    audio_id = None
    ref_in_a = None

@@ -136,7 +136,7 @@ def video_from_sequence( input_dir, output_file, reference_file=None, ext=None, fps=None, bitrate=None, lossless=None ):
    else:
        if not reference_file_path.exists():
            reference_file_path = None

    if reference_file_path is None:
        io.log_err("reference_file not found.")
        return

@@ -149,32 +149,32 @@ def video_from_sequence( input_dir, output_file, reference_file=None, ext=None, fps=None, bitrate=None, lossless=None ):
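        # Inside the elided probe loop: take the reference file's frame rate
        # from its first video stream and remember the video/audio stream indices.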
        if video_id is None and stream['codec_type'] == 'video':
            video_id = stream['index']
            fps = stream['r_frame_rate']

        if audio_id is None and stream['codec_type'] == 'audio':
            audio_id = stream['index']

    if audio_id is not None:
        #has audio track
        ref_in_a = ffmpeg.input (str(reference_file_path))[str(audio_id)]

    if fps is None:
        #if fps not specified and not overwritten by reference-file
        fps = max (1, io.input_int ("FPS ? (default:25) : ", 25) )

    if not lossless and bitrate is None:
        bitrate = max (1, io.input_int ("Bitrate of output file in MB/s ? (default:16) : ", 16) )

    i_in = ffmpeg.input(str (input_path / ('%5d.'+ext)), r=fps)

    output_args = [i_in]

    if ref_in_a is not None:
        output_args += [ref_in_a]

    output_args += [str (output_file_path)]

    output_kwargs = {}

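    # Lossless mode selects ffmpeg's 'png' video codec; otherwise the elided
    # branch applies a bitrate-constrained yuv420p encode. AAC audio at 192k
    # is configured in either case.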
    if lossless:
        output_kwargs.update ({"c:v": "png"
                              })

@@ -183,15 +183,14 @@ def video_from_sequence( input_dir, output_file, reference_file=None, ext=None, fps=None, bitrate=None, lossless=None ):
                               "b:v": "%dM" %(bitrate),
                               "pix_fmt": "yuv420p",
                              })

    output_kwargs.update ({"c:a": "aac",
                           "b:a": "192k",
                           "ar" : "48000"
                          })

    job = ( ffmpeg.output(*output_args, **output_kwargs).overwrite_output() )
    try:
        job = job.run()
    except:
        io.log_err ("ffmpeg fail, job commandline:" + str(job.compile()) )