DFLIMG refactoring

Colombo 2020-03-21 01:18:15 +04:00
parent a9b23e9851
commit efe3b56683
12 changed files with 175 additions and 547 deletions
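
The refactoring touches every caller of the DFLIMG metadata API. For orientation, here is a minimal sketch of the access pattern the hunks below rely on, built only from calls that appear in this diff; it assumes a DeepFaceLab checkout on the import path, and the aligned-face path is hypothetical:

```python
from pathlib import Path

from DFLIMG import DFLIMG  # assumes the DeepFaceLab repo root is on sys.path

filepath = Path('aligned/00001_0.jpg')          # hypothetical aligned face image
dflimg = DFLIMG.load(filepath)                  # returns None if no DFL metadata is embedded
if dflimg is not None:
    landmarks = dflimg.get_landmarks()          # aligned-face landmarks
    src_name  = dflimg.get_source_filename()    # filename of the original frame
    mat       = dflimg.get_image_to_face_mat()  # affine matrix: source frame -> aligned face
    # pre-refactoring write path seen in the old code: re-embed updated metadata
    dflimg.embed_and_set(str(filepath), source_filename=src_name)
```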


@@ -219,182 +219,6 @@ def extract_vggface2_dataset(input_dir, device_args={} ):
"""
class CelebAMASKHQSubprocessor(Subprocessor):
class Cli(Subprocessor.Cli):
#override
def on_initialize(self, client_dict):
self.masks_files_paths = client_dict['masks_files_paths']
return None
#override
def process_data(self, data):
filename = data[0]
dflimg = DFLIMG.load(Path(filename))
image_to_face_mat = dflimg.get_image_to_face_mat()
src_filename = dflimg.get_source_filename()
img = cv2_imread(filename)
h,w,c = img.shape
fanseg_mask = LandmarksProcessor.get_image_hull_mask(img.shape, dflimg.get_landmarks() )
idx_name = '%.5d' % int(src_filename.split('.')[0])
idx_files = [ x for x in self.masks_files_paths if idx_name in x ]
skin_files = [ x for x in idx_files if 'skin' in x ]
eye_glass_files = [ x for x in idx_files if 'eye_g' in x ]
for files, is_invert in [ (skin_files,False),
(eye_glass_files,True) ]:
if len(files) > 0:
mask = cv2_imread(files[0])
mask = mask[...,0]
mask[mask == 255] = 1
mask = mask.astype(np.float32)
mask = cv2.resize(mask, (1024,1024) )
mask = cv2.warpAffine(mask, image_to_face_mat, (w, h), cv2.INTER_LANCZOS4)
if not is_invert:
fanseg_mask *= mask[...,None]
else:
fanseg_mask *= (1-mask[...,None])
dflimg.embed_and_set (filename, fanseg_mask=fanseg_mask)
return 1
#override
def get_data_name (self, data):
#return string identificator of your data
return data[0]
#override
def __init__(self, image_paths, masks_files_paths ):
self.image_paths = image_paths
self.masks_files_paths = masks_files_paths
self.result = []
super().__init__('CelebAMASKHQSubprocessor', CelebAMASKHQSubprocessor.Cli, 60)
#override
def process_info_generator(self):
for i in range(min(multiprocessing.cpu_count(), 8)):
yield 'CPU%d' % (i), {}, {'masks_files_paths' : self.masks_files_paths }
#override
def on_clients_initialized(self):
io.progress_bar ("Processing", len (self.image_paths))
#override
def on_clients_finalized(self):
io.progress_bar_close()
#override
def get_data(self, host_dict):
if len (self.image_paths) > 0:
return [self.image_paths.pop(0)]
return None
#override
def on_data_return (self, host_dict, data):
self.image_paths.insert(0, data[0])
#override
def on_result (self, host_dict, data, result):
io.progress_bar_inc(1)
#override
def get_result(self):
return self.result
#unused in end user workflow
def apply_celebamaskhq(input_dir ):
input_path = Path(input_dir)
img_path = input_path / 'aligned'
mask_path = input_path / 'mask'
if not img_path.exists():
raise ValueError(f'{str(img_path)} directory not found. Please ensure it exists.')
CelebAMASKHQSubprocessor(pathex.get_image_paths(img_path),
pathex.get_image_paths(mask_path, subdirs=True) ).run()
return
paths_to_extract = []
for filename in io.progress_bar_generator(pathex.get_image_paths(img_path), desc="Processing"):
filepath = Path(filename)
dflimg = DFLIMG.load(filepath)
if dflimg is not None:
paths_to_extract.append (filepath)
image_to_face_mat = dflimg.get_image_to_face_mat()
src_filename = dflimg.get_source_filename()
#img = cv2_imread(filename)
h,w,c = dflimg.get_shape()
fanseg_mask = LandmarksProcessor.get_image_hull_mask( (h,w,c), dflimg.get_landmarks() )
idx_name = '%.5d' % int(src_filename.split('.')[0])
idx_files = [ x for x in masks_files if idx_name in x ]
skin_files = [ x for x in idx_files if 'skin' in x ]
eye_glass_files = [ x for x in idx_files if 'eye_g' in x ]
for files, is_invert in [ (skin_files,False),
(eye_glass_files,True) ]:
if len(files) > 0:
mask = cv2_imread(files[0])
mask = mask[...,0]
mask[mask == 255] = 1
mask = mask.astype(np.float32)
mask = cv2.resize(mask, (1024,1024) )
mask = cv2.warpAffine(mask, image_to_face_mat, (w, h), cv2.INTER_LANCZOS4)
if not is_invert:
fanseg_mask *= mask[...,None]
else:
fanseg_mask *= (1-mask[...,None])
#cv2.imshow("", (fanseg_mask*255).astype(np.uint8) )
#cv2.waitKey(0)
dflimg.embed_and_set (filename, fanseg_mask=fanseg_mask)
#import code
#code.interact(local=dict(globals(), **locals()))
#unused in end user workflow
def extract_fanseg(input_dir, device_args={} ):
multi_gpu = device_args.get('multi_gpu', False)
cpu_only = device_args.get('cpu_only', False)
input_path = Path(input_dir)
if not input_path.exists():
raise ValueError('Input directory not found. Please ensure it exists.')
paths_to_extract = []
for filename in pathex.get_image_paths(input_path) :
filepath = Path(filename)
dflimg = DFLIMG.load ( filepath )
if dflimg is not None:
paths_to_extract.append (filepath)
paths_to_extract_len = len(paths_to_extract)
if paths_to_extract_len > 0:
io.log_info ("Performing extract fanseg for %d files..." % (paths_to_extract_len) )
data = ExtractSubprocessor ([ ExtractSubprocessor.Data(filename) for filename in paths_to_extract ], 'fanseg', multi_gpu=multi_gpu, cpu_only=cpu_only).run()
#unused in end user workflow
def dev_test_68(input_dir ):
# process 68 landmarks dataset with .pts files
@@ -451,13 +275,14 @@ def dev_test_68(input_dir ):
        img = cv2_imread(filepath)
        img = imagelib.normalize_channels(img, 3)
        cv2_imwrite(output_filepath, img, [int(cv2.IMWRITE_JPEG_QUALITY), 95] )

        DFLJPG.embed_data(output_filepath, face_type=FaceType.toString(FaceType.MARK_ONLY),
                                           landmarks=lmrks,
                                           source_filename=filepath.name,
                                           source_rect=rect,
                                           source_landmarks=lmrks
                                           )

        raise Exception("unimplemented")
        #DFLJPG.x(output_filepath, face_type=FaceType.toString(FaceType.MARK_ONLY),
        #                          landmarks=lmrks,
        #                          source_filename=filepath.name,
        #                          source_rect=rect,
        #                          source_landmarks=lmrks
        #                          )

    io.log_info("Done.")
@@ -544,11 +369,11 @@ def dev_test1(input_dir):
        for filename in img_paths:
            filepath = Path(filename)

            dflimg = DFLIMG.load (filepath)
            dflimg = DFLIMG.x (filepath)
            if dflimg is None:
                raise ValueError

            dflimg.embed_and_set(filename, person_name=dir_name)
            #dflimg.x(filename, person_name=dir_name)

    #import code
    #code.interact(local=dict(globals(), **locals()))
@@ -587,98 +412,6 @@ def dev_segmented_trash(input_dir):
        except:
            io.log_info ('fail to trashing %s' % (src.name) )

def dev_segmented_extract(input_dir, output_dir ):
    # extract and merge .json labelme files within the faces
    device_config = nn.DeviceConfig.GPUIndexes( nn.ask_choose_device_idxs(suggest_all_gpu=True) )

    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('input_dir not found. Please ensure it exists.')

    output_path = Path(output_dir)
    io.log_info("Performing extract segmented faces.")
    io.log_info(f'Output dir is {output_path}')

    if output_path.exists():
        output_images_paths = pathex.get_image_paths(output_path, subdirs=True)
        if len(output_images_paths) > 0:
            io.input_bool("WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue." % (str(output_path)), False )
            for filename in output_images_paths:
                Path(filename).unlink()
            shutil.rmtree(str(output_path))
    else:
        output_path.mkdir(parents=True, exist_ok=True)

    images_paths = pathex.get_image_paths(input_path, subdirs=True, return_Path_class=True)

    extract_data = []
    images_jsons = {}
    images_processed = 0

    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        json_filepath = filepath.parent / (filepath.stem+'.json')
        if json_filepath.exists():
            try:
                json_dict = json.loads(json_filepath.read_text())
                images_jsons[filepath] = json_dict

                total_points = [ [x,y] for shape in json_dict['shapes'] for x,y in shape['points'] ]
                total_points = np.array(total_points)

                if len(total_points) == 0:
                    io.log_info(f"No points found in {json_filepath}, skipping.")
                    continue

                l,r = int(total_points[:,0].min()), int(total_points[:,0].max())
                t,b = int(total_points[:,1].min()), int(total_points[:,1].max())

                force_output_path=output_path / filepath.relative_to(input_path).parent
                force_output_path.mkdir(exist_ok=True, parents=True)

                extract_data.append ( ExtractSubprocessor.Data(filepath,
                                                               rects=[ [l,t,r,b] ],
                                                               force_output_path=force_output_path ) )
                images_processed += 1
            except:
                io.log_err(f"err {filepath}, {traceback.format_exc()}")
                return
        else:
            io.log_info(f"No .json file for {filepath.relative_to(input_path)}, skipping.")
            continue

    image_size = 1024
    face_type = FaceType.HEAD

    extract_data = ExtractSubprocessor (extract_data, 'landmarks', image_size, face_type, device_config=device_config).run()
    extract_data = ExtractSubprocessor (extract_data, 'final', image_size, face_type, device_config=device_config).run()

    for data in extract_data:
        filepath = data.force_output_path / (data.filepath.stem+'_0.jpg')

        dflimg = DFLIMG.load(filepath)
        image_to_face_mat = dflimg.get_image_to_face_mat()

        json_dict = images_jsons[data.filepath]

        ie_polys = IEPolys()
        for shape in json_dict['shapes']:
            ie_poly = ie_polys.add(1)

            points = np.array( [ [x,y] for x,y in shape['points'] ] )
            points = LandmarksProcessor.transform_points(points, image_to_face_mat)
            for x,y in points:
                ie_poly.add( int(x), int(y) )

        dflimg.embed_and_set (filepath, ie_polys=ie_polys)

    io.log_info(f"Images found: {len(images_paths)}")
    io.log_info(f"Images processed: {images_processed}")
"""
#mark only
@@ -699,7 +432,7 @@ for data in extract_data:
            ie_poly.add( int(x), int(y) )

    DFLJPG.embed_data(output_filepath, face_type=FaceType.toString(FaceType.MARK_ONLY),
    DFLJPG.x(output_filepath, face_type=FaceType.toString(FaceType.MARK_ONLY),
                             landmarks=data.landmarks[0],
                             ie_polys=ie_polys,
                             source_filename=filepath.name,