From 1d4d3cd0e81fd2c30d4a62d8fb7715a9347b1739 Mon Sep 17 00:00:00 2001
From: iperov
Date: Thu, 28 Oct 2021 09:31:30 +0400
Subject: [PATCH] _

---
 main.py               |  7 ++++++-
 scripts/dev.py        | 36 ++++++++++++++----------------------
 xlib/sjob/__init__.py |  2 +-
 3 files changed, 21 insertions(+), 24 deletions(-)

diff --git a/main.py b/main.py
index d0d6f6a..55a0107 100644
--- a/main.py
+++ b/main.py
@@ -54,10 +54,15 @@ def main():
 
     def run_extract_FaceSynthetics(args):
         from scripts import dev
-        dev.extract_FaceSynthetics(input_dir=args.input_dir)
+
+        inputdir_path = Path(args.input_dir)
+        faceset_path = Path(args.faceset_path)
+
+        dev.extract_FaceSynthetics(inputdir_path, faceset_path)
 
     p = dev_subparsers.add_parser('extract_FaceSynthetics')
     p.add_argument('--input-dir', default=None, action=fixPathAction, help="FaceSynthetics directory.")
+    p.add_argument('--faceset-path', default=None, action=fixPathAction, help="output .dfs path")
     p.set_defaults(func=run_extract_FaceSynthetics)
 
     train_parser = subparsers.add_parser( "train", help="Train neural network.")
diff --git a/scripts/dev.py b/scripts/dev.py
index 0d09cc8..64d2216 100644
--- a/scripts/dev.py
+++ b/scripts/dev.py
@@ -26,7 +26,7 @@ def split_large_files(delete_original=False):
         SplittedFile.split(filepath, part_size=part_size, delete_original=delete_original)
     print('Done')
 
-def extract_FaceSynthetics(input_dir):
+def extract_FaceSynthetics(inputdir_path : Path, faceset_path : Path):
     """
     extract FaceSynthetics dataset https://github.com/microsoft/FaceSynthetics
 
@@ -51,19 +51,11 @@ def extract_FaceSynthetics(input_dir):
     FACEWEAR = 18
     IGNORE = 255
     """
-    input_path = Path(input_dir)
-    faceset_path = input_path.parent / f'{input_path.name}.dfs'
-
-    # fs = lib_face.Faceset(output_dbpath)
-    # for ufm in fs.iter_UFaceMark():
-    #     uimg = fs.get_UImage_by_uuid( ufm.get_UImage_uuid() )
-    #     img = uimg.get_image()
-
-    #     cv2.imshow('', img)
-    #     cv2.waitKey(0)
-
-    filepaths = lib_path.get_files_paths(input_path)[:100] #TODO
-
+    if faceset_path.suffix != '.dfs':
+        raise ValueError('faceset_path must have .dfs extension.')
+
+    filepaths = lib_path.get_files_paths(inputdir_path)
+
     fs = lib_face.Faceset(faceset_path)
     fs.recreate()
 
@@ -75,7 +67,7 @@ def extract_FaceSynthetics(input_dir):
 
         if not image_filepath.exists():
             print(f'{image_filepath} does not exist, skipping')
-            
+
         img = lib_cv.imread(image_filepath)
         H,W,C = img.shape
 
@@ -101,14 +93,14 @@ def extract_FaceSynthetics(input_dir):
         ufm.set_UImage_uuid(uimg.get_uuid())
         ufm.set_FRect(flmrks.get_FRect())
         ufm.add_FLandmarks2D(flmrks)
-        
+
         fs.add_UImage(uimg, format='png')
         fs.add_UFaceMark(ufm)
-    
-    
+
+
     fs.shrink()
     fs.close()
-    
+
     import code
     code.interact(local=dict(globals(), **locals()))
 
@@ -122,10 +114,10 @@ def extract_FaceSynthetics(input_dir):
 #         img_seg = img_seg[...,None]
 #     if img_seg.shape[-1] != 1:
 #         raise Exception(f'{seg_filepath} wrong mask file. Must be 1 channel.')
-        
+
 
 #     seg_hair = img_seg.copy()
-#     seg_hair_inds = np.isin(img_seg, [13])    
+#     seg_hair_inds = np.isin(img_seg, [13])
 #     seg_hair[~seg_hair_inds] = 0
 #     seg_hair[seg_hair_inds] = 255
 
@@ -141,7 +133,7 @@ def extract_FaceSynthetics(input_dir):
 #     cv2.imshow('', seg_hair)
 #     cv2.waitKey(0)
 
-#     img_seg_inds = np.isin(img_seg, [1,2,3,4,5,6,9,10,11,14])    
+#     img_seg_inds = np.isin(img_seg, [1,2,3,4,5,6,9,10,11,14])
 #     img_seg[~img_seg_inds] = 0
 #     img_seg[img_seg_inds] = 255
 #     import numpy as np
diff --git a/xlib/sjob/__init__.py b/xlib/sjob/__init__.py
index 61771c9..343f62b 100644
--- a/xlib/sjob/__init__.py
+++ b/xlib/sjob/__init__.py
@@ -1,5 +1,5 @@
 """
-Job processing in subprocesses.
+Job lib using subprocesses
 """
 import multiprocessing
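
Usage sketch (illustrative, with placeholder paths): after this patch, extract_FaceSynthetics no longer derives the output faceset location from the input directory; the caller passes it explicitly, and the path must end in .dfs or a ValueError is raised. Assuming the repository root is on sys.path, a direct call could look like:

    from pathlib import Path
    from scripts import dev

    # Placeholder paths; faceset_path must carry the .dfs suffix,
    # otherwise the new suffix check raises ValueError.
    dev.extract_FaceSynthetics(inputdir_path=Path('FaceSynthetics'),
                               faceset_path=Path('FaceSynthetics.dfs'))

From the command line the same arguments are supplied via --input-dir and --faceset-path on the extract_FaceSynthetics subcommand (the exact subcommand spelling depends on how dev_subparsers is registered in main.py).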