mirror of https://github.com/iperov/DeepFaceLab.git
synced 2025-07-06 04:52:13 -07:00
added dev_segmented_extract: extracts images marked in the 'labelme' tool, which can then be used for FANseg training
parent 143792fd31
commit 6f4ea69d4d
2 changed files with 69 additions and 51 deletions
main.py                 |  11
mainscripts/dev_misc.py | 109
main.py
@@ -105,6 +105,17 @@ if __name__ == "__main__":
     p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir")
     p.set_defaults (func=process_dev_test)
 
+    def process_dev_segmented_extract(arguments):
+        osex.set_process_lowest_prio()
+        from mainscripts import dev_misc
+        dev_misc.dev_segmented_extract(arguments.input_dir, arguments.output_dir)
+
+    p = subparsers.add_parser( "dev_segmented_extract", help="")
+    p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir")
+    p.add_argument('--output-dir', required=True, action=fixPathAction, dest="output_dir")
+
+    p.set_defaults (func=process_dev_segmented_extract)
+
     def process_sort(arguments):
         osex.set_process_lowest_prio()
         from mainscripts import Sorter
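With the subparser registered, the new mode is invoked like the other dev_* entry points. Both flags are required; the directory paths below are placeholders:

    python main.py dev_segmented_extract --input-dir <labelme_annotated_dir> --output-dir <extracted_faces_dir>

fixPathAction normalizes each path before it is handed to dev_misc.dev_segmented_extract.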
mainscripts/dev_misc.py
@@ -553,22 +553,19 @@ def dev_test1(input_dir):
     #import code
     #code.interact(local=dict(globals(), **locals()))
 
 #unused in end user workflow
-def dev_test(input_dir ):
+def dev_segmented_extract(input_dir, output_dir ):
     # extract and merge .json labelme files within the faces
 
     device_config = nn.DeviceConfig.GPUIndexes( nn.ask_choose_device_idxs(suggest_all_gpu=True) )
 
     input_path = Path(input_dir)
     if not input_path.exists():
         raise ValueError('input_dir not found. Please ensure it exists.')
 
-    output_path = input_path.parent / (input_path.name+'_merged')
-
-    io.log_info(f'Output dir is % {output_path}')
+    output_path = Path(output_dir)
+    io.log_info("Performing extract segmented faces.")
+    io.log_info(f'Output dir is {output_path}')
 
     if output_path.exists():
         output_images_paths = pathex.get_image_paths(output_path)
@@ -582,27 +579,34 @@ def dev_test(input_dir ):
     images_paths = pathex.get_image_paths(input_path)
 
     extract_data = []
     images_jsons = {}
+    images_processed = 0
+
     for filepath in io.progress_bar_generator(images_paths, "Processing"):
         filepath = Path(filepath)
 
         json_filepath = filepath.parent / (filepath.stem+'.json')
 
         if json_filepath.exists():
             try:
                 json_dict = json.loads(json_filepath.read_text())
                 images_jsons[filepath] = json_dict
 
                 total_points = [ [x,y] for shape in json_dict['shapes'] for x,y in shape['points'] ]
                 total_points = np.array(total_points)
 
+                if len(total_points) == 0:
+                    io.log_info(f"No points found in {json_filepath}, skipping.")
+                    continue
+
                 l,r = int(total_points[:,0].min()), int(total_points[:,0].max())
                 t,b = int(total_points[:,1].min()), int(total_points[:,1].max())
 
                 extract_data.append ( ExtractSubprocessor.Data(filepath, rects=[ [l,t,r,b] ] ) )
+                images_processed += 1
             except:
                 io.log_err(f"err {filepath}")
                 return
 
     image_size = 1024
     face_type = FaceType.HEAD
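For context, the parsing above relies on labelme's per-image JSON layout: a top-level 'shapes' list whose entries each carry a 'points' list of [x, y] vertices. Below is a minimal standalone sketch of the same bounding-rect computation; the sample annotation values are illustrative, not taken from the commit.

    import numpy as np

    # Illustrative labelme-style annotation: one polygon labeled "face".
    json_dict = {
        "shapes": [
            { "label": "face",
              "points": [ [120.0, 80.0], [300.0, 85.0], [290.0, 260.0], [115.0, 250.0] ] }
        ]
    }

    # Flatten every shape's vertices into one Nx2 array, as the loop above does.
    total_points = np.array([ [x, y] for shape in json_dict['shapes'] for x, y in shape['points'] ])

    # Axis-aligned bounding rect in [left, top, right, bottom] order:
    # the rects=[ [l,t,r,b] ] format passed to ExtractSubprocessor.Data.
    l, r = int(total_points[:, 0].min()), int(total_points[:, 0].max())
    t, b = int(total_points[:, 1].min()), int(total_points[:, 1].max())
    print([l, t, r, b])   # -> [115, 80, 300, 260]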
@@ -629,6 +633,11 @@ def dev_test(input_dir ):
 
         dflimg.embed_and_set (filepath, ie_polys=ie_polys)
 
+    io.log_info(f"Images found: {len(images_paths)}")
+    io.log_info(f"Images processed: {images_processed}")
+
+
+
     """
     #mark only
     for data in extract_data:
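The hunk above embeds the labelme polygons into each extracted face's metadata via dflimg.embed_and_set(..., ie_polys=...) rather than rasterizing them; turning the polygons into masks is presumably left to the downstream FANseg training pipeline. Purely for intuition, rasterizing one such polygon into a binary mask with OpenCV would look roughly like this (the 512x512 image size and the polygon are assumptions, not from the commit):

    import cv2
    import numpy as np

    # Illustrative polygon in image (x, y) pixel coordinates.
    points = np.array([ [120, 80], [300, 85], [290, 260], [115, 250] ], dtype=np.int32)

    # Rasterize into a single-channel mask: 255 inside the polygon, 0 outside.
    mask = np.zeros((512, 512), dtype=np.uint8)
    cv2.fillPoly(mask, [points], 255)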
@@ -656,5 +665,3 @@ def dev_test(input_dir ):
             source_landmarks=data.landmarks[0]
             )
     """
-
-    io.log_info("Done.")