Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-07-07 05:22:06 -07:00)

Commit eb63466baf (parent 7e0bdc0369): fix

3 changed files with 25 additions and 20 deletions
@@ -40,7 +40,7 @@ class DFLJPG(object):
                 data_counter += 2
 
                 if chunk_m_l != 0xFF:
-                    raise ValueError("No Valid JPG info")
+                    raise ValueError(f"No Valid JPG info in {filename}")
 
                 chunk_name = None
                 chunk_size = None
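For context: a JPEG file is a sequence of segments, each introduced by a two-byte marker whose first byte is always 0xFF, which is why the parser treats any other byte at a marker position as corruption. A minimal standalone sketch of that walk (illustrative only, not DFL's actual loader; names are made up):

    import struct

    def walk_jpeg_segments(data: bytes):
        pos = 0
        while pos + 2 <= len(data):
            prefix, marker = struct.unpack("BB", data[pos:pos+2])
            pos += 2
            if prefix != 0xFF:
                raise ValueError("No Valid JPG info")  # same check as in the hunk above
            if marker in (0xD8, 0xD9):  # SOI / EOI carry no payload
                continue
            if marker == 0xDA:          # start-of-scan: entropy-coded data follows,
                break                   # which this simplified sketch does not parse
            # All other segments carry a big-endian 16-bit length that
            # includes the two length bytes themselves.
            (size,) = struct.unpack(">H", data[pos:pos+2])
            yield marker, data[pos+2:pos+size]
            pos += size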
@@ -87,8 +87,9 @@ class DFLJPG(object):
                 else:
                     is_unk_chunk = True
 
-                if is_unk_chunk:
-                    raise ValueError("Unknown chunk %X" % (chunk_m_h) )
+                #if is_unk_chunk:
+                #    #raise ValueError(f"Unknown chunk {chunk_m_h} in {filename}")
+                #    io.log_info(f"Unknown chunk {chunk_m_h} in {filename}")
 
                 if chunk_size == None: #variable size
                     chunk_size, = struct.unpack (">H", data[data_counter:data_counter+2])
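The effect of this hunk: an unrecognized marker no longer aborts the load; the check is commented out entirely, so unknown chunks fall through to the generic size-based handling below. Skipping is safe precisely because of the segment length field shown above. A hedged one-function sketch of that policy (helper name is hypothetical):

    import struct

    def skip_unknown_segment(data: bytes, pos: int, marker: int) -> int:
        # Jump over a segment we don't understand: the 2-byte big-endian
        # length covers itself, so pos + size lands on the next marker.
        # Log rather than raise, mirroring the commit's intent.
        (size,) = struct.unpack(">H", data[pos:pos+2])
        print(f"Unknown chunk {marker:X}, skipping {size} bytes")  # stand-in for io.log_info
        return pos + size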
@@ -116,7 +117,7 @@ class DFLJPG(object):
 
             return inst
         except Exception as e:
-            raise Exception ("Corrupted JPG file: %s" % (str(e)))
+            raise Exception (f"Corrupted JPG file {filename} {e}")
 
     @staticmethod
     def load(filename, loader_func=None):
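All three DFLJPG hunks share one goal: error messages now name the offending file, which matters when a batch job over thousands of images dies on one of them. The wrap-and-annotate pattern in isolation (a hypothetical wrapper reusing the walk sketched earlier):

    def load_raw_checked(filename):
        try:
            with open(filename, "rb") as f:
                return list(walk_jpeg_segments(f.read()))
        except Exception as e:
            # Re-raise with the filename attached so the caller's log
            # identifies which file was corrupt.
            raise Exception(f"Corrupted JPG file {filename} {e}")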
@@ -1,8 +1,8 @@
+import traceback
 import json
 import multiprocessing
 import shutil
 from pathlib import Path
 
 import cv2
 import numpy as np
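The new traceback import serves the logging change further down: inside a bare except:, the stack trace is lost unless captured explicitly, and traceback.format_exc() returns the formatted trace of the exception currently being handled. For example:

    import traceback

    def process(filepath):
        raise RuntimeError("boom")

    try:
        process("img_0001.jpg")
    except:
        # format_exc() shows *where* it failed, not just that it failed.
        print(f"err img_0001.jpg, {traceback.format_exc()}")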
@@ -602,15 +602,16 @@ def dev_segmented_extract(input_dir, output_dir ):
     io.log_info(f'Output dir is {output_path}')
 
     if output_path.exists():
-        output_images_paths = pathex.get_image_paths(output_path)
+        output_images_paths = pathex.get_image_paths(output_path, subdirs=True)
         if len(output_images_paths) > 0:
             io.input_bool("WARNING !!! \n %s contains files! \n They will be deleted. \n Press enter to continue." % (str(output_path)), False )
             for filename in output_images_paths:
                 Path(filename).unlink()
+            shutil.rmtree(str(output_path))
     else:
         output_path.mkdir(parents=True, exist_ok=True)
 
-    images_paths = pathex.get_image_paths(input_path)
+    images_paths = pathex.get_image_paths(input_path, subdirs=True, return_Path_class=True)
 
     extract_data = []
     images_jsons = {}
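Note the cleanup now handles nested output: files are unlinked and then shutil.rmtree removes the directory tree itself, subfolders included. The equivalent cleanup in isolation (a sketch, not the repo's code):

    import shutil
    from pathlib import Path

    def clear_output_tree(output_path: Path):
        # rmtree deletes output_path itself along with everything in it,
        # so recreate it empty afterwards.
        if output_path.exists():
            shutil.rmtree(str(output_path))
        output_path.mkdir(parents=True, exist_ok=True)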
@@ -618,7 +619,6 @@ def dev_segmented_extract(input_dir, output_dir ):
 
 
     for filepath in io.progress_bar_generator(images_paths, "Processing"):
-        filepath = Path(filepath)
         json_filepath = filepath.parent / (filepath.stem+'.json')
 
         if json_filepath.exists():
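The per-item Path(filepath) conversion can go because return_Path_class=True makes get_image_paths yield pathlib.Path objects directly. A hedged sketch of what a helper with that signature plausibly does (pathex's real implementation may differ):

    from pathlib import Path

    def get_image_paths(dir_path, subdirs=False, return_Path_class=False):
        pattern = '**/*' if subdirs else '*'   # subdirs=True recurses
        exts = {'.jpg', '.jpeg', '.png'}
        paths = sorted(p for p in Path(dir_path).glob(pattern)
                       if p.suffix.lower() in exts)
        return paths if return_Path_class else [str(p) for p in paths]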
@@ -636,19 +636,27 @@ def dev_segmented_extract(input_dir, output_dir ):
                 l,r = int(total_points[:,0].min()), int(total_points[:,0].max())
                 t,b = int(total_points[:,1].min()), int(total_points[:,1].max())
 
-                extract_data.append ( ExtractSubprocessor.Data(filepath, rects=[ [l,t,r,b] ] ) )
+                force_output_path=output_path / filepath.relative_to(input_path).parent
+                force_output_path.mkdir(exist_ok=True, parents=True)
+
+                extract_data.append ( ExtractSubprocessor.Data(filepath,
+                                                               rects=[ [l,t,r,b] ],
+                                                               force_output_path=force_output_path ) )
                 images_processed += 1
             except:
-                io.log_err(f"err {filepath}")
+                io.log_err(f"err {filepath}, {traceback.format_exc()}")
                 return
+        else:
+            io.log_info(f"No .json file for {filepath.relative_to(input_path)}, skipping.")
+            continue
 
     image_size = 1024
     face_type = FaceType.HEAD
     extract_data = ExtractSubprocessor (extract_data, 'landmarks', image_size, face_type, device_config=device_config).run()
-    extract_data = ExtractSubprocessor (extract_data, 'final', image_size, face_type, final_output_path=output_path, device_config=device_config).run()
+    extract_data = ExtractSubprocessor (extract_data, 'final', image_size, face_type, device_config=device_config).run()
 
     for data in extract_data:
-        filepath = output_path / (data.filepath.stem+'_0.jpg')
+        filepath = data.force_output_path / (data.filepath.stem+'_0.jpg')
 
         dflimg = DFLIMG.load(filepath)
         image_to_face_mat = dflimg.get_image_to_face_mat()
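The thrust of this hunk is mirroring the input tree into the output: filepath.relative_to(input_path).parent recovers the subdirectory an image came from, and re-rooting it under output_path recreates the hierarchy, so each extracted face is written next to its siblings rather than into one flat folder. The pattern in isolation (illustrative paths):

    from pathlib import Path

    def mirror_output_dir(input_path: Path, output_path: Path, filepath: Path) -> Path:
        # input/sessionA/cam2/img.jpg  ->  output/sessionA/cam2/
        out_dir = output_path / filepath.relative_to(input_path).parent
        out_dir.mkdir(parents=True, exist_ok=True)
        return out_dir

    out = mirror_output_dir(Path("input"), Path("output"),
                            Path("input/sessionA/cam2/img.jpg"))
    # out == Path("output/sessionA/cam2")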
@@ -71,15 +71,11 @@ class SampleGeneratorFaceSkinSegDataset(SampleGeneratorBase):
         self.initialized = False
 
 
-        dataset_path = root_path / 'XSegDataset'
-        if not dataset_path.exists():
-            raise ValueError(f'Unable to find {dataset_path}')
-
-        aligned_path = dataset_path /'aligned'
+        aligned_path = root_path /'aligned'
         if not aligned_path.exists():
             raise ValueError(f'Unable to find {aligned_path}')
 
-        obstructions_path = dataset_path / 'obstructions'
+        obstructions_path = root_path / 'obstructions'
 
         obstructions_images_paths = pathex.get_image_paths(obstructions_path, image_extensions=['.png'], subdirs=True)
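This last hunk flattens the dataset layout one level: aligned/ and obstructions/ now live directly under root_path instead of under an intermediate XSegDataset/ folder. The resulting lookup as a standalone sketch (hypothetical helper; only aligned/ is hard-required, matching the hunk):

    from pathlib import Path

    def resolve_dataset_dirs(root_path: Path):
        aligned_path = root_path / 'aligned'
        if not aligned_path.exists():
            raise ValueError(f'Unable to find {aligned_path}')
        # obstructions/ is looked up but not existence-checked in the hunk
        obstructions_path = root_path / 'obstructions'
        return aligned_path, obstructions_path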