diff --git a/main.py b/main.py
index 03b4f46..38d75de 100644
--- a/main.py
+++ b/main.py
@@ -88,23 +88,16 @@ if __name__ == "__main__":
     p = subparsers.add_parser( "dev_apply_celebamaskhq", help="")
     p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir")
     p.set_defaults (func=process_dev_apply_celebamaskhq)
-    """
-    def process_extract_fanseg(arguments):
+
+    def process_dev_test(arguments):
         os_utils.set_process_lowest_prio()
-        from mainscripts import Extractor
-        Extractor.extract_fanseg( arguments.input_dir,
-                                  device_args={'cpu_only'  : arguments.cpu_only,
-                                               'multi_gpu' : arguments.multi_gpu,
-                                              }
-                                )
-
-    p = subparsers.add_parser( "extract_fanseg", help="Extract fanseg mask from faces.")
-    p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory. A directory containing the files you wish to process.")
-    p.add_argument('--multi-gpu', action="store_true", dest="multi_gpu", default=False, help="Enables multi GPU.")
-    p.add_argument('--cpu-only', action="store_true", dest="cpu_only", default=False, help="Extract on CPU.")
-    p.set_defaults (func=process_extract_fanseg)
-    """
+        from mainscripts import dev_misc
+        dev_misc.dev_test( arguments.input_dir )
+
+    p = subparsers.add_parser( "dev_test", help="")
+    p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir")
+    p.set_defaults (func=process_dev_test)
+
     def process_sort(arguments):
         os_utils.set_process_lowest_prio()
         from mainscripts import Sorter
diff --git a/mainscripts/dev_misc.py b/mainscripts/dev_misc.py
index 4d4f32a..9e20e0e 100644
--- a/mainscripts/dev_misc.py
+++ b/mainscripts/dev_misc.py
@@ -469,3 +469,23 @@ def extract_umd_csv(input_file_csv,
     io.log_info ('Images found: %d' % (images_found) )
     io.log_info ('Faces detected: %d' % (faces_detected) )
     io.log_info ('-------------------------')
+
+def dev_test(input_dir):
+    input_path = Path(input_dir)
+
+    dir_names = Path_utils.get_all_dir_names(input_path)
+
+    for dir_name in dir_names:
+
+        img_paths = Path_utils.get_image_paths (input_path / dir_name)
+        for filename in img_paths:
+            filepath = Path(filename)
+
+            dflimg = DFLIMG.load (filepath)
+            if dflimg is None:
+                raise ValueError
+
+    import code
+    code.interact(local=dict(globals(), **locals()))
+
+
\ No newline at end of file
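For reference, the new dev_test command added above is registered in main.py as an argparse sub-command, so it would be invoked roughly like this (the directory path below is only a placeholder); it walks every sub-directory of --input-dir, raises ValueError on any image that is not a DFL-aligned face, and then drops into an interactive console (code.interact) for inspection:

    python main.py dev_test --input-dir workspace/aligned_groups
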
("Q", len(samples_bytes) ) ) @@ -72,7 +74,7 @@ class PackedFaceset(): if not samples_dat_path.exists(): io.log_info(f"{samples_dat_path} : file not found.") return - + samples = PackedFaceset.load(samples_path) for sample in io.progress_bar_generator(samples, "Unpacking"): @@ -93,7 +95,12 @@ class PackedFaceset(): raise NotImplementedError sizeof_samples_bytes, = struct.unpack("Q", f.read(8) ) - samples = pickle.loads ( f.read(sizeof_samples_bytes) ) + + samples_configs = pickle.loads ( f.read(sizeof_samples_bytes) ) + samples = [] + for sample_config in samples_configs: + samples.append ( Sample (**sample_config) ) + offsets = [ struct.unpack("Q", f.read(8) )[0] for _ in range(len(samples)+1) ] data_start_offset = f.tell() f.close() @@ -104,4 +111,3 @@ class PackedFaceset(): return samples - \ No newline at end of file diff --git a/samplelib/Sample.py b/samplelib/Sample.py index 6e481e3..bb55b9f 100644 --- a/samplelib/Sample.py +++ b/samplelib/Sample.py @@ -6,58 +6,95 @@ import numpy as np from utils.cv2_utils import * from DFLIMG import * - +from facelib import LandmarksProcessor class SampleType(IntEnum): IMAGE = 0 #raw image FACE_BEGIN = 1 FACE = 1 #aligned face unsorted - FACE_TEMPORAL_SORTED = 2 #sorted by source filename - FACE_END = 2 + FACE_PERSON = 2 #aligned face person + FACE_TEMPORAL_SORTED = 3 #sorted by source filename + FACE_END = 3 - QTY = 5 + QTY = 4 class Sample(object): __slots__ = ['sample_type', 'filename', - 'person_id', 'face_type', 'shape', 'landmarks', 'ie_polys', - 'pitch_yaw_roll', 'eyebrows_expand_mod', 'source_filename', - 'mirror', - 'fanseg_mask_exist', - '_filename_offset_size', + 'person_name', + 'pitch_yaw_roll', + '_filename_offset_size', ] - - def __init__(self, sample_type=None, filename=None, person_id=None, face_type=None, shape=None, landmarks=None, ie_polys=None, pitch_yaw_roll=None, eyebrows_expand_mod=None, source_filename=None, mirror=None, fanseg_mask_exist=False): + + def __init__(self, sample_type=None, + filename=None, + face_type=None, + shape=None, + landmarks=None, + ie_polys=None, + eyebrows_expand_mod=None, + source_filename=None, + person_name=None, + pitch_yaw_roll=None, + **kwargs): + self.sample_type = sample_type if sample_type is not None else SampleType.IMAGE self.filename = filename - self.person_id = person_id self.face_type = face_type self.shape = shape self.landmarks = np.array(landmarks) if landmarks is not None else None self.ie_polys = ie_polys - self.pitch_yaw_roll = pitch_yaw_roll self.eyebrows_expand_mod = eyebrows_expand_mod self.source_filename = source_filename - self.mirror = mirror - self.fanseg_mask_exist = fanseg_mask_exist - - self._filename_offset_size = None + self.person_name = person_name + self.pitch_yaw_roll = pitch_yaw_roll + + def get_pitch_yaw_roll(self): + if self.pitch_yaw_roll is None: + self.pitch_yaw_roll = LandmarksProcessor.estimate_pitch_yaw_roll(landmarks) + return self.pitch_yaw_roll def set_filename_offset_size(self, filename, offset, size): self._filename_offset_size = (filename, offset, size) - def copy_and_set(self, sample_type=None, filename=None, person_id=None, face_type=None, shape=None, landmarks=None, ie_polys=None, pitch_yaw_roll=None, eyebrows_expand_mod=None, source_filename=None, mirror=None, fanseg_mask=None, fanseg_mask_exist=None): + def read_raw_file(self, filename=None): + if self._filename_offset_size is not None: + filename, offset, size = self._filename_offset_size + with open(filename, "rb") as f: + f.seek( offset, 0) + return f.read (size) + else: + with open(filename, 
"rb") as f: + return f.read() + + def load_bgr(self): + img = cv2_imread (self.filename, loader_func=self.read_raw_file).astype(np.float32) / 255.0 + return img + + def get_config(self): + return {'sample_type': self.sample_type, + 'filename': self.filename, + 'face_type': self.face_type, + 'shape': self.shape, + 'landmarks': self.landmarks.tolist(), + 'ie_polys': self.ie_polys, + 'eyebrows_expand_mod': self.eyebrows_expand_mod, + 'source_filename': self.source_filename, + 'person_name': self.person_name + } + +""" +def copy_and_set(self, sample_type=None, filename=None, face_type=None, shape=None, landmarks=None, ie_polys=None, pitch_yaw_roll=None, eyebrows_expand_mod=None, source_filename=None, fanseg_mask=None, person_name=None): return Sample( sample_type=sample_type if sample_type is not None else self.sample_type, filename=filename if filename is not None else self.filename, - person_id=person_id if person_id is not None else self.person_id, face_type=face_type if face_type is not None else self.face_type, shape=shape if shape is not None else self.shape, landmarks=landmarks if landmarks is not None else self.landmarks.copy(), @@ -65,30 +102,6 @@ class Sample(object): pitch_yaw_roll=pitch_yaw_roll if pitch_yaw_roll is not None else self.pitch_yaw_roll, eyebrows_expand_mod=eyebrows_expand_mod if eyebrows_expand_mod is not None else self.eyebrows_expand_mod, source_filename=source_filename if source_filename is not None else self.source_filename, - mirror=mirror if mirror is not None else self.mirror, - fanseg_mask_exist=fanseg_mask_exist if fanseg_mask_exist is not None else self.fanseg_mask_exist) - - def read_raw_file(self, filename=None): - if self._filename_offset_size is not None: - filename, offset, size = self._filename_offset_size - with open(filename, "rb") as f: - f.seek( offset, 0) - return f.read (size) - else: - with open(filename, "rb") as f: - return f.read() - - def load_bgr(self): - img = cv2_imread (self.filename, loader_func=self.read_raw_file).astype(np.float32) / 255.0 - if self.mirror: - img = img[:,::-1].copy() - return img - - def load_fanseg_mask(self): - if self.fanseg_mask_exist: - filepath = Path(self.filename) - dflimg = DFLIMG.load (filepath) - return dflimg.get_fanseg_mask() - - return None + person_name=person_name if person_name is not None else self.person_name) +""" \ No newline at end of file diff --git a/samplelib/SampleHost.py b/samplelib/SampleHost.py index a165d32..ed825e7 100644 --- a/samplelib/SampleHost.py +++ b/samplelib/SampleHost.py @@ -83,10 +83,7 @@ class SampleHost: landmarks = dflimg.get_landmarks() pitch_yaw_roll = dflimg.get_pitch_yaw_roll() eyebrows_expand_mod = dflimg.get_eyebrows_expand_mod() - - if pitch_yaw_roll is None: - pitch_yaw_roll = LandmarksProcessor.estimate_pitch_yaw_roll(landmarks) - + sample_list.append( Sample(filename=filename, sample_type=SampleType.FACE, face_type=FaceType.fromString (dflimg.get_face_type()), diff --git a/samplelib/SampleProcessor.py b/samplelib/SampleProcessor.py index ee74cab..726920f 100644 --- a/samplelib/SampleProcessor.py +++ b/samplelib/SampleProcessor.py @@ -148,11 +148,8 @@ class SampleProcessor(object): l = np.clip(l, 0.0, 1.0) img = l elif img_type == SPTF.IMG_PITCH_YAW_ROLL or img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID: - pitch_yaw_roll = sample.pitch_yaw_roll - if pitch_yaw_roll is not None: - pitch, yaw, roll = pitch_yaw_roll - else: - pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll (sample.landmarks) + pitch_yaw_roll = sample.get_pitch_yaw_roll() + if params['flip']: 
diff --git a/samplelib/SampleProcessor.py b/samplelib/SampleProcessor.py
index ee74cab..726920f 100644
--- a/samplelib/SampleProcessor.py
+++ b/samplelib/SampleProcessor.py
@@ -148,11 +148,8 @@ class SampleProcessor(object):
                     l = np.clip(l, 0.0, 1.0)
                     img = l
                 elif img_type == SPTF.IMG_PITCH_YAW_ROLL or img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
-                    pitch_yaw_roll = sample.pitch_yaw_roll
-                    if pitch_yaw_roll is not None:
-                        pitch, yaw, roll = pitch_yaw_roll
-                    else:
-                        pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll (sample.landmarks)
+                    pitch, yaw, roll = sample.get_pitch_yaw_roll()
+
                     if params['flip']:
                         yaw = -yaw
@@ -185,13 +182,10 @@ class SampleProcessor(object):
             ### Prepare a mask
             mask = None
             if is_face_sample:
-                mask = sample.load_fanseg_mask() #using fanseg_mask if exist
-
-                if mask is None:
-                    if sample.eyebrows_expand_mod is not None:
-                        mask = LandmarksProcessor.get_image_hull_mask (img.shape, sample.landmarks, eyebrows_expand_mod=sample.eyebrows_expand_mod )
-                    else:
-                        mask = LandmarksProcessor.get_image_hull_mask (img.shape, sample.landmarks)
+                if sample.eyebrows_expand_mod is not None:
+                    mask = LandmarksProcessor.get_image_hull_mask (img.shape, sample.landmarks, eyebrows_expand_mod=sample.eyebrows_expand_mod )
+                else:
+                    mask = LandmarksProcessor.get_image_hull_mask (img.shape, sample.landmarks)
 
                 if sample.ie_polys is not None:
                     sample.ie_polys.overlay_mask(mask)
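Taken together, Sample.get_config() and the **kwargs-tolerant constructor give PackedFaceset a plain-dict round trip: pack() pickles a list of per-sample config dicts, and load() rebuilds each one with Sample(**sample_config). From the load() code above, the faceset.pak layout is a uint64 version, a uint64 byte length, the pickled config list, len(samples)+1 uint64 offsets, and then the raw image payload. A minimal standalone sketch of that reader logic, assuming only the layout shown (the helper name is invented for illustration):

    import pickle, struct

    def read_faceset_pak_header(f):
        # uint64 format version, then uint64 size of the pickled config list
        version,   = struct.unpack("Q", f.read(8))
        cfgs_size, = struct.unpack("Q", f.read(8))
        sample_configs = pickle.loads(f.read(cfgs_size))   # list of per-sample dicts, as written by pack()
        # one uint64 offset per sample plus a trailing end offset
        offsets = [struct.unpack("Q", f.read(8))[0] for _ in range(len(sample_configs) + 1)]
        data_start_offset = f.tell()                       # raw image bytes begin here
        return version, sample_configs, offsets, data_start_offset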