mirror of https://github.com/iperov/DeepFaceLab.git
synced 2025-07-06 13:02:15 -07:00

refactorings

This commit is contained in:
parent e0e8970ab9
commit 754d6c385c

13 changed files with 243 additions and 104 deletions
```diff
@@ -5,7 +5,6 @@ import cv2
 import numpy as np

 from facelib import FaceType
-from imagelib import IEPolys
 from utils.struct_utils import *
 from interact import interact as io

@@ -306,7 +305,7 @@ class DFLJPG(object):

     def get_face_type(self): return self.dfl_dict['face_type']
     def get_landmarks(self): return np.array ( self.dfl_dict['landmarks'] )
-    def get_ie_polys(self): return IEPolys.load(self.dfl_dict.get('ie_polys',None))
+    def get_ie_polys(self): return self.dfl_dict.get('ie_polys',None)
     def get_source_filename(self): return self.dfl_dict['source_filename']
     def get_source_rect(self): return self.dfl_dict['source_rect']
     def get_source_landmarks(self): return np.array ( self.dfl_dict['source_landmarks'] )
```
```diff
@@ -7,7 +7,6 @@ import cv2
 import numpy as np

 from facelib import FaceType
-from imagelib import IEPolys

 PNG_HEADER = b"\x89PNG\r\n\x1a\n"

@@ -413,7 +412,7 @@ class DFLPNG(object):

     def get_face_type(self): return self.dfl_dict['face_type']
     def get_landmarks(self): return np.array ( self.dfl_dict['landmarks'] )
-    def get_ie_polys(self): return IEPolys.load(self.dfl_dict.get('ie_polys',None))
+    def get_ie_polys(self): return self.dfl_dict.get('ie_polys',None)
     def get_source_filename(self): return self.dfl_dict['source_filename']
     def get_source_rect(self): return self.dfl_dict['source_rect']
     def get_source_landmarks(self): return np.array ( self.dfl_dict['source_landmarks'] )
```
```diff
@@ -97,7 +97,7 @@ class IEPolys:
     @staticmethod
     def load(ie_polys=None):
         obj = IEPolys()
-        if ie_polys is not None:
+        if ie_polys is not None and isinstance(ie_polys, list):
             for (type, points) in ie_polys:
                 obj.add(type)
                 obj.n_list().set_points(points)
```
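Together with the DFLJPG/DFLPNG hunks above, this moves IEPolys (de)serialization out of the image containers: `get_ie_polys()` now returns the raw list stored in the image metadata, call sites deserialize explicitly with `IEPolys.load()`, and the added `isinstance` guard makes `load()` tolerate legacy values that are not plain lists. A minimal sketch of the resulting contract — the `[(type, points), ...]` shape is inferred from `load()` above, and `dump()` is assumed to be its inverse (the call sites below use it that way):

```python
from imagelib import IEPolys

raw = [(1, [(10, 10), (50, 10), (30, 40)])]   # hypothetical stored metadata value

obj = IEPolys.load(raw)          # plain list -> IEPolys object
data = obj.dump()                # IEPolys object -> plain list (assumed inverse of load)
empty = IEPolys.load("legacy")   # non-list legacy value -> empty IEPolys instead of a crash
```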
```diff
@@ -403,7 +403,7 @@ def mask_editor_main(input_dir, confirmed_dir=None, skipped_dir=None, no_default
                 continue
             else:
                 lmrks = dflimg.get_landmarks()
-                ie_polys = dflimg.get_ie_polys()
+                ie_polys = IEPolys.load(dflimg.get_ie_polys())
                 fanseg_mask = dflimg.get_fanseg_mask()

             if filepath.name in cached_images:
@@ -521,7 +521,7 @@ def mask_editor_main(input_dir, confirmed_dir=None, skipped_dir=None, no_default
                 do_save_move_count -= 1

                 ed.mask_finish()
-                dflimg.embed_and_set (str(filepath), ie_polys=ed.get_ie_polys(), eyebrows_expand_mod=eyebrows_expand_mod )
+                dflimg.embed_and_set (str(filepath), ie_polys=ed.get_ie_polys().dump(), eyebrows_expand_mod=eyebrows_expand_mod )

                 done_paths += [ confirmed_path / filepath.name ]
                 done_images_types[filepath.name] = 2
@@ -532,7 +532,7 @@ def mask_editor_main(input_dir, confirmed_dir=None, skipped_dir=None, no_default
                 do_save_count -= 1

                 ed.mask_finish()
-                dflimg.embed_and_set (str(filepath), ie_polys=ed.get_ie_polys(), eyebrows_expand_mod=eyebrows_expand_mod )
+                dflimg.embed_and_set (str(filepath), ie_polys=ed.get_ie_polys().dump(), eyebrows_expand_mod=eyebrows_expand_mod )

                 done_paths += [ filepath ]
                 done_images_types[filepath.name] = 2
```
```diff
@@ -1,12 +1,15 @@
-import cv2
 import pickle
 from pathlib import Path

+import cv2
+
+from DFLIMG import *
 from facelib import LandmarksProcessor
+from imagelib import IEPolys
 from interact import interact as io
 from utils import Path_utils
 from utils.cv2_utils import *
-from DFLIMG import *

+
 def save_faceset_metadata_folder(input_path):
     input_path = Path(input_path)
@@ -167,7 +170,7 @@ def add_landmarks_debug_images(input_path):

         if img is not None:
             face_landmarks = dflimg.get_landmarks()
-            LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True, ie_polys=dflimg.get_ie_polys() )
+            LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True, ie_polys=IEPolys.load(dflimg.get_ie_polys()) )

             output_file = '{}{}'.format( str(Path(str(input_path)) / filepath.stem), '_debug.jpg')
             cv2_imwrite(output_file, img, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )
```
```diff
@@ -5,7 +5,7 @@ from pathlib import Path
 import cv2
 import numpy as np

-from DFLIMG import DFLIMG
+from DFLIMG import *
 from facelib import FaceType, LandmarksProcessor
 from interact import interact as io
 from joblib import Subprocessor
@@ -475,7 +475,7 @@ def dev_test(input_dir):

     dir_names = Path_utils.get_all_dir_names(input_path)

-    for dir_name in dir_names:
+    for dir_name in io.progress_bar_generator(dir_names, desc="Processing"):

         img_paths = Path_utils.get_image_paths (input_path / dir_name)
         for filename in img_paths:
@@ -485,7 +485,9 @@ def dev_test(input_dir):
             if dflimg is None:
                 raise ValueError

-            import code
-            code.interact(local=dict(globals(), **locals()))
+            dflimg.embed_and_set(filename, person_name=dir_name)
+
+            #import code
+            #code.interact(local=dict(globals(), **locals()))

```
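`dev_test` thus turns into a small labeling pass: it walks every subdirectory with a progress bar and embeds the directory name into each image's metadata as `person_name`, replacing the old `code.interact` debugging stub. This is the label the person-faceset machinery below keys on. Roughly (directory tree hypothetical):

```python
# input_dir/person_a/0001.jpg -> embedded metadata gains person_name='person_a'
# input_dir/person_b/0001.jpg -> embedded metadata gains person_name='person_b'
from pathlib import Path

for img_path in Path('input_dir').glob('*/*.jpg'):   # hypothetical tree
    person_name = img_path.parent.name               # the value dev_test embeds
    print(f"{img_path} -> person_name={person_name!r}")
```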
```diff
@@ -1,13 +1,12 @@
 import pickle
+import shutil
 import struct
 from pathlib import Path

-from interact import interact as io
-from utils import Path_utils
-
-
 import samplelib.SampleHost
+from interact import interact as io
 from samplelib import Sample
+from utils import Path_utils

 packed_faceset_filename = 'faceset.pak'

@@ -24,7 +23,18 @@ class PackedFaceset():

         of = open(samples_dat_path, "wb")

-        image_paths = Path_utils.get_image_paths(samples_path)
+        as_person_faceset = False
+        dir_names = Path_utils.get_all_dir_names(samples_path)
+        if len(dir_names) != 0:
+            as_person_faceset = io.input_bool(f"{len(dir_names)} subdirectories found, process as person faceset? (y/n) skip:y : ", True)
+
+        if as_person_faceset:
+            image_paths = []
+
+            for dir_name in dir_names:
+                image_paths += Path_utils.get_image_paths(samples_path / dir_name)
+        else:
+            image_paths = Path_utils.get_image_paths(samples_path)

         samples = samplelib.SampleHost.load_face_samples(image_paths)
@@ -32,7 +42,11 @@ class PackedFaceset():

         samples_configs = []
         for sample in samples:
-            sample.filename = str(Path(sample.filename).relative_to(samples_path))
+            sample_filepath = Path(sample.filename)
+            sample.filename = sample_filepath.name
+
+            if as_person_faceset:
+                sample.person_name = sample_filepath.parent.name
             samples_configs.append ( sample.get_config() )
         samples_bytes = pickle.dumps(samples_configs, 4)

@@ -48,7 +62,12 @@ class PackedFaceset():

         for sample in io.progress_bar_generator(samples, "Packing"):
             try:
-                with open( samples_path / sample.filename, "rb") as f:
+                if sample.person_name is not None:
+                    sample_path = samples_path / sample.person_name / sample.filename
+                else:
+                    sample_path = samples_path / sample.filename
+
+                with open(sample_path, "rb") as f:
                     b = f.read()

                 offsets.append ( of.tell() - data_start_offset )
@@ -67,6 +86,13 @@ class PackedFaceset():
         for filename in io.progress_bar_generator(image_paths,"Deleting"):
             Path(filename).unlink()

+        if as_person_faceset:
+            for dir_name in dir_names:
+                dir_path = samples_path / dir_name
+                try:
+                    shutil.rmtree(dir_path)
+                except:
+                    io.log_info (f"unable to remove: {dir_path} ")
+
     @staticmethod
     def unpack(samples_path):
@@ -78,7 +104,16 @@ class PackedFaceset():
         samples = PackedFaceset.load(samples_path)

         for sample in io.progress_bar_generator(samples, "Unpacking"):
-            with open(samples_path / sample.filename, "wb") as f:
+            person_name = sample.person_name
+            if person_name is not None:
+                person_path = samples_path / person_name
+                person_path.mkdir(parents=True, exist_ok=True)
+
+                target_filepath = person_path / sample.filename
+            else:
+                target_filepath = samples_path / sample.filename
+
+            with open(target_filepath, "wb") as f:
                 f.write( sample.read_raw_file() )

         samples_dat_path.unlink()
@@ -110,4 +145,3 @@ class PackedFaceset():
             sample.set_filename_offset_size( str(samples_dat_path), data_start_offset+start_offset, end_offset-start_offset )

         return samples
-
```
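In short, `pack()` gains a "person faceset" mode: when the source directory contains one subdirectory per identity, each sample is stored with `person_name` set to its parent directory name, the per-person directories are deleted after packing, and `unpack()` recreates the same tree. A sketch of the intended workflow, assuming `pack(samples_path)` is the entry point these hunks sit in (paths hypothetical):

```python
from pathlib import Path
from samplelib import PackedFaceset

# workspace/aligned/
#     person_a/0001.jpg ...
#     person_b/0001.jpg ...
aligned = Path('workspace/aligned')

PackedFaceset.pack(aligned)    # answer 'y' -> samples carry person_name, dirs are removed
PackedFaceset.unpack(aligned)  # restores person_a/, person_b/ from faceset.pak
```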
```diff
@@ -7,6 +7,7 @@ import numpy as np
 from utils.cv2_utils import *
 from DFLIMG import *
 from facelib import LandmarksProcessor
+from imagelib import IEPolys

 class SampleType(IntEnum):
     IMAGE = 0 #raw image
@@ -50,11 +51,13 @@ class Sample(object):
         self.face_type = face_type
         self.shape = shape
         self.landmarks = np.array(landmarks) if landmarks is not None else None
-        self.ie_polys = ie_polys
+        self.ie_polys = IEPolys.load(ie_polys)
         self.eyebrows_expand_mod = eyebrows_expand_mod
         self.source_filename = source_filename
         self.person_name = person_name
         self.pitch_yaw_roll = pitch_yaw_roll

+        self._filename_offset_size = None
+
     def get_pitch_yaw_roll(self):
         if self.pitch_yaw_roll is None:
@@ -84,7 +87,7 @@ class Sample(object):
         'face_type': self.face_type,
         'shape': self.shape,
         'landmarks': self.landmarks.tolist(),
-        'ie_polys': self.ie_polys,
+        'ie_polys': self.ie_polys.dump(),
         'eyebrows_expand_mod': self.eyebrows_expand_mod,
         'source_filename': self.source_filename,
         'person_name': self.person_name
```
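`Sample` now normalizes at both boundaries: the constructor always wraps `ie_polys` via `IEPolys.load()`, and `get_config()` always unwraps with `dump()`, so the pickled configs inside `faceset.pak` contain only plain Python data, never `IEPolys` instances. Roughly what `get_config()` emits (values hypothetical):

```python
config = {
    'face_type': 'full_face',
    'shape': (256, 256, 3),
    'landmarks': [[12.0, 34.0]],            # plain lists via tolist(), not ndarray
    'ie_polys': [(1, [(0, 0), (4, 0)])],    # plain data via dump(), not an IEPolys
    'eyebrows_expand_mod': 1.0,
    'source_filename': '0001.jpg',
    'person_name': 'person_a',
}
```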
```diff
@@ -31,7 +31,7 @@ class SampleGeneratorFace(SampleGeneratorBase):
         self.add_sample_idx = add_sample_idx

         samples_host = SampleHost.mp_host (SampleType.FACE, self.samples_path)
-        self.samples_len = len(samples_host)
+        self.samples_len = len(samples_host.get_list())

         if self.samples_len == 0:
             raise ValueError('No training data provided.')
@@ -40,7 +40,7 @@ class SampleGeneratorFace(SampleGeneratorBase):

         if random_ct_samples_path is not None:
             ct_samples_host = SampleHost.mp_host (SampleType.FACE, random_ct_samples_path)
-            ct_index_host = mp_utils.IndexHost( len(ct_samples_host) )
+            ct_index_host = mp_utils.IndexHost( len(ct_samples_host.get_list()) )
         else:
             ct_samples_host = None
             ct_index_host = None
@@ -76,7 +76,8 @@ class SampleGeneratorFace(SampleGeneratorBase):
             ct_indexes = ct_index_host.get(bs) if ct_samples is not None else None

             for n_batch in range(bs):
-                sample = samples[ indexes[n_batch] ]
+                sample_idx = indexes[n_batch]
+                sample = samples[ sample_idx ]
                 ct_sample = ct_samples[ ct_indexes[n_batch] ] if ct_samples is not None else None

                 try:
@@ -94,9 +95,5 @@ class SampleGeneratorFace(SampleGeneratorBase):
                         batches[i].append ( x[i] )

                         if self.add_sample_idx:
-                            batches[i_sample_idx].append (idx)
+                            batches[i_sample_idx].append (sample_idx)
             yield [ np.array(batch) for batch in batches]
-
-    @staticmethod
-    def get_person_id_max_count(samples_path):
-        return SampleHost.get_person_id_max_count(samples_path)
```
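Two separate fixes here. First, `ListHost` no longer implements `__len__` (see the mp_utils hunk at the bottom), so the generator measures the dataset via `get_list()`. Second, the old `add_sample_idx` branch appended `idx`, a name that does not appear anywhere in the surrounding code, so enabling `add_sample_idx` presumably raised `NameError`; binding `sample_idx` once and reusing it fixes that:

```python
# Pattern after the fix (sketch of the loop body above):
sample_idx = indexes[n_batch]    # bind the index once
sample = samples[sample_idx]     # ...use it for the lookup...
# batches[i_sample_idx].append(sample_idx)   # ...and for the side-channel output
```

The `get_person_id_max_count` helper is removed from this class; it lives on in `SampleGeneratorFacePerson` and `SampleHost` below.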
```diff
@@ -8,7 +8,7 @@ import numpy as np
 from facelib import LandmarksProcessor
 from samplelib import (SampleGeneratorBase, SampleHost, SampleProcessor,
                        SampleType)
-from utils import iter_utils
+from utils import iter_utils, mp_utils


 '''
@@ -23,9 +23,6 @@ class SampleGeneratorFacePerson(SampleGeneratorBase):
                        sample_process_options=SampleProcessor.Options(),
                        output_sample_types=[],
                        person_id_mode=1,
-                       use_caching=False,
-                       generators_count=2,
-                       generators_random_seed=None,
                        **kwargs):

         super().__init__(samples_path, debug, batch_size)
@@ -33,51 +30,32 @@ class SampleGeneratorFacePerson(SampleGeneratorBase):
         self.output_sample_types = output_sample_types
         self.person_id_mode = person_id_mode

-        if generators_random_seed is not None and len(generators_random_seed) != generators_count:
-            raise ValueError("len(generators_random_seed) != generators_count")
-        self.generators_random_seed = generators_random_seed
-
-        samples = SampleHost.load (SampleType.FACE, self.samples_path, person_id_mode=True, use_caching=use_caching)
-        samples = copy.copy(samples)
-        for i in range(len(samples)):
-            samples[i] = copy.copy(samples[i])
-
-        if person_id_mode==1:
-            #np.random.shuffle(samples)
-            #
-            #new_samples = []
-            #while len(samples) > 0:
-            #    for i in range( len(samples)-1, -1, -1):
-            #        sample = samples[i]
-            #
-            #        if len(sample) > 0:
-            #            new_samples.append(sample.pop(0))
-            #
-            #        if len(sample) == 0:
-            #            samples.pop(i)
-            #            i -= 1
-            #samples = new_samples
-            new_samples = []
-            for s in samples:
-                new_samples += s
-            samples = new_samples
-            np.random.shuffle(samples)
+        samples_host = SampleHost.mp_host (SampleType.FACE, self.samples_path)
+        samples = samples_host.get_list()

         self.samples_len = len(samples)

         if self.samples_len == 0:
             raise ValueError('No training data provided.')

+        persons_name_idxs = {}
+
+        for i,sample in enumerate(samples):
+            person_name = sample.person_name
+            if person_name not in persons_name_idxs:
+                persons_name_idxs[person_name] = []
+            persons_name_idxs[person_name].append (i)
+
+        indexes2D = [ persons_name_idxs[person_name] for person_name in sorted(list(persons_name_idxs.keys())) ]
+        index2d_host = mp_utils.Index2DHost(indexes2D)
+
         if self.debug:
             self.generators_count = 1
-            self.generators = [iter_utils.ThisThreadGenerator ( self.batch_func, (0, samples) )]
+            self.generators = [iter_utils.ThisThreadGenerator ( self.batch_func, (samples_host.create_cli(), index2d_host.create_cli(),) )]
         else:
-            self.generators_count = min ( generators_count, self.samples_len )
-            if person_id_mode==1:
-                self.generators = [iter_utils.SubprocessGenerator ( self.batch_func, (i, samples[i::self.generators_count]) ) for i in range(self.generators_count) ]
-            else:
-                self.generators = [iter_utils.SubprocessGenerator ( self.batch_func, (i, samples) ) for i in range(self.generators_count) ]
+            self.generators_count = np.clip(multiprocessing.cpu_count(), 2, 4)
+            self.generators = [iter_utils.SubprocessGenerator ( self.batch_func, (samples_host.create_cli(), index2d_host.create_cli(),), start_now=True ) for i in range(self.generators_count) ]

         self.generator_counter = -1

@@ -94,12 +72,43 @@ class SampleGeneratorFacePerson(SampleGeneratorBase):
         return next(generator)

     def batch_func(self, param ):
-        generator_id, samples = param
-
-        if self.generators_random_seed is not None:
-            np.random.seed ( self.generators_random_seed[generator_id] )
-
-        if self.person_id_mode==1:
+        samples, index2d_host, = param
+        bs = self.batch_size
+
+        while True:
+            person_idxs = index2d_host.get_1D(bs)
+            samples_idxs = index2d_host.get_2D(person_idxs, 1)
+
+            batches = None
+            for n_batch in range(bs):
+                person_id = person_idxs[n_batch]
+                sample_idx = samples_idxs[n_batch][0]
+
+                sample = samples[ sample_idx ]
+                try:
+                    x, = SampleProcessor.process ([sample], self.sample_process_options, self.output_sample_types, self.debug)
+                except:
+                    raise Exception ("Exception occured in sample %s. Error: %s" % (sample.filename, traceback.format_exc() ) )
+
+                if batches is None:
+                    batches = [ [] for _ in range(len(x)) ]
+
+                    batches += [ [] ]
+                    i_person_id = len(batches)-1
+
+                for i in range(len(x)):
+                    batches[i].append ( x[i] )
+
+                batches[i_person_id].append ( np.array([person_id]) )
+
+            yield [ np.array(batch) for batch in batches]
+
+    @staticmethod
+    def get_person_id_max_count(samples_path):
+        return SampleHost.get_person_id_max_count(samples_path)
+
+    """
+        if self.person_id_mode==1:
             samples_len = len(samples)
             samples_idxs = [*range(samples_len)]
             shuffle_idxs = []
@@ -132,9 +141,7 @@ class SampleGeneratorFacePerson(SampleGeneratorBase):
                 samples_idxs[i] = [*range(len(samples[i]))]
                 shuffle_idxs[i] = []

-        while True:
-
-            if self.person_id_mode==2:
+        if self.person_id_mode==2:
                 if len(shuffle_person_idxs) == 0:
                     shuffle_person_idxs = person_idxs.copy()
                     np.random.shuffle(shuffle_person_idxs)
@@ -270,9 +277,4 @@ class SampleGeneratorFacePerson(SampleGeneratorBase):
                 batches[i_person_id1].append ( np.array([sample1.person_id]) )

                 batches[i_person_id2].append ( np.array([sample2.person_id]) )
-
-            yield [ np.array(batch) for batch in batches]
-
-    @staticmethod
-    def get_person_id_max_count(samples_path):
-        return SampleHost.get_person_id_max_count(samples_path)
+    """
```
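The rewrite replaces per-generator sample slices and manual seeding with two shared hosts: a `ListHost` client for the samples and an `Index2DHost` client for identity-balanced indexes, while the old person-id-mode code is parked inside a string literal. Each batch draws `bs` person ids via `get_1D`, one sample per chosen person via `get_2D(person_idxs, 1)`, and appends the person id as an extra output slot (`i_person_id`) for identity-classification losses, so every identity is sampled at the same rate regardless of how many images it has. A self-contained sketch of the balancing idea (plain Python; the real `Index2DHost` additionally shuffles each bucket without replacement):

```python
import random

indexes2D = [[0, 1, 2], [3], [4, 5]]   # person -> sample indexes (hypothetical)

def draw_batch(bs):
    batch = []
    for _ in range(bs):
        person_id = random.randrange(len(indexes2D))       # uniform over persons,
        sample_idx = random.choice(indexes2D[person_id])   # not over images
        batch.append((person_id, sample_idx))
    return batch

print(draw_batch(4))   # e.g. [(1, 3), (0, 2), (2, 5), (1, 3)]
```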
```diff
@@ -16,8 +16,19 @@ class SampleHost:
     host_cache = dict()

     @staticmethod
     def get_person_id_max_count(samples_path):
-        return len ( Path_utils.get_all_dir_names(samples_path) )
+        samples = None
+        try:
+            samples = samplelib.PackedFaceset.load(samples_path)
+        except:
+            io.log_err(f"Error occured while loading samplelib.PackedFaceset.load {str(samples_dat_path)}, {traceback.format_exc()}")
+
+        if samples is None:
+            raise ValueError("packed faceset not found.")
+        persons_name_idxs = {}
+        for sample in samples:
+            persons_name_idxs[sample.person_name] = 0
+        return len(list(persons_name_idxs.keys()))

     @staticmethod
     def load(sample_type, samples_path):
@@ -79,21 +90,17 @@ class SampleHost:
                     if dflimg is None:
                         io.log_err ("load_face_samples: %s is not a dfl image file required for training" % (filename_path.name) )
                         continue

-                    landmarks = dflimg.get_landmarks()
-                    pitch_yaw_roll = dflimg.get_pitch_yaw_roll()
-                    eyebrows_expand_mod = dflimg.get_eyebrows_expand_mod()
-
                     sample_list.append( Sample(filename=filename,
                                                sample_type=SampleType.FACE,
                                                face_type=FaceType.fromString (dflimg.get_face_type()),
                                                shape=dflimg.get_shape(),
-                                               landmarks=landmarks,
+                                               landmarks=dflimg.get_landmarks(),
                                                ie_polys=dflimg.get_ie_polys(),
-                                               pitch_yaw_roll=pitch_yaw_roll,
-                                               eyebrows_expand_mod=eyebrows_expand_mod,
+                                               pitch_yaw_roll=dflimg.get_pitch_yaw_roll(),
+                                               eyebrows_expand_mod=dflimg.get_eyebrows_expand_mod(),
                                                source_filename=dflimg.get_source_filename(),
-                                               fanseg_mask_exist=dflimg.get_fanseg_mask() is not None, ) )
+                                               ))
                 except:
                     io.log_err ("Unable to load %s , error: %s" % (filename, traceback.format_exc() ) )
```
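`get_person_id_max_count` now counts distinct `person_name` values inside the packed faceset instead of counting subdirectories (which no longer exist after packing). The logic is a dict used as a set:

```python
# Equivalent counting logic (sketch):
def count_persons(samples):
    return len({s.person_name for s in samples})   # a set comprehension does the same job
```

One caveat worth flagging: the new error message interpolates `samples_dat_path`, which is not defined in this method (the parameter is `samples_path`), so the `except` branch itself would presumably raise `NameError` if `PackedFaceset.load` ever failed.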
```diff
@@ -1,6 +1,7 @@
 import cv2
 import numpy as np
 from pathlib import Path
+import traceback

 #allows to open non-english characters path
 def cv2_imread(filename, flags=cv2.IMREAD_UNCHANGED, loader_func=None):
@@ -13,6 +14,7 @@ def cv2_imread(filename, flags=cv2.IMREAD_UNCHANGED, loader_func=None):
         numpyarray = np.asarray(bytes, dtype=np.uint8)
         return cv2.imdecode(numpyarray, flags)
     except:
+        io.log_err(f"Exception occured in cv2_imread : {traceback.format_exc()}")
         return None

 def cv2_imwrite(filename, img, *args):
```
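`cv2_imread` previously swallowed all exceptions and returned `None`; it now logs the full traceback first. Note that neither hunk shown here adds an import for `io`, so unless `cv2_utils.py` already imports `interact as io` elsewhere, the new log line would itself fail with `NameError` inside the `except` block. The same pattern in isolation (hypothetical helper, standard library only):

```python
import traceback

def read_bytes_or_none(path):
    try:
        with open(path, "rb") as f:
            return f.read()
    except Exception:
        print(f"read_bytes_or_none failed: {traceback.format_exc()}")  # log, then degrade
        return None
```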
```diff
@@ -5,6 +5,97 @@ import time
 import numpy as np


+class Index2DHost():
+    """
+    Provides random shuffled 2D indexes for multiprocesses
+    """
+    def __init__(self, indexes2D):
+        self.sq = multiprocessing.Queue()
+        self.cqs = []
+        self.clis = []
+        self.thread = threading.Thread(target=self.host_thread, args=(indexes2D,) )
+        self.thread.daemon = True
+        self.thread.start()
+
+    def host_thread(self, indexes2D):
+        indexes_counts_len = len(indexes2D)
+
+        idxs = [*range(indexes_counts_len)]
+        idxs_2D = [None]*indexes_counts_len
+        shuffle_idxs = []
+        shuffle_idxs_2D = [None]*indexes_counts_len
+        for i in range(indexes_counts_len):
+            idxs_2D[i] = indexes2D[i]
+            shuffle_idxs_2D[i] = []
+
+        sq = self.sq
+
+        while True:
+            while not sq.empty():
+                obj = sq.get()
+                cq_id, cmd = obj[0], obj[1]
+
+                if cmd == 0: #get_1D
+                    count = obj[2]
+
+                    result = []
+                    for i in range(count):
+                        if len(shuffle_idxs) == 0:
+                            shuffle_idxs = idxs.copy()
+                            np.random.shuffle(shuffle_idxs)
+                        result.append(shuffle_idxs.pop())
+                    self.cqs[cq_id].put (result)
+                elif cmd == 1: #get_2D
+                    targ_idxs,count = obj[2], obj[3]
+                    result = []
+
+                    for targ_idx in targ_idxs:
+                        sub_idxs = []
+                        for i in range(count):
+                            ar = shuffle_idxs_2D[targ_idx]
+                            if len(ar) == 0:
+                                ar = shuffle_idxs_2D[targ_idx] = idxs_2D[targ_idx].copy()
+                                np.random.shuffle(ar)
+                            sub_idxs.append(ar.pop())
+                        result.append (sub_idxs)
+                    self.cqs[cq_id].put (result)
+
+            time.sleep(0.005)
+
+    def create_cli(self):
+        cq = multiprocessing.Queue()
+        self.cqs.append ( cq )
+        cq_id = len(self.cqs)-1
+        return Index2DHost.Cli(self.sq, cq, cq_id)
+
+    # disable pickling
+    def __getstate__(self):
+        return dict()
+    def __setstate__(self, d):
+        self.__dict__.update(d)
+
+    class Cli():
+        def __init__(self, sq, cq, cq_id):
+            self.sq = sq
+            self.cq = cq
+            self.cq_id = cq_id
+
+        def get_1D(self, count):
+            self.sq.put ( (self.cq_id,0, count) )
+
+            while True:
+                if not self.cq.empty():
+                    return self.cq.get()
+                time.sleep(0.001)
+
+        def get_2D(self, idxs, count):
+            self.sq.put ( (self.cq_id,1,idxs,count) )
+
+            while True:
+                if not self.cq.empty():
+                    return self.cq.get()
+                time.sleep(0.001)
+
 class IndexHost():
     """
     Provides random shuffled indexes for multiprocesses
```
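`Index2DHost` mirrors the existing `IndexHost` pattern: a daemon thread owns the shuffle state and serves clients over `multiprocessing` queues, so subprocess generators can share one consistent index stream. `get_1D(count)` pops shuffled top-level (person) indexes, reshuffling when a cycle is exhausted; `get_2D(idxs, count)` does the same per bucket. A minimal usage sketch (bucket contents hypothetical):

```python
from utils import mp_utils

indexes2D = [[0, 1, 2], [3], [4, 5]]        # person -> sample indexes
host = mp_utils.Index2DHost(indexes2D)
cli = host.create_cli()                     # client object handed to generator subprocesses

person_idxs = cli.get_1D(4)                 # e.g. [2, 0, 1, 2]
samples_idxs = cli.get_2D(person_idxs, 1)   # e.g. [[5], [1], [3], [4]]
```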
```diff
@@ -93,8 +184,8 @@ class ListHost():
         cq_id = len(self.cqs)-1
         return ListHost.Cli(self.sq, cq, cq_id)

-    def __len__(self):
-        return len(self.list_)
+    def get_list(self):
+        return self.list_

     # disable pickling
     def __getstate__(self):
```
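Renaming `__len__` to `get_list()` makes the host hand out its underlying list explicitly; callers such as `SampleGeneratorFace` above now write `len(samples_host.get_list())`. A minimal stand-in showing the call-site shape:

```python
class Host:                      # hypothetical stand-in for ListHost
    def __init__(self, list_):
        self.list_ = list_
    def get_list(self):
        return self.list_

h = Host([1, 2, 3])
assert len(h.get_list()) == 3    # explicit fetch replaces len(h)
```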