mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-08-14 02:37:00 -07:00)

Commit f8824f9601 (parent 8686309417): refactoring. Added RecycleGAN for testing.

24 changed files with 1661 additions and 1505 deletions
@@ -41,25 +41,20 @@ def model_process(model_name, model_dir, in_options, sq, cq):
        cq.put ( {'op':'init', 'converter' : converter.copy_and_set_predictor( None ) } )

        closing = False
        while not closing:
        while True:
            while not sq.empty():
                obj = sq.get()
                obj_op = obj['op']
                if obj_op == 'predict':
                    result = converter.predictor ( obj['face'] )
                    cq.put ( {'op':'predict_result', 'result':result} )
                elif obj_op == 'close':
                    closing = True
                    break
            time.sleep(0.005)

        model.finalize()

    except Exception as e:
        print ( 'Error: %s' % (str(e)))
        traceback.print_exc()




from utils.SubprocessorBase import SubprocessorBase
class ConvertSubprocessor(SubprocessorBase):
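The hunk above is the model-process half of a small queue protocol: the parent pushes {'op': ...} dicts into sq and reads answers from cq, and the loop exits when a 'close' op arrives instead of being killed from outside. A minimal, self-contained sketch of that pattern, with a dummy predictor standing in for the real converter (hypothetical names, not the repository's code):

import time
import multiprocessing

def worker(sq, cq):
    predictor = lambda face: face * 2       # dummy stand-in for converter.predictor

    closing = False
    while not closing:
        while not sq.empty():
            obj = sq.get()
            if obj['op'] == 'predict':
                cq.put({'op': 'predict_result', 'result': predictor(obj['face'])})
            elif obj['op'] == 'close':
                closing = True
                break
        time.sleep(0.005)                   # avoid busy-waiting on an empty queue

if __name__ == '__main__':
    sq, cq = multiprocessing.Queue(), multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(sq, cq))
    p.start()
    sq.put({'op': 'predict', 'face': 21})
    print(cq.get())                         # {'op': 'predict_result', 'result': 42}
    sq.put({'op': 'close'})                 # cooperative shutdown, no terminate() needed
    p.join()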
@@ -129,10 +124,11 @@ class ConvertSubprocessor(SubprocessorBase):
        self.alignments = client_dict['alignments']
        self.debug = client_dict['debug']

        import gpufmkmgr
        from nnlib import nnlib
        #model process ate all GPU mem,
        #so we cannot use GPU for any TF operations in converter processes (for example image_utils.TFLabConverter)
        gpufmkmgr.set_prefer_GPUConfig ( gpufmkmgr.GPUConfig (cpu_only=True) )
        #so we cannot use GPU for any TF operations in converter processes (for example image_utils.TFLabConverter)
        #therefore forcing prefer_DeviceConfig to CPU only
        nnlib.prefer_DeviceConfig = nnlib.DeviceConfig (cpu_only=True)

        return None
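The comments spell out the constraint: the dedicated model process has already claimed the GPU memory, so the converter workers must keep all TensorFlow work on the CPU. Outside of nnlib, the same effect can be had with stock TF 1.x settings; a generic sketch (not the nnlib implementation, which is DeepFaceLab-specific):

import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''            # hide CUDA devices before TF initializes

import tensorflow as tf

config = tf.ConfigProto(device_count={'GPU': 0})   # additionally place no ops on GPU devices
sess = tf.Session(config=config)

with sess.as_default():
    print(tf.constant([1.0, 2.0]).eval())          # runs on the CPU only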
@@ -156,6 +152,13 @@ class ConvertSubprocessor(SubprocessorBase):
        image = (cv2.imread(str(filename_path)) / 255.0).astype(np.float32)

        if self.converter.get_mode() == ConverterBase.MODE_IMAGE:
            image = self.converter.convert_image(image, self.debug)
            if self.debug:
                for img in image:
                    cv2.imshow ('Debug convert', img )
                    cv2.waitKey(0)
            faces_processed = 1
        elif self.converter.get_mode() == ConverterBase.MODE_IMAGE_WITH_LANDMARKS:
            image_landmarks = DFLPNG.load( str(filename_path), throw_on_no_embedded_data=True ).get_landmarks()

            image = self.converter.convert_image(image, image_landmarks, self.debug)
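In debug mode convert_image hands back one or more images, which the code previews one by one with OpenCV. The same preview loop pulled out as a stand-alone helper with synthetic inputs (only the cv2 calls mirror the hunk; the rest is illustrative):

import cv2
import numpy as np

def debug_preview(images, window='Debug convert'):
    # cv2.imshow maps float32 pixel values from [0, 1] to [0, 255] for display,
    # which matches the imread(...) / 255.0 normalization used above.
    for img in images:
        cv2.imshow(window, img)
        cv2.waitKey(0)                       # wait for a keypress before the next image
    cv2.destroyWindow(window)

if __name__ == '__main__':
    debug_preview([np.full((128, 128, 3), v, np.float32) for v in (0.25, 0.75)])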
@@ -270,9 +273,8 @@ def main (input_dir, output_dir, model_dir, model_name, aligned_dir=None, **in_o
                        output_path = output_path,
                        alignments = alignments,
                        **in_options ).process()

    model_sq.put ( {'op':'close'} )
    model_p.join()

    model_p.terminate()

    '''
    if model_name == 'AVATAR':
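This hunk is the parent-side counterpart of the worker's 'close' handling: push a 'close' op, then join, rather than relying on model_p.terminate(). One defensive way to combine the calls shown here, sketched with a throwaway worker (hypothetical names, not the repository's code):

import time
import multiprocessing

def model_worker(sq, cq):
    # Throwaway stand-in for the model process: exits once it sees a 'close' op.
    while True:
        if not sq.empty() and sq.get().get('op') == 'close':
            break
        time.sleep(0.005)

if __name__ == '__main__':
    model_sq, model_cq = multiprocessing.Queue(), multiprocessing.Queue()
    model_p = multiprocessing.Process(target=model_worker, args=(model_sq, model_cq))
    model_p.start()

    model_sq.put({'op': 'close'})            # ask the worker to exit on its own
    model_p.join(timeout=10)                 # leave time for cleanup such as model.finalize()
    if model_p.is_alive():
        model_p.terminate()                  # last resort only if the worker hangs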
@@ -12,7 +12,7 @@ from utils.DFLPNG import DFLPNG
from utils import image_utils
from facelib import FaceType
import facelib
import gpufmkmgr
from nnlib import nnlib

from utils.SubprocessorBase import SubprocessorBase
class ExtractSubprocessor(SubprocessorBase):
@@ -63,10 +63,10 @@ class ExtractSubprocessor(SubprocessorBase):
    def get_devices_for_type (self, type, multi_gpu):
        if (type == 'rects' or type == 'landmarks'):
            if not multi_gpu:
                devices = [gpufmkmgr.getBestDeviceIdx()]
                devices = [nnlib.device.getBestDeviceIdx()]
            else:
                devices = gpufmkmgr.getDevicesWithAtLeastTotalMemoryGB(2)
                devices = [ (idx, gpufmkmgr.getDeviceName(idx), gpufmkmgr.getDeviceVRAMTotalGb(idx) ) for idx in devices]
                devices = nnlib.device.getDevicesWithAtLeastTotalMemoryGB(2)
                devices = [ (idx, nnlib.device.getDeviceName(idx), nnlib.device.getDeviceVRAMTotalGb(idx) ) for idx in devices]

        elif type == 'final':
            devices = [ (i, 'CPU%d' % (i), 0 ) for i in range(0, multiprocessing.cpu_count()) ]
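get_devices_for_type hands back (index, name, total VRAM in GB) tuples so the caller can spin up one subprocess per device. The branching reads roughly as follows when restated against a made-up device table (FAKE_GPUS and min_total_memory_gb are hypothetical; the real code queries nnlib.device):

import multiprocessing

# Hypothetical device table: (index, name, total VRAM in GB) per GPU.
FAKE_GPUS = [(0, 'GPU A', 8), (1, 'GPU B', 2), (2, 'GPU C', 1)]

def get_devices_for_type(type, multi_gpu, min_total_memory_gb=2):
    if type in ('rects', 'landmarks'):
        if not multi_gpu:
            return [max(FAKE_GPUS, key=lambda d: d[2])[0]]    # best device index only
        # every device with enough total VRAM, as (idx, name, vram_gb) tuples
        return [d for d in FAKE_GPUS if d[2] >= min_total_memory_gb]
    elif type == 'final':
        # the 'final' pass is CPU-bound: one pseudo-device per core
        return [(i, 'CPU%d' % i, 0) for i in range(multiprocessing.cpu_count())]

print(get_devices_for_type('rects', multi_gpu=True))          # [(0, 'GPU A', 8), (1, 'GPU B', 2)]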
@@ -253,31 +253,22 @@ class ExtractSubprocessor(SubprocessorBase):
        self.debug = client_dict['debug']
        self.detector = client_dict['detector']

        self.keras = None
        self.tf = None
        self.tf_session = None

        self.e = None

        device_config = nnlib.DeviceConfig ( cpu_only=self.cpu_only, force_best_gpu_idx=self.device_idx, allow_growth=True)
        if self.type == 'rects':
            if self.detector is not None:
                if self.detector == 'mt':

                    self.gpu_config = gpufmkmgr.GPUConfig ( cpu_only=self.cpu_only, force_best_gpu_idx=self.device_idx, allow_growth=True)
                    self.tf = gpufmkmgr.import_tf ( self.gpu_config )
                    self.tf_session = gpufmkmgr.get_tf_session()
                    self.keras = gpufmkmgr.import_keras()
                    self.e = facelib.MTCExtractor(self.keras, self.tf, self.tf_session)
                    nnlib.import_all (device_config)
                    self.e = facelib.MTCExtractor(nnlib.keras, nnlib.tf, nnlib.tf_sess)
                elif self.detector == 'dlib':
                    self.dlib = gpufmkmgr.import_dlib( self.device_idx, cpu_only=self.cpu_only )
                    self.e = facelib.DLIBExtractor(self.dlib)
                    nnlib.import_dlib (device_config)
                    self.e = facelib.DLIBExtractor(nnlib.dlib)
                self.e.__enter__()

        elif self.type == 'landmarks':
            self.gpu_config = gpufmkmgr.GPUConfig ( cpu_only=self.cpu_only, force_best_gpu_idx=self.device_idx, allow_growth=True)
            self.tf = gpufmkmgr.import_tf ( self.gpu_config )
            self.tf_session = gpufmkmgr.get_tf_session()
            self.keras = gpufmkmgr.import_keras()
            self.e = facelib.LandmarksExtractor(self.keras)
            nnlib.import_all (device_config)
            self.e = facelib.LandmarksExtractor(nnlib.keras)
            self.e.__enter__()

        elif self.type == 'final':
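The net effect of this hunk is that the per-framework bootstrapping (import_tf, get_tf_session, import_keras, import_dlib on gpufmkmgr) collapses into one nnlib.DeviceConfig plus import_all or import_dlib. Restated using only the calls visible in the diff and assuming DeepFaceLab's facelib and nnlib are importable, detector setup becomes roughly:

import facelib
from nnlib import nnlib

def init_extractor(type, detector, device_idx, cpu_only=False):
    # One device description drives every framework import.
    device_config = nnlib.DeviceConfig(cpu_only=cpu_only,
                                       force_best_gpu_idx=device_idx,
                                       allow_growth=True)
    if type == 'rects' and detector == 'mt':
        nnlib.import_all(device_config)          # brings in tf, tf_sess and keras together
        e = facelib.MTCExtractor(nnlib.keras, nnlib.tf, nnlib.tf_sess)
    elif type == 'rects' and detector == 'dlib':
        nnlib.import_dlib(device_config)
        e = facelib.DLIBExtractor(nnlib.dlib)
    elif type == 'landmarks':
        nnlib.import_all(device_config)
        e = facelib.LandmarksExtractor(nnlib.keras)
    else:
        raise ValueError('unsupported type/detector combination')
    e.__enter__()                                # the extractors are used as context managers
    return e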