Mirror of https://github.com/iperov/DeepFaceLab.git
DFL-2.0 initial branch commit
This commit is contained in:
parent 52a67a61b3
commit 38b85108b3

154 changed files with 5251 additions and 9414 deletions
@@ -2,22 +2,23 @@ import multiprocessing
 import shutil

 from DFLIMG import *
-from interact import interact as io
-from joblib import Subprocessor
-from nnlib import nnlib
-from utils import Path_utils
-from utils.cv2_utils import *
+from core.interact import interact as io
+from core.joblib import Subprocessor
+from core.leras import nn
+from core import pathex
+from core.cv2ex import *


 class FacesetEnhancerSubprocessor(Subprocessor):

     #override
-    def __init__(self, image_paths, output_dirpath, multi_gpu=False, cpu_only=False):
+    def __init__(self, image_paths, output_dirpath, device_config):
         self.image_paths = image_paths
         self.output_dirpath = output_dirpath
         self.result = []
-        self.devices = FacesetEnhancerSubprocessor.get_devices_for_config(multi_gpu, cpu_only)
+        self.nn_initialize_mp_lock = multiprocessing.Lock()
+        self.devices = FacesetEnhancerSubprocessor.get_devices_for_config(device_config)

         super().__init__('FacesetEnhancer', FacesetEnhancerSubprocessor.Cli, 600)

     #override
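
The import block above captures the DFL 1.0 -> 2.0 package renames that recur through this commit. As a quick reference, a minimal sketch of the mapping as it appears in this hunk (a plain Python dict; the comments are inferred from how the names are used in this file, nothing here is DFL API):

# Inferred module renames applied in this hunk (sketch, not an exhaustive list).
DFL2_RENAMES = {
    "interact.interact":    "core.interact.interact",    # io prompts and logging
    "joblib.Subprocessor":  "core.joblib.Subprocessor",  # multiprocess job runner
    "nnlib.nnlib":          "core.leras.nn",             # neural-net backend wrapper
    "utils.Path_utils":     "core.pathex",               # path helpers (get_image_paths)
    "utils.cv2_utils":      "core.cv2ex",                # cv2 helpers
}

if __name__ == "__main__":
    for old, new in DFL2_RENAMES.items():
        print(f"{old:22} -> {new}")
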
@@ -30,7 +31,8 @@ class FacesetEnhancerSubprocessor(Subprocessor):

     #override
     def process_info_generator(self):
-        base_dict = {'output_dirpath':self.output_dirpath}
+        base_dict = {'output_dirpath':self.output_dirpath,
+                     'nn_initialize_mp_lock': self.nn_initialize_mp_lock,}

         for (device_idx, device_type, device_name, device_total_vram_gb) in self.devices:
             client_dict = base_dict.copy()
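
The new 'nn_initialize_mp_lock' entry shares one multiprocessing.Lock with every worker via client_dict, so the heavyweight backend initialization in each worker can be serialized while the actual image processing still runs in parallel. A minimal, self-contained sketch of that pattern (standard library only; the worker body and timing are illustrative, not DFL's Subprocessor API):

# Minimal sketch of the shared-lock pattern (standard library only).
import multiprocessing
import time

def worker(worker_idx, init_lock):
    # Serialize the one-time, expensive initialization (loading the NN backend)
    # across workers; everything after the lock runs fully in parallel.
    with init_lock:
        time.sleep(0.1)  # stand-in for nn initialization
        print(f"worker {worker_idx}: initialized")
    # ... per-image processing would go here ...

if __name__ == "__main__":
    lock = multiprocessing.Lock()  # created once in the parent, as in __init__ above
    procs = [multiprocessing.Process(target=worker, args=(i, lock)) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
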
@@ -59,37 +61,13 @@ class FacesetEnhancerSubprocessor(Subprocessor):
         return self.result

     @staticmethod
-    def get_devices_for_config (multi_gpu, cpu_only):
-        backend = nnlib.device.backend
-        if 'cpu' in backend:
-            cpu_only = True
-
-        if not cpu_only and backend == "plaidML":
-            cpu_only = True
-
-        if not cpu_only:
-            devices = []
-            if multi_gpu:
-                devices = nnlib.device.getValidDevicesWithAtLeastTotalMemoryGB(2)
-
-            if len(devices) == 0:
-                idx = nnlib.device.getBestValidDeviceIdx()
-                if idx != -1:
-                    devices = [idx]
-
-            if len(devices) == 0:
-                cpu_only = True
-
-            result = []
-            for idx in devices:
-                dev_name = nnlib.device.getDeviceName(idx)
-                dev_vram = nnlib.device.getDeviceVRAMTotalGb(idx)
-
-                result += [ (idx, 'GPU', dev_name, dev_vram) ]
-
-            return result
-
-        if cpu_only:
+    def get_devices_for_config (device_config):
+        devices = device_config.devices
+        cpu_only = len(devices) == 0
+
+        if not cpu_only:
+            return [ (device.index, 'GPU', device.name, device.total_mem_gb) for device in devices ]
+        else:
             return [ (i, 'CPU', 'CPU%d' % (i), 0 ) for i in range( min(8, multiprocessing.cpu_count() // 2) ) ]

     class Cli(Subprocessor.Cli):
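
After this rewrite, get_devices_for_config no longer probes the backend itself; it simply reads device_config.devices and, when that list is empty, falls back to at most 8 CPU workers (half the logical cores). A runnable sketch of the same selection logic against a stand-in device record (the namedtuple is hypothetical; the real objects come from nn.DeviceConfig):

# Runnable sketch of the new selection logic; Device is a stand-in record.
import multiprocessing
from collections import namedtuple

Device = namedtuple("Device", "index name total_mem_gb")

def get_devices_for_config(devices):
    if len(devices) != 0:
        return [(d.index, 'GPU', d.name, d.total_mem_gb) for d in devices]
    # CPU fallback: at most 8 workers, half of the logical cores.
    return [(i, 'CPU', 'CPU%d' % i, 0) for i in range(min(8, multiprocessing.cpu_count() // 2))]

if __name__ == "__main__":
    print(get_devices_for_config([Device(0, "GPU0", 8.0)]))  # GPU path
    print(get_devices_for_config([]))                        # CPU fallback
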
@@ -99,20 +77,23 @@ class FacesetEnhancerSubprocessor(Subprocessor):
             device_idx = client_dict['device_idx']
             cpu_only = client_dict['device_type'] == 'CPU'
             self.output_dirpath = client_dict['output_dirpath']
-
-            device_config = nnlib.DeviceConfig ( cpu_only=cpu_only, force_gpu_idx=device_idx, allow_growth=True)
-            nnlib.import_all (device_config)
-
-            device_vram = device_config.gpu_vram_gb[0]
+            nn_initialize_mp_lock = client_dict['nn_initialize_mp_lock']
+
+            if cpu_only:
+                device_config = nn.DeviceConfig.CPU()
+                device_vram = 99
+            else:
+                device_config = nn.DeviceConfig.GPUIndexes ([device_idx])
+                device_vram = device_config.devices[0].total_mem_gb
+
+            nn.initialize (device_config)

             intro_str = 'Running on %s.' % (client_dict['device_name'])
             if not cpu_only and device_vram <= 2:
                 intro_str += " Recommended to close all programs using this device."

             self.log_info (intro_str)

-            from facelib import FaceEnhancer
-            self.fe = FaceEnhancer()
+            from facelib import FaceEnhancer
+            self.fe = FaceEnhancer( place_model_on_cpu=(device_vram<=2) )

         #override
         def process_data(self, filepath):
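
Each worker now builds its own device configuration (CPU, or exactly one GPU index) and decides whether to keep the enhancer weights on the CPU when the assigned GPU has 2 GB of VRAM or less; the CPU path sets device_vram = 99 so that check never fires. A small plain-Python sketch of that decision, with a hypothetical helper name (neither nn.DeviceConfig nor FaceEnhancer is imported here):

# Plain-Python sketch of the per-worker placement decision; plan_worker is hypothetical.
def plan_worker(device_type, device_idx, total_mem_gb):
    cpu_only = (device_type == 'CPU')
    if cpu_only:
        device_vram = 99            # sentinel: the low-VRAM branch never fires on CPU
    else:
        device_vram = total_mem_gb  # VRAM of the single GPU assigned to this worker
    return {
        'device': 'CPU' if cpu_only else 'GPU:%d' % device_idx,
        'place_model_on_cpu': device_vram <= 2,  # keep enhancer weights in system RAM on small GPUs
    }

if __name__ == "__main__":
    print(plan_worker('GPU', 0, 2))    # place_model_on_cpu: True
    print(plan_worker('GPU', 1, 11))   # place_model_on_cpu: False
    print(plan_worker('CPU', 0, 0))    # place_model_on_cpu: False
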
@@ -137,7 +118,10 @@ class FacesetEnhancerSubprocessor(Subprocessor):

             return (0, filepath, None)

-def process_folder ( dirpath, multi_gpu=False, cpu_only=False ):
+def process_folder ( dirpath, cpu_only=False, force_gpu_idxs=None ):
+    device_config = nn.DeviceConfig.GPUIndexes( force_gpu_idxs or nn.ask_choose_device_idxs(suggest_all_gpu=True) ) \
+                    if not cpu_only else nn.DeviceConfig.CPU()
+
     output_dirpath = dirpath.parent / (dirpath.name + '_enhanced')
     output_dirpath.mkdir (exist_ok=True, parents=True)

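
Callers of process_folder now pass force_gpu_idxs instead of multi_gpu/cpu_only flags; when no indices are forced and CPU-only is not requested, the expression falls back to an interactive device prompt. A sketch of that argument-handling pattern with a hypothetical stand-in for the prompt (nn.ask_choose_device_idxs is DFL's interactive chooser and is not reproduced here):

# Sketch of the new argument handling; choose_device_idxs is a hypothetical
# stand-in for the interactive prompt.
def choose_device_idxs():
    return [0, 1]   # pretend the user picked all GPUs at the prompt

def resolve_devices(cpu_only=False, force_gpu_idxs=None):
    if cpu_only:
        return 'CPU'
    # `or` defers to the interactive prompt only when no indices were forced.
    return ('GPU', force_gpu_idxs or choose_device_idxs())

if __name__ == "__main__":
    print(resolve_devices(force_gpu_idxs=[0]))   # ('GPU', [0])    non-interactive
    print(resolve_devices())                     # ('GPU', [0, 1]) would prompt in DFL
    print(resolve_devices(cpu_only=True))        # 'CPU'
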
@@ -146,15 +130,15 @@ def process_folder ( dirpath, multi_gpu=False, cpu_only=False ):
     io.log_info (f"Enhancing faceset in {dirpath_parts}")
     io.log_info ( f"Processing to {output_dirpath_parts}")

-    output_images_paths = Path_utils.get_image_paths(output_dirpath)
+    output_images_paths = pathex.get_image_paths(output_dirpath)
     if len(output_images_paths) > 0:
         for filename in output_images_paths:
             Path(filename).unlink()

-    image_paths = [Path(x) for x in Path_utils.get_image_paths( dirpath )]
-    result = FacesetEnhancerSubprocessor ( image_paths, output_dirpath, multi_gpu=multi_gpu, cpu_only=cpu_only).run()
+    image_paths = [Path(x) for x in pathex.get_image_paths( dirpath )]
+    result = FacesetEnhancerSubprocessor ( image_paths, output_dirpath, device_config=device_config).run()

-    is_merge = io.input_bool (f"\r\nMerge {output_dirpath_parts} to {dirpath_parts} ? (y/n skip:y) : ", True)
+    is_merge = io.input_bool (f"\r\nMerge {output_dirpath_parts} to {dirpath_parts} ?", True)
     if is_merge:
         io.log_info (f"Copying processed files to {dirpath_parts}")
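
The rest of process_folder prepares a sibling <name>_enhanced folder, deletes any images left over from a previous run, runs the subprocessor, and then offers to merge the results back; the merge branch itself is truncated in this view. A standard-library sketch of the output-folder preparation only (prepare_output_dir and the "aligned" folder name in the usage example are hypothetical):

# Standard-library sketch of the output-folder preparation shown above.
from pathlib import Path

def prepare_output_dir(dirpath: Path) -> Path:
    # Create the "<name>_enhanced" sibling folder and clear files left over
    # from a previous run, as the hunk above does with pathex.get_image_paths.
    output_dirpath = dirpath.parent / (dirpath.name + '_enhanced')
    output_dirpath.mkdir(exist_ok=True, parents=True)
    for filename in output_dirpath.iterdir():
        if filename.is_file():
            filename.unlink()
    return output_dirpath

if __name__ == "__main__":
    import tempfile
    workspace = Path(tempfile.mkdtemp())
    aligned = workspace / "aligned"      # hypothetical faceset folder
    aligned.mkdir()
    print(prepare_output_dir(aligned))   # .../aligned_enhanced
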