mirror of
https://github.com/iperov/DeepFaceLab.git
synced 2025-07-07 21:42:08 -07:00
Now you can train models on multiple GPUs in the same workspace without cloning any folders.

Model file names are prefixed with the GPU index when a GPU is chosen explicitly at train/convert start. If you leave the GPU index choice at its default, the best GPU is chosen automatically and the model file names carry no index prefix. This makes it possible to train the same fake with different models or options on multiple GPUs.

H64 and H128: you can now choose a 'Lighter autoencoder'. It is the same behavior as vram gb <= 4 before this update.

Added archived_models.zip, which contains old experiments.

RecycleGAN: archived.

devicelib: if your system has no NVML installed (some old cards), it will work with gpu_idx=0 as a 'Generic GeForce GPU' with 2GB of VRAM.

Refactorings.
This commit is contained in:
parent e2f4677987
commit 1f2b1481ef

9 changed files with 180 additions and 479 deletions
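The naming rule described in the commit message can be summarized in a small sketch. This is illustrative only, assuming a hypothetical model_filename helper and a hypothetical '_data.dat' suffix; the actual prefixing happens in the model save/load code, which is not part of the excerpt below:

    # Hypothetical helper, not part of this commit: shows the naming rule only.
    # force_gpu_idx == -1 means "auto-pick the best GPU" -> no prefix.
    def model_filename(model_name, force_gpu_idx=-1):
        prefix = '%d_' % force_gpu_idx if force_gpu_idx >= 0 else ''
        return prefix + model_name + '_data.dat'

    model_filename('H128')                   # 'H128_data.dat'
    model_filename('H128', force_gpu_idx=1)  # '1_H128_data.dat'

One file name per explicitly chosen GPU is what lets several trainers share one workspace without cloning folders.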
@@ -1,20 +1,26 @@
 from .pynvml import *
 
+try:
+    nvmlInit()
+    hasNVML = True
+except:
+    hasNVML = False
+
 class devicelib:
     class Config():
-        force_best_gpu_idx = -1
+        force_gpu_idx = -1
         multi_gpu = False
         force_gpu_idxs = None
         choose_worst_gpu = False
         gpu_idxs = []
         gpu_names = []
-        gpu_total_vram_gb = 0
         gpu_compute_caps = []
+        gpu_vram_gb = []
         allow_growth = True
         use_fp16 = False
         cpu_only = False
 
-        def __init__ (self, force_best_gpu_idx = -1,
+        def __init__ (self, force_gpu_idx = -1,
                             multi_gpu = False,
                             force_gpu_idxs = None,
                             choose_worst_gpu = False,
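The hunk above moves NVML detection to import time: nvmlInit() is attempted once and the result cached in the module-level hasNVML flag, instead of wrapping every query in its own try/nvmlInit()/nvmlShutdown() cycle. A minimal standalone sketch of the same pattern (using the standard pynvml names; the repository bundles its own copy as .pynvml):

    from pynvml import *

    # Probe NVML once at import; every later call just branches on the flag.
    try:
        nvmlInit()
        hasNVML = True
    except:
        hasNVML = False

    def device_count():
        # NVML-less systems are treated as having one generic device.
        return nvmlDeviceGetCount() if hasNVML else 1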
@@ -27,219 +33,154 @@ class devicelib:
             if cpu_only:
                 self.cpu_only = True
             else:
-                self.force_best_gpu_idx = force_best_gpu_idx
+                self.force_gpu_idx = force_gpu_idx
                 self.multi_gpu = multi_gpu
                 self.force_gpu_idxs = force_gpu_idxs
                 self.choose_worst_gpu = choose_worst_gpu
                 self.allow_growth = allow_growth
 
                 self.gpu_idxs = []
 
-                if not devicelib.hasNVML():
-                    self.gpu_idxs = [0]
-                    self.gpu_total_vram_gb = 2
-                    self.gpu_names += ['Generic GeForce GPU']
-                    self.gpu_compute_caps += [ 50 ]
-
-                    if force_gpu_idxs is not None:
-                        for idx in force_gpu_idxs.split(','):
-                            idx = int(idx)
-                            if devicelib.isValidDeviceIdx(idx):
-                                self.gpu_idxs.append(idx)
-                else:
-                    gpu_idx = force_best_gpu_idx if (force_best_gpu_idx >= 0 and devicelib.isValidDeviceIdx(force_best_gpu_idx)) else devicelib.getBestDeviceIdx() if not choose_worst_gpu else devicelib.getWorstDeviceIdx()
-                    if gpu_idx != -1:
-                        if self.multi_gpu:
-                            self.gpu_idxs = devicelib.getDeviceIdxsEqualModel( gpu_idx )
-                            if len(self.gpu_idxs) <= 1:
-                                self.multi_gpu = False
-                        else:
-                            self.gpu_idxs = [gpu_idx]
-
-                self.cpu_only = (len(self.gpu_idxs) == 0)
-
-                if not self.cpu_only:
-                    self.gpu_total_vram_gb = devicelib.getDeviceVRAMTotalGb ( self.gpu_idxs[0] )
-                    self.gpu_names = []
-                    self.gpu_compute_caps = []
-                    for gpu_idx in self.gpu_idxs:
-                        self.gpu_names += [devicelib.getDeviceName(gpu_idx)]
-                        self.gpu_compute_caps += [ devicelib.getDeviceComputeCapability ( gpu_idx ) ]
-
-    @staticmethod
-    def hasNVML():
-        try:
-            nvmlInit()
-            nvmlShutdown()
-        except:
-            return False
-        return True
-
-    @staticmethod
-    def getDevicesWithAtLeastFreeMemory(freememsize):
-        result = []
-        try:
-            nvmlInit()
-            for i in range(0, nvmlDeviceGetCount() ):
-                handle = nvmlDeviceGetHandleByIndex(i)
-                memInfo = nvmlDeviceGetMemoryInfo( handle )
-                if (memInfo.total - memInfo.used) >= freememsize:
-                    result.append (i)
-            nvmlShutdown()
-        except:
-            pass
-        return result
+                if force_gpu_idxs is not None:
+                    for idx in force_gpu_idxs.split(','):
+                        idx = int(idx)
+                        if devicelib.isValidDeviceIdx(idx):
+                            self.gpu_idxs.append(idx)
+                else:
+                    gpu_idx = force_gpu_idx if (force_gpu_idx >= 0 and devicelib.isValidDeviceIdx(force_gpu_idx)) else devicelib.getBestDeviceIdx() if not choose_worst_gpu else devicelib.getWorstDeviceIdx()
+                    if gpu_idx != -1:
+                        if self.multi_gpu:
+                            self.gpu_idxs = devicelib.getDeviceIdxsEqualModel( gpu_idx )
+                            if len(self.gpu_idxs) <= 1:
+                                self.multi_gpu = False
+                        else:
+                            self.gpu_idxs = [gpu_idx]
+
+                self.cpu_only = (len(self.gpu_idxs) == 0)
+
+                if not self.cpu_only:
+                    self.gpu_names = []
+                    self.gpu_compute_caps = []
+                    for gpu_idx in self.gpu_idxs:
+                        self.gpu_names += [devicelib.getDeviceName(gpu_idx)]
+                        self.gpu_compute_caps += [ devicelib.getDeviceComputeCapability ( gpu_idx ) ]
+                        self.gpu_vram_gb += [ devicelib.getDeviceVRAMTotalGb ( gpu_idx ) ]
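The rewritten __init__ resolves devices with a fixed precedence: an explicit comma-separated force_gpu_idxs string wins, then force_gpu_idx if it names a valid device, then the best device (or the worst, when choose_worst_gpu is set); an empty result means cpu_only. A distilled restatement of that precedence, with the devicelib queries passed in as plain values (the function and parameter names here are illustrative):

    # valid_idxs/best/worst stand in for devicelib.isValidDeviceIdx /
    # getBestDeviceIdx / getWorstDeviceIdx.
    def resolve_gpu_idxs(force_gpu_idxs, force_gpu_idx, choose_worst_gpu,
                         valid_idxs, best, worst):
        if force_gpu_idxs is not None:
            # "0,1" -> [0, 1], keeping only indexes that actually exist
            return [int(s) for s in force_gpu_idxs.split(',') if int(s) in valid_idxs]
        if force_gpu_idx >= 0 and force_gpu_idx in valid_idxs:
            return [force_gpu_idx]
        gpu_idx = worst if choose_worst_gpu else best
        return [gpu_idx] if gpu_idx != -1 else []   # [] -> cpu_only

    resolve_gpu_idxs('0,1', -1, False, {0, 1}, 0, 1)   # [0, 1]
    resolve_gpu_idxs(None, -1, False, {0, 1}, 0, 1)    # [0]  (best GPU)

On top of this, the multi_gpu path expands the chosen index to all identically named cards via getDeviceIdxsEqualModel, and turns itself off if only one card matches.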
 
     @staticmethod
     def getDevicesWithAtLeastTotalMemoryGB(totalmemsize_gb):
+        if not hasNVML and totalmemsize_gb <= 2:
+            return [0]
+
         result = []
-        try:
-            nvmlInit()
-            for i in range(0, nvmlDeviceGetCount() ):
-                handle = nvmlDeviceGetHandleByIndex(i)
-                memInfo = nvmlDeviceGetMemoryInfo( handle )
-                if (memInfo.total) >= totalmemsize_gb*1024*1024*1024:
-                    result.append (i)
-            nvmlShutdown()
-        except:
-            pass
+        for i in range(nvmlDeviceGetCount()):
+            handle = nvmlDeviceGetHandleByIndex(i)
+            memInfo = nvmlDeviceGetMemoryInfo( handle )
+            if (memInfo.total) >= totalmemsize_gb*1024*1024*1024:
+                result.append (i)
         return result
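getDevicesWithAtLeastTotalMemoryGB is the filter a trainer can use to offer only cards large enough for a model. A usage sketch, assuming the module lives at nnlib/devicelib.py (an assumption about this repository's layout, not shown in the diff):

    from nnlib.devicelib import devicelib   # path assumed, see note above

    # Indexes of all cards with >= 4 GB of total VRAM. On an NVML-less
    # system only requests of <= 2 GB are covered by the [0] fallback.
    candidates = devicelib.getDevicesWithAtLeastTotalMemoryGB(4)
    if not candidates:
        print('no 4GB+ GPU found, CPU-only')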
 
     @staticmethod
-    def getAllDevicesIdxsList ():
-        result = []
-        try:
-            nvmlInit()
-            result = [ i for i in range(0, nvmlDeviceGetCount() ) ]
-            nvmlShutdown()
-        except:
-            pass
-        return result
+    def getAllDevicesIdxsList():
+        if not hasNVML:
+            return [0]
+
+        return [ i for i in range(0, nvmlDeviceGetCount() ) ]
 
     @staticmethod
-    def getAllDevicesIdxsWithNamesList ():
-        result = []
-        try:
-            nvmlInit()
-            result = [ (i, nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(i)).decode() ) for i in range(0, nvmlDeviceGetCount() ) ]
-            nvmlShutdown()
-        except:
-            pass
-        return result
+    def getAllDevicesIdxsWithNamesList():
+        if not hasNVML:
+            return [ (0, devicelib.getDeviceName(0) ) ]
+
+        return [ (i, nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(i)).decode() ) for i in range(nvmlDeviceGetCount() ) ]
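These two listing helpers now degrade gracefully without NVML: the index list collapses to [0] and the name list to the single generic entry. They are natural building blocks for a GPU chooser prompt; a usage sketch, with the import path assumed as before:

    from nnlib.devicelib import devicelib   # path assumed

    # One "[idx] : name" line per visible device; without NVML this
    # prints exactly "[0] : Generic GeForce GPU".
    for idx, name in devicelib.getAllDevicesIdxsWithNamesList():
        print('[%d] : %s' % (idx, name))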
 
     @staticmethod
     def getDeviceVRAMFree (idx):
-        result = 0
-        try:
-            nvmlInit()
-            if idx < nvmlDeviceGetCount():
-                handle = nvmlDeviceGetHandleByIndex(idx)
-                memInfo = nvmlDeviceGetMemoryInfo( handle )
-                result = (memInfo.total - memInfo.used)
-            nvmlShutdown()
-        except:
-            pass
-        return result
+        if not hasNVML:
+            return 2
+
+        if idx < nvmlDeviceGetCount():
+            memInfo = nvmlDeviceGetMemoryInfo( nvmlDeviceGetHandleByIndex(idx) )
+            return memInfo.total - memInfo.used
+
+        return 0
 
     @staticmethod
     def getDeviceVRAMTotalGb (idx):
-        result = 2
-        try:
-            nvmlInit()
-            if idx < nvmlDeviceGetCount():
-                handle = nvmlDeviceGetHandleByIndex(idx)
-                memInfo = nvmlDeviceGetMemoryInfo( handle )
-                result = memInfo.total / (1024*1024*1024)
-            nvmlShutdown()
-            result = round(result)
-        except:
-            pass
-        return result
+        if not hasNVML:
+            return 2
+
+        if idx < nvmlDeviceGetCount():
+            memInfo = nvmlDeviceGetMemoryInfo( nvmlDeviceGetHandleByIndex(idx) )
+            return round ( memInfo.total / (1024*1024*1024) )
+
+        return 0
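getDeviceVRAMTotalGb rounds the byte count NVML reports to whole gigabytes, which is the unit the model options compare against (for example the old vram gb <= 4 branch that the 'Lighter autoencoder' choice replaces):

    # memInfo.total is in bytes; an 11264 MiB card rounds to 11 GB.
    mem_total = 11264 * 1024 * 1024
    print(round(mem_total / (1024*1024*1024)))   # 11

Note the two NVML-less fallbacks use different units: getDeviceVRAMTotalGb's 2 means the assumed 2GB generic card, while getDeviceVRAMFree otherwise returns bytes, so its literal 2 reads as a placeholder value.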
 
     @staticmethod
     def getBestDeviceIdx():
-        idx = -1
-        try:
-            nvmlInit()
-            idx_mem = 0
-            for i in range(0, nvmlDeviceGetCount() ):
-                handle = nvmlDeviceGetHandleByIndex(i)
-                memInfo = nvmlDeviceGetMemoryInfo( handle )
-                if memInfo.total > idx_mem:
-                    idx = i
-                    idx_mem = memInfo.total
-            nvmlShutdown()
-        except:
-            pass
+        if not hasNVML:
+            return 0
+
+        idx = -1
+        idx_mem = 0
+        for i in range( nvmlDeviceGetCount() ):
+            memInfo = nvmlDeviceGetMemoryInfo( nvmlDeviceGetHandleByIndex(i) )
+            if memInfo.total > idx_mem:
+                idx = i
+                idx_mem = memInfo.total
+
         return idx
 
     @staticmethod
     def getWorstDeviceIdx():
-        idx = -1
-        try:
-            nvmlInit()
-            idx_mem = sys.maxsize
-            for i in range(0, nvmlDeviceGetCount() ):
-                handle = nvmlDeviceGetHandleByIndex(i)
-                memInfo = nvmlDeviceGetMemoryInfo( handle )
-                if memInfo.total < idx_mem:
-                    idx = i
-                    idx_mem = memInfo.total
-            nvmlShutdown()
-        except:
-            pass
+        if not hasNVML:
+            return 0
+
+        idx = -1
+        idx_mem = sys.maxsize
+        for i in range( nvmlDeviceGetCount() ):
+            memInfo = nvmlDeviceGetMemoryInfo( nvmlDeviceGetHandleByIndex(i) )
+            if memInfo.total < idx_mem:
+                idx = i
+                idx_mem = memInfo.total
+
         return idx
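Best and worst are decided purely by total VRAM: the loops are an argmax/argmin over (index, memInfo.total) pairs. An equivalent compact form, assuming the same from .pynvml import * namespace and an already-initialized NVML (ties resolve to the first index, as in the loops):

    # Same selection as getBestDeviceIdx / getWorstDeviceIdx.
    totals = [ (i, nvmlDeviceGetMemoryInfo(nvmlDeviceGetHandleByIndex(i)).total)
               for i in range(nvmlDeviceGetCount()) ]
    best_idx  = max(totals, key=lambda t: t[1])[0] if totals else -1
    worst_idx = min(totals, key=lambda t: t[1])[0] if totals else -1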
 
     @staticmethod
     def isValidDeviceIdx(idx):
-        result = False
-        try:
-            nvmlInit()
-            result = (idx < nvmlDeviceGetCount())
-            nvmlShutdown()
-        except:
-            pass
-        return result
+        if not hasNVML:
+            return (idx == 0)
+
+        return (idx < nvmlDeviceGetCount())
 
     @staticmethod
     def getDeviceIdxsEqualModel(idx):
-        result = []
-        try:
-            nvmlInit()
-            idx_name = nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(idx)).decode()
-            for i in range(0, nvmlDeviceGetCount() ):
-                if nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(i)).decode() == idx_name:
-                    result.append (i)
-            nvmlShutdown()
-        except:
-            pass
+        if not hasNVML:
+            return [0] if idx == 0 else []
+
+        result = []
+        idx_name = nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(idx)).decode()
+        for i in range( nvmlDeviceGetCount() ):
+            if nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(i)).decode() == idx_name:
+                result.append (i)
+
         return result
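getDeviceIdxsEqualModel is what makes multi_gpu safe: the chosen card is only grouped with cards whose NVML device name matches exactly, so mixed rigs silently fall back to single-GPU training. A sketch on a hypothetical three-card rig, with the import path assumed as before:

    # Hypothetical rig: idx 0 and 1 are 'GeForce GTX 1080 Ti', idx 2 is
    # 'GeForce GTX 1060'. Then:
    #   devicelib.getDeviceIdxsEqualModel(0)  -> [0, 1]  (multi_gpu stays on)
    #   devicelib.getDeviceIdxsEqualModel(2)  -> [2]     (one match, multi_gpu off)
    gpu_idxs = devicelib.getDeviceIdxsEqualModel(0)
    multi_gpu = len(gpu_idxs) > 1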
 
     @staticmethod
     def getDeviceName (idx):
-        result = 'Generic GeForce GPU'
-        try:
-            nvmlInit()
-            if idx < nvmlDeviceGetCount():
-                result = nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(idx)).decode()
-            nvmlShutdown()
-        except:
-            pass
-        return result
+        if not hasNVML:
+            return 'Generic GeForce GPU'
+
+        if idx < nvmlDeviceGetCount():
+            return nvmlDeviceGetName(nvmlDeviceGetHandleByIndex(idx)).decode()
+
+        return None
 
     @staticmethod
     def getDeviceComputeCapability(idx):
-        result = 0
-        try:
-            nvmlInit()
-            if idx < nvmlDeviceGetCount():
-                result = nvmlDeviceGetCudaComputeCapability(nvmlDeviceGetHandleByIndex(idx))
-            nvmlShutdown()
-        except:
-            pass
-        return result[0] * 10 + result[1]
+        if not hasNVML:
+            return 99 if idx == 0 else 0
+
+        result = 0
+        if idx < nvmlDeviceGetCount():
+            result = nvmlDeviceGetCudaComputeCapability(nvmlDeviceGetHandleByIndex(idx))
+        return result[0] * 10 + result[1]
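The capability tuple from nvmlDeviceGetCudaComputeCapability is packed into a single integer, major*10 + minor, that callers can compare against; the NVML-less branch returns 99 for device 0, so any capability check passes on the generic fallback card:

    # (major, minor) -> packed int, e.g. (6, 1) -> 61; (3, 5) -> 35.
    major, minor = 6, 1
    print(major * 10 + minor)   # 61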