mirror of
https://github.com/iperov/DeepFaceLab.git
synced 2025-08-22 06:23:20 -07:00
Fix for TCC-mode cards (Tesla): there was a conflict with plaidML initialization.
This commit is contained in:
parent
0bdba117ef
commit
7fe4cd4643
1 changed file with 29 additions and 23 deletions
|
@ -274,28 +274,34 @@ has_nvml_cap = False
|
||||||
#- CUDA build of DFL
|
#- CUDA build of DFL
|
||||||
# --- plaidML / NVIDIA device discovery --------------------------------------
#
# NVIDIA presence can be forced via the DFL_FORCE_HAS_NVIDIA_DEVICE env var.
# Per the commit message, this is the workaround for TCC-mode cards (e.g.
# Tesla), where probing through plaidML's OpenCL backend conflicts with
# initialization.
has_nvidia_device = os.environ.get("DFL_FORCE_HAS_NVIDIA_DEVICE", "0") == "1"

# Cache of discovered plaidML OpenCL devices.
#   None -> discovery has not run yet (plaidML is imported lazily, on demand)
#   list -> result: [{'id': ..., 'globalMemSize': int, 'description': str}, ...]
plaidML_devices = None

def get_plaidML_devices():
    """Return the list of usable plaidML OpenCL devices (GPUs only).

    Enumerates devices through plaidML's OpenCL backend and, as a side
    effect, sets the module-global ``has_nvidia_device`` flag when an
    NVIDIA vendor string is seen. The result is computed once and cached
    in the module-global ``plaidML_devices``; subsequent calls return the
    cached list. Discovery is best-effort: on any failure (plaidml not
    installed, OpenCL errors) the devices found so far — possibly an
    empty list — are cached and returned.
    """
    global plaidML_devices
    global has_nvidia_device

    if plaidML_devices is not None:  # already discovered — return the cache
        return plaidML_devices

    devices = []
    try:
        # Lets plaidML work without the user having run 'plaidml-setup'.
        os.environ['PLAIDML_EXPERIMENTAL'] = 'false'
        import plaidml
        ctx = plaidml.Context()
        try:
            for d in plaidml.devices(ctx, return_all=True)[0]:
                details = json.loads(d.details)
                if details['type'] == 'CPU':  # skipping opencl-CPU
                    continue
                if 'nvidia' in details['vendor'].lower():
                    has_nvidia_device = True
                devices.append({'id': d.id,
                                'globalMemSize': int(details['globalMemSize']),
                                'description': d.description.decode()})
        finally:
            # Release the plaidML context even if enumeration raised;
            # the original code leaked it on error.
            ctx.shutdown()
    except Exception:
        # Best-effort: missing plaidml / broken OpenCL simply yields no
        # devices. Narrowed from a bare `except:` so KeyboardInterrupt
        # and SystemExit still propagate.
        pass

    plaidML_devices = devices
    return plaidML_devices

# If NVIDIA presence was not forced via the env var, probe now so that
# has_nvidia_device reflects reality before a backend is chosen.
if not has_nvidia_device:
    get_plaidML_devices()
||||||
|
|
||||||
#choosing backend
|
#choosing backend
|
||||||
|
|
||||||
|
@ -324,7 +330,7 @@ if device.backend is None and not force_tf_cpu:
|
||||||
|
|
||||||
if force_plaidML or (device.backend is None and not has_nvidia_device):
|
if force_plaidML or (device.backend is None and not has_nvidia_device):
|
||||||
#tensorflow backend was failed without has_nvidia_device , or forcing plaidML, trying to use plaidML backend
|
#tensorflow backend was failed without has_nvidia_device , or forcing plaidML, trying to use plaidML backend
|
||||||
if plaidML_devices_count == 0:
|
if len(get_plaidML_devices()) == 0:
|
||||||
#print ("plaidML: No capable OpenCL devices found. Falling back to tensorflow backend.")
|
#print ("plaidML: No capable OpenCL devices found. Falling back to tensorflow backend.")
|
||||||
device.backend = None
|
device.backend = None
|
||||||
else:
|
else:
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue