Mirror of https://github.com/iperov/DeepFaceLab.git
fix ConverterMasked.py, changing requirements, changing device.py ENV vars
commit 5587c93e01 (parent c3eea0cf98)
7 changed files with 28 additions and 40 deletions
ConverterMasked.py

@@ -136,9 +136,15 @@ class ConverterMasked(Converter):
             debugs += [img_face_mask_aaa.copy()]
 
         if 'seamless' in self.mode:
-            img_face_seamless_mask_aaa = img_face_mask_aaa.copy() #mask used for cv2.seamlessClone
-            img_face_seamless_mask_aaa[img_face_seamless_mask_aaa > 0.9] = 1.0
-            img_face_seamless_mask_aaa[img_face_seamless_mask_aaa <= 0.9] = 0.0
+            #mask used for cv2.seamlessClone
+            img_face_seamless_mask_aaa = None
+            for i in range(9, 0, -1):
+                a = img_face_mask_aaa > i / 10.0
+                if len(np.argwhere(a)) == 0:
+                    continue
+                img_face_seamless_mask_aaa = img_face_mask_aaa.copy()
+                img_face_seamless_mask_aaa[a] = 1.0
+                img_face_seamless_mask_aaa[img_face_seamless_mask_aaa <= i / 10.0] = 0.0
 
         out_img = img_bgr.copy()
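The rewritten block replaces the fixed 0.9 cutoff with a search over thresholds, presumably so cv2.seamlessClone is never handed a mask with no foreground pixels: if nothing survives a given threshold, a looser one is tried, and if nothing survives at all the mask stays None. A minimal standalone sketch of that idea (the helper name, early return and toy input are illustrative, not taken from the commit):

    import numpy as np

    def make_seamless_mask(mask, thresholds=(0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1)):
        # Walk thresholds from strict to loose and return the first binary mask
        # that still selects at least one pixel; return None for an empty mask.
        for t in thresholds:
            selected = mask > t
            if not selected.any():
                continue                      # too strict, relax the threshold
            binary = mask.copy()
            binary[selected] = 1.0            # foreground
            binary[binary <= t] = 0.0         # background
            return binary
        return None                           # caller then skips seamless cloning

    # A weak mask (max value 0.35) produces an all-zero mask under the old fixed
    # 0.9 cutoff, but still yields a usable binary mask here.
    soft = np.zeros((4, 4), dtype=np.float32)
    soft[1:3, 1:3] = 0.35
    print(make_seamless_mask(soft))

The companion hunk below then enters the seamless path only when such a mask was actually found.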
@@ -155,8 +161,7 @@ class ConverterMasked(Converter):
             if self.raw_mode == 'predicted-only':
                 out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(out_img.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT )
 
-        else:
-
+        elif ('seamless' not in self.mode) or (img_face_seamless_mask_aaa is not None):
             #averaging [lenx, leny, maskx, masky] by grayscale gradients of upscaled mask
             ar = []
             for i in range(1, 10):
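Read together with the previous hunk, the new elif means blending is attempted only when a usable seamless mask exists; a toy restatement (the function and return strings are illustrative, and the assumption that the untouched frame is returned otherwise follows from out_img = img_bgr.copy() above):

    def merge(mode, seamless_mask):
        if mode == 'raw':
            return 'raw output'
        elif ('seamless' not in mode) or (seamless_mask is not None):
            return 'blended output'
        return 'original frame (seamless skipped: empty mask)'

    print(merge('seamless', None))   # seamless requested but no usable mask -> original frame
    print(merge('overlay', None))    # non-seamless modes are unaffected -> blended output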
device.py

@@ -3,10 +3,9 @@ import json
 import numpy as np
 from .pynvml import *
 
-
-#you can force_tf_min_req_cap 35, if your DFL is built for tf==1.5.0
+#you can set DFL_TF_MIN_REQ_CAP manually for your build
 #the reason why we cannot check tensorflow.version is it requires import tensorflow
-tf_min_req_cap = int(os.environ.get("force_tf_min_req_cap", 37))
+tf_min_req_cap = int(os.environ.get("DFL_TF_MIN_REQ_CAP", 35))
 
 class device:
     backend = None
@@ -260,14 +259,15 @@ class device:
         return result[0] * 10 + result[1]
 
 
-force_plaidML = os.environ.get("force_plaidML", "0") == "1"
+force_plaidML = os.environ.get("DFL_FORCE_PLAIDML", "0") == "1" #for OpenCL build , forcing using plaidML even if NVIDIA found
+force_tf_cpu = os.environ.get("DFL_FORCE_TF_CPU", "0") == "1" #for OpenCL build , forcing using tf-cpu if plaidML failed
 has_nvml = False
 has_nvml_cap = False
 
-#use force_has_nvidia_device=1 if
+#use DFL_FORCE_HAS_NVIDIA_DEVICE=1 if
 #- your NVIDIA cannot be seen by OpenCL
 #- CUDA build of DFL
-has_nvidia_device = os.environ.get("force_has_nvidia_device", "0") == "1"
+has_nvidia_device = os.environ.get("DFL_FORCE_HAS_NVIDIA_DEVICE", "0") == "1"
 
 plaidML_devices = []
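All tunables move to a DFL_ prefix with this commit. A hedged usage sketch, the values shown being the defaults the new code falls back to; the variables have to be set before device.py is imported, because they are read at module import time:

    import os

    os.environ["DFL_TF_MIN_REQ_CAP"] = "35"          # minimum CUDA compute capability, encoded major*10+minor (3.5 -> 35)
    os.environ["DFL_FORCE_PLAIDML"] = "0"            # OpenCL build: force plaidML even if an NVIDIA card is found
    os.environ["DFL_FORCE_TF_CPU"] = "0"             # OpenCL build: fall back to tensorflow-cpu if plaidML fails
    os.environ["DFL_FORCE_HAS_NVIDIA_DEVICE"] = "0"  # set to "1" if OpenCL cannot see your NVIDIA card, or on CUDA builds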
@@ -294,7 +294,7 @@ plaidML_devices_count = len(plaidML_devices)
 
 
 #choosing backend
-if device.backend is None:
+if device.backend is None and not force_tf_cpu:
     #first trying to load NVSMI and detect CUDA devices for tensorflow backend,
     #even force_plaidML is choosed, because if plaidML will fail, we can choose tensorflow
     try:
@@ -320,13 +320,15 @@ if device.backend is None:
     if not has_nvidia_device and (device.backend is None or force_plaidML):
         #tensorflow backend was failed without has_nvidia_device , or forcing plaidML, trying to use plaidML backend
         if plaidML_devices_count == 0:
-            print ("plaidML: No capable OpenCL devices found. Falling back to tensorflow backend.")
+            #print ("plaidML: No capable OpenCL devices found. Falling back to tensorflow backend.")
             device.backend = None
         else:
             device.backend = "plaidML"
 
     if device.backend is None:
-        if not has_nvml:
+        if force_tf_cpu:
+            device.backend = "tensorflow-cpu"
+        elif not has_nvml:
             if has_nvidia_device:
                 #some notebook systems have NVIDIA card without NVSMI in official drivers
                 #in that case considering we have system with one capable GPU and let tensorflow to choose best GPU
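For orientation, the selection order after this commit can be restated as a small pure function; this is an illustrative simplification (the real module also probes NVSMI, compute capability and the plaidML device list), and the final fallback branch is an assumption, not shown in this hunk:

    def choose_backend(force_tf_cpu, force_plaidML, has_nvidia_device, has_nvml,
                       plaidML_devices_count, cuda_probe_ok):
        backend = None
        # 1. The NVSMI/CUDA probe is skipped entirely when tf-cpu is forced.
        if not force_tf_cpu and cuda_probe_ok:
            backend = "tensorflow"
        # 2. plaidML is tried when no NVIDIA device is visible, or when it is forced.
        if not has_nvidia_device and (backend is None or force_plaidML):
            backend = "plaidML" if plaidML_devices_count > 0 else None
        # 3. Last resort.
        if backend is None:
            if force_tf_cpu:
                backend = "tensorflow-cpu"
            elif not has_nvml and has_nvidia_device:
                backend = "tensorflow"        # notebook card without NVSMI: let tensorflow pick the GPU
            else:
                backend = "tensorflow-cpu"    # assumption: generic fallback, outside this hunk
        return backend

    print(choose_backend(force_tf_cpu=True, force_plaidML=False, has_nvidia_device=False,
                         has_nvml=False, plaidML_devices_count=0, cuda_probe_ok=False))
    # -> tensorflow-cpu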
@@ -1,13 +0,0 @@
-numpy==1.16.1
-pathlib==1.0.1
-scandir==1.6
-h5py==2.7.1
-Keras==2.2.4
-opencv-python==4.0.0.21
-tensorflow-gpu==1.5.0
-plaidml-keras==0.5.0
-scikit-image
-dlib==19.10.0
-tqdm
-ffmpeg-python==0.1.17
-git+https://www.github.com/keras-team/keras-contrib.git
@@ -1,10 +1,8 @@
 numpy==1.16.1
 pathlib==1.0.1
-scandir==1.6
-h5py==2.7.1
+h5py==2.9.0
 Keras==2.2.4
 opencv-python==4.0.0.21
-tensorflow==1.13.1
+tensorflow==1.12.0
 scikit-image
 dlib==19.16.0
 tqdm
@@ -1,10 +1,8 @@
 numpy==1.16.1
 pathlib==1.0.1
-scandir==1.6
-h5py==2.7.1
+h5py==2.9.0
 Keras==2.2.4
 opencv-python==4.0.0.21
-tensorflow-gpu==1.13.1
+tensorflow-gpu==1.12.0
 plaidml-keras==0.5.0
 scikit-image
 dlib==19.16.0
@@ -1,10 +1,8 @@
 numpy==1.16.1
 pathlib==1.0.1
-scandir==1.6
-h5py==2.7.1
+h5py==2.9.0
 Keras==2.2.4
 opencv-python==4.0.0.21
-tensorflow==1.13.1
+tensorflow==1.12.0
 plaidml-keras==0.5.0
 scikit-image
 tqdm
@@ -1,5 +1,5 @@
 from pathlib import Path
-from scandir import scandir
+from os import scandir
 
 image_extensions = [".jpg", ".jpeg", ".png", ".tif", ".tiff"]
 
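The last hunk swaps the third-party scandir backport for the standard-library os.scandir (Python 3.5+), which is why scandir==1.6 disappears from the requirement files above. A small sketch of the equivalent directory scan, reusing the image_extensions list from the file (the helper name and the "." path are placeholders):

    from os import scandir
    from pathlib import Path

    image_extensions = [".jpg", ".jpeg", ".png", ".tif", ".tiff"]

    def list_image_paths(dir_path):
        # os.scandir yields DirEntry objects with cheap is_file() checks,
        # so no extra package is needed for fast directory listing.
        return sorted(Path(entry.path) for entry in scandir(str(dir_path))
                      if entry.is_file() and Path(entry.name).suffix.lower() in image_extensions)

    print(list_image_paths("."))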