diff --git a/converters/ConverterMasked.py b/converters/ConverterMasked.py
index e1cd15c..ee71336 100644
--- a/converters/ConverterMasked.py
+++ b/converters/ConverterMasked.py
@@ -136,9 +136,15 @@ class ConverterMasked(Converter):
             debugs += [img_face_mask_aaa.copy()]
 
         if 'seamless' in self.mode:
-            img_face_seamless_mask_aaa = img_face_mask_aaa.copy() #mask used for cv2.seamlessClone
-            img_face_seamless_mask_aaa[img_face_seamless_mask_aaa > 0.9] = 1.0
-            img_face_seamless_mask_aaa[img_face_seamless_mask_aaa <= 0.9] = 0.0
+            #mask used for cv2.seamlessClone
+            img_face_seamless_mask_aaa = None
+            for i in range(9, 0, -1):
+                a = img_face_mask_aaa > i / 10.0
+                if len(np.argwhere(a)) == 0:
+                    continue
+                img_face_seamless_mask_aaa = img_face_mask_aaa.copy()
+                img_face_seamless_mask_aaa[a] = 1.0
+                img_face_seamless_mask_aaa[img_face_seamless_mask_aaa <= i / 10.0] = 0.0
 
         out_img = img_bgr.copy()
 
@@ -155,8 +161,7 @@ class ConverterMasked(Converter):
             if self.raw_mode == 'predicted-only':
                 out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(out_img.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT )
 
-        else:
-
+        elif ('seamless' not in self.mode) or (img_face_seamless_mask_aaa is not None):
             #averaging [lenx, leny, maskx, masky] by grayscale gradients of upscaled mask
             ar = []
             for i in range(1, 10):
diff --git a/nnlib/device.py b/nnlib/device.py
index 27e973a..81164d4 100644
--- a/nnlib/device.py
+++ b/nnlib/device.py
@@ -3,10 +3,9 @@ import json
 import numpy as np
 from .pynvml import *
 
-
-#you can force_tf_min_req_cap 35, if your DFL is built for tf==1.5.0
+#you can set DFL_TF_MIN_REQ_CAP manually for your build
 #the reason why we cannot check tensorflow.version is it requires import tensorflow
-tf_min_req_cap = int(os.environ.get("force_tf_min_req_cap", 37))
+tf_min_req_cap = int(os.environ.get("DFL_TF_MIN_REQ_CAP", 35))
 
 class device:
     backend = None
@@ -260,14 +259,15 @@ class device:
 
         return result[0] * 10 + result[1]
 
 
-force_plaidML = os.environ.get("force_plaidML", "0") == "1"
+force_plaidML = os.environ.get("DFL_FORCE_PLAIDML", "0") == "1" #for OpenCL build , forcing using plaidML even if NVIDIA found
+force_tf_cpu = os.environ.get("DFL_FORCE_TF_CPU", "0") == "1" #for OpenCL build , forcing using tf-cpu if plaidML failed
 has_nvml = False
 has_nvml_cap = False
-#use force_has_nvidia_device=1 if
+#use DFL_FORCE_HAS_NVIDIA_DEVICE=1 if
 #- your NVIDIA cannot be seen by OpenCL
 #- CUDA build of DFL
-has_nvidia_device = os.environ.get("force_has_nvidia_device", "0") == "1"
+has_nvidia_device = os.environ.get("DFL_FORCE_HAS_NVIDIA_DEVICE", "0") == "1"
 
 plaidML_devices = []
 
@@ -294,7 +294,7 @@ plaidML_devices_count = len(plaidML_devices)
 
 
 #choosing backend
-if device.backend is None:
+if device.backend is None and not force_tf_cpu:
     #first trying to load NVSMI and detect CUDA devices for tensorflow backend,
     #even force_plaidML is choosed, because if plaidML will fail, we can choose tensorflow
     try:
@@ -320,13 +320,15 @@ if device.backend is None:
     if not has_nvidia_device and (device.backend is None or force_plaidML):
         #tensorflow backend was failed without has_nvidia_device , or forcing plaidML, trying to use plaidML backend
         if plaidML_devices_count == 0:
-            print ("plaidML: No capable OpenCL devices found. Falling back to tensorflow backend.")
+            #print ("plaidML: No capable OpenCL devices found. Falling back to tensorflow backend.")
             device.backend = None
         else:
             device.backend = "plaidML"
 
 if device.backend is None:
-    if not has_nvml:
+    if force_tf_cpu:
+        device.backend = "tensorflow-cpu"
+    elif not has_nvml:
         if has_nvidia_device:
             #some notebook systems have NVIDIA card without NVSMI in official drivers
             #in that case considering we have system with one capable GPU and let tensorflow to choose best GPU
diff --git a/requirements-SSE-cuda9.0-cudnn7.1.4.txt b/requirements-SSE-cuda9.0-cudnn7.1.4.txt
deleted file mode 100644
index 2a54fc1..0000000
--- a/requirements-SSE-cuda9.0-cudnn7.1.4.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-numpy==1.16.1
-pathlib==1.0.1
-scandir==1.6
-h5py==2.7.1
-Keras==2.2.4
-opencv-python==4.0.0.21
-tensorflow-gpu==1.5.0
-plaidml-keras==0.5.0
-scikit-image
-dlib==19.10.0
-tqdm
-ffmpeg-python==0.1.17
-git+https://www.github.com/keras-team/keras-contrib.git
diff --git a/requirements-cpu.txt b/requirements-cpu.txt
index bfb1cc1..5412e7d 100644
--- a/requirements-cpu.txt
+++ b/requirements-cpu.txt
@@ -1,10 +1,8 @@
 numpy==1.16.1
-pathlib==1.0.1
-scandir==1.6
-h5py==2.7.1
+h5py==2.9.0
 Keras==2.2.4
 opencv-python==4.0.0.21
-tensorflow==1.13.1
+tensorflow==1.12.0
 scikit-image
 dlib==19.16.0
 tqdm
diff --git a/requirements-AVX-cuda10.0-cudnn7.4.1.txt b/requirements-cuda.txt
similarity index 74%
rename from requirements-AVX-cuda10.0-cudnn7.4.1.txt
rename to requirements-cuda.txt
index 8ab28c1..384c40e 100644
--- a/requirements-AVX-cuda10.0-cudnn7.4.1.txt
+++ b/requirements-cuda.txt
@@ -1,10 +1,8 @@
 numpy==1.16.1
-pathlib==1.0.1
-scandir==1.6
-h5py==2.7.1
+h5py==2.9.0
 Keras==2.2.4
 opencv-python==4.0.0.21
-tensorflow-gpu==1.13.1
+tensorflow-gpu==1.12.0
 plaidml-keras==0.5.0
 scikit-image
 dlib==19.16.0
diff --git a/requirements-opencl.txt b/requirements-opencl.txt
index 7f51c18..2ba2274 100644
--- a/requirements-opencl.txt
+++ b/requirements-opencl.txt
@@ -1,10 +1,8 @@
 numpy==1.16.1
-pathlib==1.0.1
-scandir==1.6
-h5py==2.7.1
+h5py==2.9.0
 Keras==2.2.4
 opencv-python==4.0.0.21
-tensorflow==1.13.1
+tensorflow==1.12.0
 plaidml-keras==0.5.0
 scikit-image
 tqdm
diff --git a/utils/Path_utils.py b/utils/Path_utils.py
index 5305956..14a181d 100644
--- a/utils/Path_utils.py
+++ b/utils/Path_utils.py
@@ -1,5 +1,5 @@
 from pathlib import Path
-from scandir import scandir
+from os import scandir
 
 image_extensions = [".jpg", ".jpeg", ".png", ".tif", ".tiff"]
 
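For reference, a minimal standalone sketch of the new seamless-mask selection logic from the ConverterMasked.py hunk above. The function name make_seamless_mask, the synthetic usage at the bottom, and the early return after the first usable threshold are assumptions for illustration (the hunk's own loop has no break, so as written it keeps scanning down to 0.1); the shared goal is that cv2.seamlessClone never receives an all-zero mask.

import numpy as np

def make_seamless_mask(img_face_mask_aaa):
    # Hypothetical helper, not part of the diff: binarize a soft face mask for cv2.seamlessClone.
    # Try thresholds 0.9, 0.8, ..., 0.1 and keep the highest one that leaves any pixels set,
    # returning None when the mask is empty everywhere (the caller can then skip seamless blending).
    for i in range(9, 0, -1):
        a = img_face_mask_aaa > i / 10.0
        if not a.any():  # equivalent to len(np.argwhere(a)) == 0 in the diff
            continue
        seamless = img_face_mask_aaa.copy()
        seamless[a] = 1.0
        seamless[seamless <= i / 10.0] = 0.0
        return seamless  # assumption: stop at the first (highest) usable threshold
    return None

# Example: an all-zero mask yields None; a faint uniform mask of 0.2 is binarized at 0.1 to all ones.
print(make_seamless_mask(np.zeros((8, 8), dtype=np.float32)))
print(make_seamless_mask(np.full((8, 8), 0.2, dtype=np.float32)))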