update == 04.20.2019 == (#242)

* greatly improved FANSeg

* added FANSeg extractor for src and dst faces, for use in training (a rough masked-loss sketch follows this list)

* updated the 'partial' func

* trained FANSeg_256_full_face.h5; new experimental models: AVATAR, RecycleGAN

* fix for TCC-mode cards (Tesla): there was a conflict with plaidML initialization

* updated manuals
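The extractor bullet above only says that the FANSeg masks are fed into training. As a rough, hypothetical illustration of that idea (not DFL's actual training code), a segmentation mask can restrict a reconstruction loss to face pixels; the shapes, weighting, and choice of loss below are assumptions.

import numpy as np

def masked_reconstruction_loss(target, predicted, mask):
    # Mean absolute error counted only over masked (face) pixels.
    # target, predicted: (H, W, C) floats in [0, 1]; mask: (H, W, 1), 1.0 = face, 0.0 = background.
    diff = np.abs(target - predicted) * mask            # zero out background pixels
    denom = np.sum(mask) * target.shape[-1] + 1e-6      # number of contributing values
    return np.sum(diff) / denom

# toy usage: random arrays stand in for an aligned face pair and its FANSeg mask
target    = np.random.rand(256, 256, 3).astype(np.float32)
predicted = np.random.rand(256, 256, 3).astype(np.float32)
mask      = (np.random.rand(256, 256, 1) > 0.5).astype(np.float32)
print(masked_reconstruction_loss(target, predicted, mask))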
iperov 2019-04-20 08:23:08 +04:00 committed by GitHub
commit 046649e6be
32 changed files with 1152 additions and 329 deletions


@@ -274,28 +274,34 @@ has_nvml_cap = False
 #- CUDA build of DFL
 has_nvidia_device = os.environ.get("DFL_FORCE_HAS_NVIDIA_DEVICE", "0") == "1"
-plaidML_devices = []
-# Using plaidML OpenCL backend to determine system devices and has_nvidia_device
-try:
-    os.environ['PLAIDML_EXPERIMENTAL'] = 'false' # lets plaidML work without running 'plaidml-setup'
-    import plaidml
-    ctx = plaidml.Context()
-    for d in plaidml.devices(ctx, return_all=True)[0]:
-        details = json.loads(d.details)
-        if details['type'] == 'CPU': # skip OpenCL CPU devices
-            continue
-        if 'nvidia' in details['vendor'].lower():
-            has_nvidia_device = True
-        plaidML_devices += [ {'id'            : d.id,
-                              'globalMemSize' : int(details['globalMemSize']),
-                              'description'   : d.description.decode()
-                             } ]
-    ctx.shutdown()
-except:
-    pass
-plaidML_devices_count = len(plaidML_devices)
+plaidML_devices = None
+def get_plaidML_devices():
+    global plaidML_devices
+    global has_nvidia_device
+    if plaidML_devices is None:
+        plaidML_devices = []
+        # Using plaidML OpenCL backend to determine system devices and has_nvidia_device
+        try:
+            os.environ['PLAIDML_EXPERIMENTAL'] = 'false' # lets plaidML work without running 'plaidml-setup'
+            import plaidml
+            ctx = plaidml.Context()
+            for d in plaidml.devices(ctx, return_all=True)[0]:
+                details = json.loads(d.details)
+                if details['type'] == 'CPU': # skip OpenCL CPU devices
+                    continue
+                if 'nvidia' in details['vendor'].lower():
+                    has_nvidia_device = True
+                plaidML_devices += [ {'id'            : d.id,
+                                      'globalMemSize' : int(details['globalMemSize']),
+                                      'description'   : d.description.decode()
+                                     } ]
+            ctx.shutdown()
+        except:
+            pass
+    return plaidML_devices
+if not has_nvidia_device:
+    get_plaidML_devices()
 #choosing backend
@@ -324,7 +330,7 @@ if device.backend is None and not force_tf_cpu:
 if force_plaidML or (device.backend is None and not has_nvidia_device):
     # tensorflow backend failed and has_nvidia_device is False, or plaidML was forced; trying the plaidML backend
-    if plaidML_devices_count == 0:
+    if len(get_plaidML_devices()) == 0:
        #print ("plaidML: No capable OpenCL devices found. Falling back to tensorflow backend.")
        device.backend = None
    else:
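The refactor above turns the import-time plaidML enumeration into a memoized accessor: plaidml is only imported, and its OpenCL probing only runs, when the code actually asks for OpenCL devices (no NVIDIA device detected, or plaidML forced). That deferral is what resolves the conflict with TCC-mode Tesla cards noted in the changelog. A minimal standalone sketch of the same lazy-initialization pattern (the probe function and device fields are illustrative, not DFL's):

_devices = None   # module-level cache, playing the role of plaidML_devices above

def probe_opencl_devices():
    # stand-in for the real plaidml.devices(...) enumeration
    return [{'id': b'opencl0', 'globalMemSize': 8 * 1024**3, 'description': 'example GPU'}]

def get_devices():
    # Enumerate devices on the first call only, then reuse the cached list.
    global _devices
    if _devices is None:
        try:
            _devices = probe_opencl_devices()   # the expensive / conflicting work happens here, once
        except Exception:
            _devices = []                       # treat probe failure as "no devices"
    return _devices

# callers only trigger the probe when they actually need OpenCL devices
if len(get_devices()) == 0:
    print("no OpenCL devices, falling back to another backend")
else:
    print(f"found {len(get_devices())} OpenCL device(s)")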


@@ -52,7 +52,7 @@ Input = KL.Input
 Dense = KL.Dense
 Conv2D = nnlib.Conv2D
-Conv2DTranspose = KL.Conv2DTranspose
+Conv2DTranspose = nnlib.Conv2DTranspose
 SeparableConv2D = KL.SeparableConv2D
 MaxPooling2D = KL.MaxPooling2D
 UpSampling2D = KL.UpSampling2D
@@ -695,6 +695,26 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                 x = ReflectionPadding2D( self.pad ) (x)
                 return self.func(x)
         nnlib.Conv2D = Conv2D
+
+        class Conv2DTranspose():
+            def __init__ (self, *args, **kwargs):
+                self.reflect_pad = False
+                padding = kwargs.get('padding','')
+                if padding == 'zero':
+                    kwargs['padding'] = 'same'
+                if padding == 'reflect':
+                    kernel_size = kwargs['kernel_size']
+                    if (kernel_size % 2) == 1:
+                        self.pad = (kernel_size // 2,)*2
+                        kwargs['padding'] = 'valid'
+                        self.reflect_pad = True
+                self.func = keras.layers.Conv2DTranspose (*args, **kwargs)
+
+            def __call__(self, x):
+                if self.reflect_pad:
+                    x = ReflectionPadding2D( self.pad ) (x)
+                return self.func(x)
+        nnlib.Conv2DTranspose = Conv2DTranspose

     @staticmethod
     def import_keras_contrib(device_config):
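The wrapper added above emulates 'reflect' padding for a transposed convolution by reflection-padding the input by kernel_size // 2 and then running Conv2DTranspose with padding='valid'. A small self-contained sketch of the same idea against tf.keras (the Lambda/tf.pad layer stands in for nnlib's ReflectionPadding2D; the filter count and input size are arbitrary):

import tensorflow as tf
from tensorflow.keras import layers

def reflect_conv2d_transpose(x, filters, kernel_size=3, strides=2):
    # Reflection-pad the input by kernel_size // 2, then apply a 'valid' transposed conv,
    # mirroring the padding='reflect' branch of the wrapper in the diff above.
    pad = kernel_size // 2
    x = layers.Lambda(
        lambda t: tf.pad(t, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode='REFLECT')
    )(x)   # stand-in for nnlib's ReflectionPadding2D
    return layers.Conv2DTranspose(filters, kernel_size, strides=strides, padding='valid')(x)

inp = layers.Input(shape=(64, 64, 3))
out = reflect_conv2d_transpose(inp, filters=32)
tf.keras.Model(inp, out).summary()   # output spatial size differs from a padding='same' upsample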