mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-07-07 21:42:08 -07:00)
nothing interesting
This commit is contained in:
parent 9f58d160a0
commit bac9d5a99d
2 changed files with 100 additions and 13 deletions
@@ -1,13 +1,16 @@
 import colorsys
+import math
+from enum import IntEnum
+
 import cv2
 import numpy as np
-from enum import IntEnum
-import mathlib
+import numpy.linalg as npla
+
 import imagelib
+import mathlib
+from facelib import FaceType
 from imagelib import IEPolys
 from mathlib.umeyama import umeyama
-from facelib import FaceType
-import math
 
 landmarks_2D = np.array([
 [ 0.000213256, 0.106454 ], #17
@@ -246,7 +249,7 @@ def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
 
     return mat
 
-def get_image_hull_mask (image_shape, image_landmarks, ie_polys=None):
+def get_image_hull_mask (image_shape, image_landmarks, eyebrows_expand_mod=1.0, ie_polys=None):
     if len(image_landmarks) != 68:
         raise Exception('get_image_hull_mask works only with 68 landmarks')
     int_lmrks = np.array(image_landmarks.copy(), dtype=np.int)
@@ -270,8 +273,8 @@ def get_image_hull_mask (image_shape, image_landmarks, ie_polys=None):
     top_r = int_lmrks[22:27]
 
     # Adjust eyebrow arrays
-    int_lmrks[17:22] = top_l + ((top_l - bot_l) // 2)
-    int_lmrks[22:27] = top_r + ((top_r - bot_r) // 2)
+    int_lmrks[17:22] = top_l + eyebrows_expand_mod * 0.5 * (top_l - bot_l)
+    int_lmrks[22:27] = top_r + eyebrows_expand_mod * 0.5 * (top_r - bot_r)
 
     r_jaw = (int_lmrks[0:9], int_lmrks[17:18])
     l_jaw = (int_lmrks[8:17], int_lmrks[26:27])
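The change above replaces a fixed expansion (half of the eyebrow-to-eye offset, with integer floor division) by the same offset scaled with the new eyebrows_expand_mod factor; the default of 1.0 roughly reproduces the old result, but without the integer rounding, and larger values push the eyebrow landmarks further out. A minimal NumPy sketch of the two expressions on made-up coordinates (the values below are illustrative, not real landmarks):

import numpy as np

# Stand-ins for int_lmrks[17:22] (top_l) and the matching lower points (bot_l);
# coordinates are made up purely to show the arithmetic.
top_l = np.array([[10, 40], [20, 35], [30, 33], [40, 35], [50, 40]])
bot_l = np.array([[10, 55], [20, 52], [30, 50], [40, 52], [50, 55]])

# Old expression: fixed half offset, floor division on integer arrays.
old = top_l + ((top_l - bot_l) // 2)

# New expression: same offset scaled by a configurable factor (float result).
eyebrows_expand_mod = 1.0
new = top_l + eyebrows_expand_mod * 0.5 * (top_l - bot_l)

print(old[:, 1])   # [32 26 24 26 32] -- floored
print(new[:, 1])   # [32.5 26.5 24.5 26.5 32.5] -- a larger mod pushes eyebrows further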
@@ -394,7 +397,7 @@ def draw_landmarks (image, image_landmarks, color=(0,255,0), transparent_mask=Fa
         cv2.circle(image, (x, y), 2, color, lineType=cv2.LINE_AA)
 
     if transparent_mask:
-        mask = get_image_hull_mask (image.shape, image_landmarks, ie_polys)
+        mask = get_image_hull_mask (image.shape, image_landmarks, ie_polys=ie_polys)
         image[...] = ( image * (1-mask) + image * mask / 2 )[...]
 
 def draw_rect_landmarks (image, rect, image_landmarks, face_size, face_type, transparent_mask=False, ie_polys=None, landmarks_color=(0,255,0)):
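The switch to ie_polys=ie_polys above is required by the signature change in the earlier hunk: eyebrows_expand_mod now sits before ie_polys, so a positional third argument would land in the wrong parameter. A small sketch of the pitfall using hypothetical stand-in functions (not the real ones):

# Hypothetical stand-ins that mimic the old and new signatures.
def hull_mask_old(image_shape, image_landmarks, ie_polys=None):
    return ie_polys

def hull_mask_new(image_shape, image_landmarks, eyebrows_expand_mod=1.0, ie_polys=None):
    return eyebrows_expand_mod, ie_polys

polys = "IEPolys instance (stand-in)"

hull_mask_old(None, None, polys)           # ie_polys receives polys, as intended
hull_mask_new(None, None, polys)           # (polys, None): polys lands in eyebrows_expand_mod
hull_mask_new(None, None, ie_polys=polys)  # (1.0, polys): matches the updated call above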
The remaining hunks are in the second changed file, the nnlib module.

@@ -53,10 +53,12 @@ Input = KL.Input
 Dense = KL.Dense
 Conv2D = nnlib.Conv2D
 Conv2DTranspose = nnlib.Conv2DTranspose
+EqualConv2D = nnlib.EqualConv2D
 SeparableConv2D = KL.SeparableConv2D
 MaxPooling2D = KL.MaxPooling2D
 UpSampling2D = KL.UpSampling2D
 BatchNormalization = KL.BatchNormalization
+PixelNormalization = nnlib.PixelNormalization
 
 LeakyReLU = KL.LeakyReLU
 ReLU = KL.ReLU
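These two lines export the new layers alongside the other Keras symbols, so model code that pulls its layer names from nnlib can use them directly. A minimal, hypothetical usage sketch, assuming the nnlib exports (Input, LeakyReLU, and the two new layers) are already in scope the way the models normally obtain them:

# Hypothetical builder fragment; Input, EqualConv2D, PixelNormalization and
# LeakyReLU are assumed to come from nnlib's exported Keras symbols.
def conv_block(x, filters):
    x = EqualConv2D(filters, kernel_size=3, padding='same')(x)  # runtime-scaled conv
    x = LeakyReLU(0.2)(x)
    x = PixelNormalization()(x)  # per-pixel feature-vector normalization
    return x

inp = Input((64, 64, 3))
out = conv_block(conv_block(inp, 64), 128)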
@@ -809,6 +811,88 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                 return self.func(x)
         nnlib.Conv2DTranspose = Conv2DTranspose
 
+        class EqualConv2D(KL.Conv2D):
+            def __init__(self, filters,
+                         kernel_size,
+                         strides=(1, 1),
+                         padding='valid',
+                         data_format=None,
+                         dilation_rate=(1, 1),
+                         activation=None,
+                         use_bias=True,
+                         gain=np.sqrt(2),
+                         **kwargs):
+                super().__init__(
+                    filters=filters,
+                    kernel_size=kernel_size,
+                    strides=strides,
+                    padding=padding,
+                    data_format=data_format,
+                    dilation_rate=dilation_rate,
+                    activation=activation,
+                    use_bias=use_bias,
+                    kernel_initializer=keras.initializers.RandomNormal(mean=0.0, stddev=1.0),
+                    bias_initializer='zeros',
+                    kernel_regularizer=None,
+                    bias_regularizer=None,
+                    activity_regularizer=None,
+                    kernel_constraint=None,
+                    bias_constraint=None,
+                    **kwargs)
+                self.gain = gain
+
+            def build(self, input_shape):
+                super().build(input_shape)
+
+                self.wscale = self.gain / np.sqrt( np.prod( K.int_shape(self.kernel)[:-1]) )
+                self.wscale_t = K.constant (self.wscale, dtype=K.floatx() )
+
+            def call(self, inputs):
+                k = self.kernel * self.wscale_t
+
+                outputs = K.conv2d(
+                    inputs,
+                    k,
+                    strides=self.strides,
+                    padding=self.padding,
+                    data_format=self.data_format,
+                    dilation_rate=self.dilation_rate)
+
+                if self.use_bias:
+                    outputs = K.bias_add(
+                        outputs,
+                        self.bias,
+                        data_format=self.data_format)
+
+                if self.activation is not None:
+                    return self.activation(outputs)
+                return outputs
+        nnlib.EqualConv2D = EqualConv2D
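EqualConv2D, added above, keeps its stored kernel drawn from a unit normal distribution and rescales it at run time by wscale = gain / sqrt(fan_in), where fan_in is the product of the kernel's spatial and input-channel dimensions (the equalized-learning-rate trick: He-style variance while the trainable weights themselves stay at unit scale). A small standalone check of that constant; the kernel shape is illustrative:

import numpy as np

# wscale as computed in EqualConv2D.build(): gain / sqrt(prod(kernel_shape[:-1])).
gain = np.sqrt(2)
kernel_shape = (3, 3, 64, 128)        # (kh, kw, in_channels, out_channels), illustrative
fan_in = np.prod(kernel_shape[:-1])   # 3 * 3 * 64 = 576
wscale = gain / np.sqrt(fan_in)

# A unit-normal kernel scaled by wscale ends up with the same spread as a kernel
# initialized directly with stddev = gain / sqrt(fan_in) (He initialization).
k = np.random.normal(0.0, 1.0, kernel_shape) * wscale
print(wscale, k.std())                # both roughly 0.059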
+
+        class PixelNormalization(KL.Layer):
+            # initialize the layer
+            def __init__(self, **kwargs):
+                super(PixelNormalization, self).__init__(**kwargs)
+
+            # perform the operation
+            def call(self, inputs):
+                # calculate square pixel values
+                values = inputs**2.0
+                # calculate the mean pixel values
+                mean_values = K.mean(values, axis=-1, keepdims=True)
+                # ensure the mean is not zero
+                mean_values += 1.0e-8
+                # calculate the sqrt of the mean squared value (L2 norm)
+                l2 = K.sqrt(mean_values)
+                # normalize values by the l2 norm
+                normalized = inputs / l2
+                return normalized
+
+            # define the output shape of the layer
+            def compute_output_shape(self, input_shape):
+                return input_shape
+        nnlib.PixelNormalization = PixelNormalization
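PixelNormalization, added above, divides each pixel's channel vector by the root mean square of its values across the channel axis (plus a small epsilon), so every spatial position ends up with a feature vector of roughly unit RMS. A quick NumPy mirror of the layer's call() for checking that property on random data:

import numpy as np

# NumPy equivalent of PixelNormalization.call() on an illustrative activation tensor.
x = np.random.randn(2, 4, 4, 8)        # (batch, height, width, channels)
mean_sq = np.mean(x ** 2.0, axis=-1, keepdims=True) + 1.0e-8
normalized = x / np.sqrt(mean_sq)

# After normalization the per-pixel RMS across channels is ~1 everywhere.
rms = np.sqrt(np.mean(normalized ** 2.0, axis=-1))
print(rms.min(), rms.max())            # both very close to 1.0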
+
     @staticmethod
     def import_keras_contrib(device_config):
         if nnlib.keras_contrib is not None: