New script:

5.XSeg) data_dst/src mask for XSeg trainer - fetch.bat
Copies faces containing XSeg polygons to aligned_xseg\ dir.
Useful only if you want to collect labeled faces and reuse them in other fakes.

Now you can use trained XSeg mask in the SAEHD training process.
This means the default ‘full_face’ mask obtained from landmarks will be replaced with the mask obtained from the trained XSeg model.
Use:
5.XSeg.optional) trained mask for data_dst/data_src - apply.bat
5.XSeg.optional) trained mask for data_dst/data_src - remove.bat

Normally you don’t need it. You can use it, if you want to use ‘face_style’ and ‘bg_style’ with obstructions.

XSeg trainer : now you can choose the type of face
XSeg trainer : now you can restart training in “override settings”
Merger: XSeg-* modes now can be used with all types of faces.

Therefore the old MaskEditor, FANSEG models, and FAN-x modes have been removed,
because the new XSeg solution is better, simpler and more convenient, costing only about 1 hour of manual masking for a regular deepfake.
This commit is contained in:
Colombo 2020-03-30 14:00:40 +04:00
commit 6d3607a13d
30 changed files with 279 additions and 1520 deletions

View file

@ -1,109 +0,0 @@
import numpy as np
import cv2
class IEPolysPoints:
    """Ordered 2D point list for a single include/exclude polygon.

    A cursor ``n`` into the point array supports undo (n_dec) and redo
    (n_inc) of point edits; the redo tail survives until n_clip() or a
    new add() discards it. Every mutation flags the owning IEPolys as
    dirty so its cached mask overlay is rebuilt.
    """

    def __init__(self, IEPolys_parent, type):
        self.parent = IEPolys_parent   # owning IEPolys, receives the dirty flag
        self.type = type               # 1 = include region, anything else = exclude
        self.points = np.empty((0, 2), dtype=np.int32)
        self.n_max = self.n = 0        # n: undo/redo cursor, n_max: high-water mark

    def _mark_dirty(self):
        # Any mutation invalidates the parent's cached overlay.
        self.parent.dirty = True

    def add(self, x, y):
        """Append point (x, y) at the cursor, discarding any redo tail."""
        kept = self.points[0:self.n]
        self.points = np.append(kept, [(x, y)], axis=0)
        self.n_max = self.n = self.n + 1
        self._mark_dirty()

    def n_dec(self):
        """Step the cursor back one point (undo); return the new cursor."""
        if self.n > 0:
            self.n -= 1
        self._mark_dirty()
        return self.n

    def n_inc(self):
        """Step the cursor forward one point (redo); return the new cursor."""
        if self.n < len(self.points):
            self.n += 1
        self._mark_dirty()
        return self.n

    def n_clip(self):
        """Permanently drop the redo tail beyond the cursor."""
        self.points = self.points[0:self.n]
        self.n_max = self.n

    def cur_point(self):
        """Return the point just behind the cursor (the last active point)."""
        return self.points[self.n - 1]

    def points_to_n(self):
        """Return the active points, i.e. everything up to the cursor."""
        return self.points[0:self.n]

    def set_points(self, points):
        """Replace all points; the cursor moves to the end.

        NOTE(review): unlike __init__, no int32 dtype is forced here —
        presumably callers always pass integer coordinates; confirm.
        """
        self.points = np.array(points)
        self.n_max = self.n = len(points)
        self._mark_dirty()
class IEPolys:
    """Editable stack of include/exclude polygons with undo/redo.

    ``list`` holds IEPolysPoints entries and ``n`` is a cursor into it,
    so whole polygons can be undone (n_dec) and redone (n_inc). The
    ``dirty`` flag marks that the rasterized mask is stale; consumers
    read-and-clear it via switch_dirty().
    """

    def __init__(self):
        self.list = []
        self.n_max = self.n = 0
        self.dirty = True

    def add(self, type):
        """Start a new polygon of the given type, discarding any redo tail."""
        self.list = self.list[:self.n]
        poly = IEPolysPoints(self, type)
        self.list.append(poly)
        self.n_max = self.n = self.n + 1
        self.dirty = True
        return poly

    def n_dec(self):
        """Undo one polygon; return the new cursor."""
        self.n = self.n - 1 if self.n > 0 else 0
        self.dirty = True
        return self.n

    def n_inc(self):
        """Redo one polygon; return the new cursor."""
        limit = len(self.list)
        self.n = self.n + 1 if self.n < limit else limit
        self.dirty = True
        return self.n

    def n_list(self):
        """Return the polygon just behind the cursor (the current one)."""
        return self.list[self.n - 1]

    def n_clip(self):
        """Drop the redo tail: trailing polygons and the last polygon's tail."""
        self.list = self.list[:self.n]
        self.n_max = self.n
        if self.n > 0:
            self.list[-1].n_clip()

    def __iter__(self):
        # Iterate only the active polygons (up to the cursor).
        yield from self.list[:self.n]

    def switch_dirty(self):
        """Return the dirty flag and clear it (one-shot consume)."""
        was_dirty, self.dirty = self.dirty, False
        return was_dirty

    def overlay_mask(self, mask):
        """Rasterize active polygons onto mask: include fills 1, exclude fills 0."""
        h, w, c = mask.shape
        fill_for = {True: (1,) * c, False: (0,) * c}
        for poly in self:
            if poly.n > 0:
                cv2.fillPoly(mask, [poly.points_to_n()], fill_for[poly.type == 1])

    def get_total_points(self):
        """Return the total active point count across active polygons."""
        return sum(poly.n for poly in self)

    def dump(self):
        """Serialize active polygons to [(type, [[x, y], ...]), ...]."""
        return [(poly.type, poly.points_to_n().tolist()) for poly in self]

    @staticmethod
    def load(ie_polys=None):
        """Rebuild an IEPolys from dump() output; None/non-list yields empty."""
        obj = IEPolys()
        if isinstance(ie_polys, list):
            for (type, points) in ie_polys:
                obj.add(type).set_points(points)
        return obj

View file

@ -15,7 +15,6 @@ from .color_transfer import color_transfer, color_transfer_mix, color_transfer_s
from .common import normalize_channels, cut_odd_image, overlay_alpha_image
from .IEPolys import IEPolys
from .SegIEPolys import *
from .blursharpen import LinearMotionBlur, blursharpen

View file

@ -1,92 +0,0 @@
"""
using https://github.com/ternaus/TernausNet
TernausNet: U-Net with VGG11 Encoder Pre-Trained on ImageNet for Image Segmentation
"""
from core.leras import nn
tf = nn.tf
class Ternaus(nn.ModelBase):
    """U-Net-style segmentation network with a VGG11-like encoder.

    Based on TernausNet (https://github.com/ternaus/TernausNet), per the
    module docstring. forward() returns single-channel logits and their
    sigmoid (the predicted mask).
    """

    def on_build(self, in_ch, base_ch):
        # Encoder convolutions; the numeric suffixes mirror VGG11
        # 'features' layer indices from the reference implementation.
        self.features_0 = nn.Conv2D (in_ch, base_ch, kernel_size=3, padding='SAME')
        self.features_3 = nn.Conv2D (base_ch, base_ch*2, kernel_size=3, padding='SAME')
        self.features_6 = nn.Conv2D (base_ch*2, base_ch*4, kernel_size=3, padding='SAME')
        self.features_8 = nn.Conv2D (base_ch*4, base_ch*4, kernel_size=3, padding='SAME')
        self.features_11 = nn.Conv2D (base_ch*4, base_ch*8, kernel_size=3, padding='SAME')
        self.features_13 = nn.Conv2D (base_ch*8, base_ch*8, kernel_size=3, padding='SAME')
        self.features_16 = nn.Conv2D (base_ch*8, base_ch*8, kernel_size=3, padding='SAME')
        self.features_18 = nn.Conv2D (base_ch*8, base_ch*8, kernel_size=3, padding='SAME')

        # Anti-aliased downsampling between encoder stages (BlurPool
        # instead of plain max-pool/stride).
        self.blurpool_0 = nn.BlurPool (filt_size=3)
        self.blurpool_3 = nn.BlurPool (filt_size=3)
        self.blurpool_8 = nn.BlurPool (filt_size=3)
        self.blurpool_13 = nn.BlurPool (filt_size=3)
        self.blurpool_18 = nn.BlurPool (filt_size=3)

        # Bottleneck.
        self.conv_center = nn.Conv2D (base_ch*8, base_ch*8, kernel_size=3, padding='SAME')

        # Decoder: each stage upsamples (Conv2DTranspose), concatenates
        # the matching encoder skip, then convolves. Input channel counts
        # (e.g. base_ch*12 = base_ch*4 upsampled + base_ch*8 skip) account
        # for the concatenation.
        self.conv1_up = nn.Conv2DTranspose (base_ch*8, base_ch*4, kernel_size=3, padding='SAME')
        self.conv1 = nn.Conv2D (base_ch*12, base_ch*8, kernel_size=3, padding='SAME')

        self.conv2_up = nn.Conv2DTranspose (base_ch*8, base_ch*4, kernel_size=3, padding='SAME')
        self.conv2 = nn.Conv2D (base_ch*12, base_ch*8, kernel_size=3, padding='SAME')

        self.conv3_up = nn.Conv2DTranspose (base_ch*8, base_ch*2, kernel_size=3, padding='SAME')
        self.conv3 = nn.Conv2D (base_ch*6, base_ch*4, kernel_size=3, padding='SAME')

        self.conv4_up = nn.Conv2DTranspose (base_ch*4, base_ch, kernel_size=3, padding='SAME')
        self.conv4 = nn.Conv2D (base_ch*3, base_ch*2, kernel_size=3, padding='SAME')

        self.conv5_up = nn.Conv2DTranspose (base_ch*2, base_ch//2, kernel_size=3, padding='SAME')
        self.conv5 = nn.Conv2D (base_ch//2+base_ch, base_ch, kernel_size=3, padding='SAME')

        # Final 1-channel projection producing the mask logits.
        self.out_conv = nn.Conv2D (base_ch, 1, kernel_size=3, padding='SAME')

    def forward(self, inp):
        """Run the network; returns (logits, sigmoid(logits))."""
        x, = inp  # single-tensor input

        # --- encoder; x0..x4 are skip connections saved pre-downsample ---
        x = x0 = tf.nn.relu(self.features_0(x))
        x = self.blurpool_0(x)

        x = x1 = tf.nn.relu(self.features_3(x))
        x = self.blurpool_3(x)

        x = tf.nn.relu(self.features_6(x))
        x = x2 = tf.nn.relu(self.features_8(x))
        x = self.blurpool_8(x)

        x = tf.nn.relu(self.features_11(x))
        x = x3 = tf.nn.relu(self.features_13(x))
        x = self.blurpool_13(x)

        x = tf.nn.relu(self.features_16(x))
        x = x4 = tf.nn.relu(self.features_18(x))
        x = self.blurpool_18(x)

        # --- bottleneck (no activation here in the original) ---
        x = self.conv_center(x)

        # --- decoder; each stage: upsample, concat skip (channel axis), conv ---
        x = tf.nn.relu(self.conv1_up(x))
        x = tf.concat( [x,x4], nn.conv2d_ch_axis)
        x = tf.nn.relu(self.conv1(x))

        x = tf.nn.relu(self.conv2_up(x))
        x = tf.concat( [x,x3], nn.conv2d_ch_axis)
        x = tf.nn.relu(self.conv2(x))

        x = tf.nn.relu(self.conv3_up(x))
        x = tf.concat( [x,x2], nn.conv2d_ch_axis)
        x = tf.nn.relu(self.conv3(x))

        x = tf.nn.relu(self.conv4_up(x))
        x = tf.concat( [x,x1], nn.conv2d_ch_axis)
        x = tf.nn.relu(self.conv4(x))

        x = tf.nn.relu(self.conv5_up(x))
        x = tf.concat( [x,x0], nn.conv2d_ch_axis)
        x = tf.nn.relu(self.conv5(x))

        logits = self.out_conv(x)
        return logits, tf.nn.sigmoid(logits)

# Register the model on the leras namespace so it is reachable as nn.Ternaus.
nn.Ternaus = Ternaus

View file

@ -1,5 +1,4 @@
from .ModelBase import *
from .PatchDiscriminator import *
from .CodeDiscriminator import *
from .Ternaus import *
from .XSeg import *