Merge commit 'a6438ca494' into merge-from-upstream/2021-06-02

# Conflicts:
#	core/imagelib/warp.py
#	core/leras/nn.py
#	main.py
#	mainscripts/Trainer.py
#	models/Model_SAEHD/Model.py
#	requirements-cuda.txt
Commit 07027f5d6e by Jeremy Hummel, 2021-06-02 09:07:17 -07:00
36 changed files with 2057 additions and 347 deletions

View file

@@ -26,9 +26,9 @@ https://arxiv.org/abs/2005.05535</a>
 <p align="center">
-![](doc/logo_cuda.png)
 ![](doc/logo_tensorflow.png)
-![](doc/logo_python.png)
+![](doc/logo_cuda.png)
+![](doc/logo_directx.png)
 </p>
@@ -201,7 +201,7 @@ Unfortunately, there is no "make everything ok" button in DeepFaceLab. You shoul
 </td></tr>
 <tr><td align="right">
-<a href="https://tinyurl.com/y8lntghz">Windows (magnet link)</a>
+<a href="https://tinyurl.com/87vwbtr4">Windows (magnet link)</a>
 </td><td align="center">Last release. Use torrent client to download.</td></tr>
 <tr><td align="right">
@@ -340,10 +340,6 @@ QQ 951138799
 bitcoin:bc1qkhh7h0gwwhxgg6h6gpllfgstkd645fefrd5s6z
 </td></tr>
-<tr><td align="right">
-Alipay 捐款
-</td><td align="center"> <img src="doc/Alipay_donation.jpg" align="center"> </td></tr>
 <tr><td colspan=2 align="center">
 ### Collect facesets

View file

@@ -17,6 +17,7 @@ class QIconDB():
         QIconDB.poly_type_exclude = QIcon ( str(icon_path / 'poly_type_exclude.png') )
         QIconDB.left = QIcon ( str(icon_path / 'left.png') )
         QIconDB.right = QIcon ( str(icon_path / 'right.png') )
+        QIconDB.trashcan = QIcon ( str(icon_path / 'trashcan.png') )
         QIconDB.pt_edit_mode = QIcon ( str(icon_path / 'pt_edit_mode.png') )
         QIconDB.view_lock_center = QIcon ( str(icon_path / 'view_lock_center.png') )
         QIconDB.view_baked = QIcon ( str(icon_path / 'view_baked.png') )

View file

@@ -85,6 +85,11 @@ class QStringDB():
                                            'zh' : '保存并转到下一张图片\n按住SHIFT : 加快\n按住CTRL : 跳过未标记的\n',
                                          }[lang]
+        QStringDB.btn_delete_image_tip = { 'en' : 'Move to _trash and Next image\n',
+                                           'ru' : 'Переместить в _trash и следующее изображение\n',
+                                           'zh' : '移至_trash转到下一张图片 ',
+                                         }[lang]
+
         QStringDB.loading_tip = {'en' : 'Loading',
                                  'ru' : 'Загрузка',
                                  'zh' : '正在载入',

View file

@@ -1164,6 +1164,7 @@ class MainWindow(QXMainWindow):
         super().__init__()
         self.input_dirpath = input_dirpath
+        self.trash_dirpath = input_dirpath.parent / (input_dirpath.name + '_trash')
         self.cfg_root_path = cfg_root_path

         self.cfg_path = cfg_root_path / 'MainWindow_cfg.dat'
@@ -1341,7 +1342,18 @@ class MainWindow(QXMainWindow):
         self.update_cached_images()
         self.update_preview_bar()

+    def trash_current_image(self):
+        self.process_next_image()
+
+        img_path = self.image_paths_done.pop(-1)
+        img_path = Path(img_path)
+        self.trash_dirpath.mkdir(parents=True, exist_ok=True)
+        img_path.rename( self.trash_dirpath / img_path.name )
+
+        self.update_cached_images()
+        self.update_preview_bar()
+
     def initialize_ui(self):

         self.canvas = QCanvas()
@@ -1356,20 +1368,36 @@ class MainWindow(QXMainWindow):
         btn_next_image = QXIconButton(QIconDB.right, QStringDB.btn_next_image_tip, shortcut='D', click_func=self.process_next_image)
         btn_next_image.setIconSize(QUIConfig.preview_bar_icon_q_size)

+        btn_delete_image = QXIconButton(QIconDB.trashcan, QStringDB.btn_delete_image_tip, shortcut='X', click_func=self.trash_current_image)
+        btn_delete_image.setIconSize(QUIConfig.preview_bar_icon_q_size)
+
+        pad_image = QWidget()
+        pad_image.setFixedSize(QUIConfig.preview_bar_icon_q_size)
+
         preview_image_bar_frame_l = QHBoxLayout()
         preview_image_bar_frame_l.setContentsMargins(0,0,0,0)
+        preview_image_bar_frame_l.addWidget ( pad_image, alignment=Qt.AlignCenter)
         preview_image_bar_frame_l.addWidget ( btn_prev_image, alignment=Qt.AlignCenter)
         preview_image_bar_frame_l.addWidget ( image_bar)
         preview_image_bar_frame_l.addWidget ( btn_next_image, alignment=Qt.AlignCenter)
+        #preview_image_bar_frame_l.addWidget ( btn_delete_image, alignment=Qt.AlignCenter)

         preview_image_bar_frame = QFrame()
         preview_image_bar_frame.setSizePolicy ( QSizePolicy.Fixed, QSizePolicy.Fixed )
         preview_image_bar_frame.setLayout(preview_image_bar_frame_l)

-        preview_image_bar_l = QHBoxLayout()
-        preview_image_bar_l.addWidget (preview_image_bar_frame)
+        preview_image_bar_frame2_l = QHBoxLayout()
+        preview_image_bar_frame2_l.setContentsMargins(0,0,0,0)
+        preview_image_bar_frame2_l.addWidget ( btn_delete_image, alignment=Qt.AlignCenter)
+
+        preview_image_bar_frame2 = QFrame()
+        preview_image_bar_frame2.setSizePolicy ( QSizePolicy.Fixed, QSizePolicy.Fixed )
+        preview_image_bar_frame2.setLayout(preview_image_bar_frame2_l)
+
+        preview_image_bar_l = QHBoxLayout()
+        preview_image_bar_l.addWidget (preview_image_bar_frame, alignment=Qt.AlignCenter)
+        preview_image_bar_l.addWidget (preview_image_bar_frame2)

         preview_image_bar = QFrame()
         preview_image_bar.setFrameShape(QFrame.StyledPanel)
         preview_image_bar.setSizePolicy ( QSizePolicy.Expanding, QSizePolicy.Fixed )
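
The new trashcan button (shortcut X) advances to the next image, then moves the just-finished file into a sibling `_trash` directory. A minimal sketch of that move outside the Qt plumbing; the function name and paths are illustrative, not part of the editor:

    from pathlib import Path

    def move_to_trash(img_path: Path, input_dirpath: Path) -> Path:
        # derived exactly as MainWindow does: <input>/../<input>_trash
        trash_dirpath = input_dirpath.parent / (input_dirpath.name + '_trash')
        trash_dirpath.mkdir(parents=True, exist_ok=True)  # created lazily, on first use
        target = trash_dirpath / img_path.name
        img_path.rename(target)                           # move, not delete
        return target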

(New binary image file, 3.2 KiB; not shown.)

View file

@@ -77,6 +77,8 @@ class SegIEPoly():
         self.pts = np.array(pts)
         self.n_max = self.n = len(pts)

+    def mult_points(self, val):
+        self.pts *= val
+
@@ -136,7 +138,11 @@ class SegIEPolys():
     def dump(self):
         return {'polys' : [ poly.dump() for poly in self.polys ] }

+    def mult_points(self, val):
+        for poly in self.polys:
+            poly.mult_points(val)
+
     @staticmethod
     def load(data=None):
         ie_polys = SegIEPolys()
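
`mult_points` rescales every stored polygon in place, which is exactly what a faceset resize needs: if the image side changes from `w` to `new_w`, polygon coordinates must be multiplied by `new_w / w`. A hedged sketch; loading the polys from a real DFL image is assumed:

    from core.imagelib import SegIEPolys

    polys = SegIEPolys.load()      # empty container here; normally loaded from a DFL image
    # ... after polys are populated from a 512px image:
    polys.mult_points(256 / 512)   # pts is a float ndarray, so '*= val' rescales in place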

View file

@@ -14,14 +14,19 @@ from .reduce_colors import reduce_colors
 from .color_transfer import color_transfer, color_transfer_mix, color_transfer_sot, color_transfer_mkl, color_transfer_idt, color_hist_match, reinhard_color_transfer, linear_color_transfer, color_augmentation

-from .common import normalize_channels, cut_odd_image, overlay_alpha_image
+from .common import random_crop, normalize_channels, cut_odd_image, overlay_alpha_image

 from .SegIEPolys import *

 from .blursharpen import LinearMotionBlur, blursharpen

 from .filters import apply_random_rgb_levels, \
+                     apply_random_overlay_triangle, \
                      apply_random_hsv_shift, \
+                     apply_random_sharpen, \
                      apply_random_motion_blur, \
                      apply_random_gaussian_blur, \
-                     apply_random_bilinear_resize
+                     apply_random_nearest_resize, \
+                     apply_random_bilinear_resize, \
+                     apply_random_jpeg_compress, \
+                     apply_random_relight

View file

@@ -1,5 +1,16 @@
 import numpy as np

+def random_crop(img, w, h):
+    height, width = img.shape[:2]
+
+    h_rnd = height - h
+    w_rnd = width - w
+
+    y = np.random.randint(0, h_rnd) if h_rnd > 0 else 0
+    x = np.random.randint(0, w_rnd) if w_rnd > 0 else 0
+
+    return img[y:y+h, x:x+w]
+
 def normalize_channels(img, target_channels):
     img_shape_len = len(img.shape)
     if img_shape_len == 2:
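
`random_crop` draws a uniformly random w-by-h window; when the source is already smaller along an axis, the offset clamps to 0. A quick sketch:

    import numpy as np
    from core.imagelib import random_crop   # re-exported by the __init__ change above

    img = np.zeros((720, 1280, 3), np.float32)
    patch = random_crop(img, 256, 256)      # random 256x256 window
    assert patch.shape[:2] == (256, 256)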

View file

@@ -1,47 +1,65 @@
 import numpy as np
-from .blursharpen import LinearMotionBlur
+from .blursharpen import LinearMotionBlur, blursharpen
 import cv2

 def apply_random_rgb_levels(img, mask=None, rnd_state=None):
     if rnd_state is None:
         rnd_state = np.random
     np_rnd = rnd_state.rand

     inBlack  = np.array([np_rnd()*0.25    , np_rnd()*0.25    , np_rnd()*0.25], dtype=np.float32)
     inWhite  = np.array([1.0-np_rnd()*0.25, 1.0-np_rnd()*0.25, 1.0-np_rnd()*0.25], dtype=np.float32)
     inGamma  = np.array([0.5+np_rnd(), 0.5+np_rnd(), 0.5+np_rnd()], dtype=np.float32)
     outBlack = np.array([np_rnd()*0.25    , np_rnd()*0.25    , np_rnd()*0.25], dtype=np.float32)
     outWhite = np.array([1.0-np_rnd()*0.25, 1.0-np_rnd()*0.25, 1.0-np_rnd()*0.25], dtype=np.float32)

     result = np.clip( (img - inBlack) / (inWhite - inBlack), 0, 1 )
     result = ( result ** (1/inGamma) ) * (outWhite - outBlack) + outBlack
     result = np.clip(result, 0, 1)

     if mask is not None:
         result = img*(1-mask) + result*mask

     return result

 def apply_random_hsv_shift(img, mask=None, rnd_state=None):
     if rnd_state is None:
         rnd_state = np.random

     h, s, v = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
     h = ( h + rnd_state.randint(360) ) % 360
     s = np.clip ( s + rnd_state.random()-0.5, 0, 1 )
     v = np.clip ( v + rnd_state.random()-0.5, 0, 1 )

     result = np.clip( cv2.cvtColor(cv2.merge([h, s, v]), cv2.COLOR_HSV2BGR) , 0, 1 )

     if mask is not None:
         result = img*(1-mask) + result*mask

     return result

+def apply_random_sharpen( img, chance, kernel_max_size, mask=None, rnd_state=None ):
+    if rnd_state is None:
+        rnd_state = np.random
+
+    sharp_rnd_kernel = rnd_state.randint(kernel_max_size)+1
+
+    result = img
+    if rnd_state.randint(100) < np.clip(chance, 0, 100):
+        if rnd_state.randint(2) == 0:
+            result = blursharpen(result, 1, sharp_rnd_kernel, rnd_state.randint(10) )
+        else:
+            result = blursharpen(result, 2, sharp_rnd_kernel, rnd_state.randint(50) )
+
+        if mask is not None:
+            result = img*(1-mask) + result*mask
+
+    return result
+
 def apply_random_motion_blur( img, chance, mb_max_size, mask=None, rnd_state=None ):
     if rnd_state is None:
         rnd_state = np.random

     mblur_rnd_kernel = rnd_state.randint(mb_max_size)+1
     mblur_rnd_deg    = rnd_state.randint(360)
@@ -50,38 +68,178 @@ def apply_random_motion_blur( img, chance, mb_max_size, mask=None, rnd_state=None ):
         result = LinearMotionBlur (result, mblur_rnd_kernel, mblur_rnd_deg )

         if mask is not None:
             result = img*(1-mask) + result*mask

     return result

 def apply_random_gaussian_blur( img, chance, kernel_max_size, mask=None, rnd_state=None ):
     if rnd_state is None:
         rnd_state = np.random

     result = img
     if rnd_state.randint(100) < np.clip(chance, 0, 100):
         gblur_rnd_kernel = rnd_state.randint(kernel_max_size)*2+1
         result = cv2.GaussianBlur(result, (gblur_rnd_kernel,)*2 , 0)

         if mask is not None:
             result = img*(1-mask) + result*mask

     return result

-def apply_random_bilinear_resize( img, chance, max_size_per, mask=None, rnd_state=None ):
+def apply_random_resize( img, chance, max_size_per, interpolation=cv2.INTER_LINEAR, mask=None, rnd_state=None ):
     if rnd_state is None:
         rnd_state = np.random

     result = img
     if rnd_state.randint(100) < np.clip(chance, 0, 100):
         h,w,c = result.shape

         trg = rnd_state.rand()
         rw = w - int( trg * int(w*(max_size_per/100.0)) )
         rh = h - int( trg * int(h*(max_size_per/100.0)) )

-        result = cv2.resize (result, (rw,rh), interpolation=cv2.INTER_LINEAR )
-        result = cv2.resize (result, (w,h), interpolation=cv2.INTER_LINEAR )
+        result = cv2.resize (result, (rw,rh), interpolation=interpolation )
+        result = cv2.resize (result, (w,h), interpolation=interpolation )

         if mask is not None:
             result = img*(1-mask) + result*mask

     return result

+def apply_random_nearest_resize( img, chance, max_size_per, mask=None, rnd_state=None ):
+    return apply_random_resize( img, chance, max_size_per, interpolation=cv2.INTER_NEAREST, mask=mask, rnd_state=rnd_state )
+
+def apply_random_bilinear_resize( img, chance, max_size_per, mask=None, rnd_state=None ):
+    return apply_random_resize( img, chance, max_size_per, interpolation=cv2.INTER_LINEAR, mask=mask, rnd_state=rnd_state )
+
+def apply_random_jpeg_compress( img, chance, mask=None, rnd_state=None ):
+    if rnd_state is None:
+        rnd_state = np.random
+
+    result = img
+    if rnd_state.randint(100) < np.clip(chance, 0, 100):
+        h,w,c = result.shape
+
+        quality = rnd_state.randint(10,101)
+
+        ret, result = cv2.imencode('.jpg', np.clip(img*255, 0,255).astype(np.uint8), [int(cv2.IMWRITE_JPEG_QUALITY), quality] )
+        if ret == True:
+            result = cv2.imdecode(result, flags=cv2.IMREAD_UNCHANGED)
+            result = result.astype(np.float32) / 255.0
+
+        if mask is not None:
+            result = img*(1-mask) + result*mask
+
+    return result
+
+def apply_random_overlay_triangle( img, max_alpha, mask=None, rnd_state=None ):
+    if rnd_state is None:
+        rnd_state = np.random
+
+    h,w,c = img.shape
+    pt1 = [rnd_state.randint(w), rnd_state.randint(h) ]
+    pt2 = [rnd_state.randint(w), rnd_state.randint(h) ]
+    pt3 = [rnd_state.randint(w), rnd_state.randint(h) ]
+
+    alpha = rnd_state.uniform()*max_alpha
+
+    tri_mask = cv2.fillPoly( np.zeros_like(img), [ np.array([pt1,pt2,pt3], np.int32) ], (alpha,)*c )
+
+    if rnd_state.randint(2) == 0:
+        result = np.clip(img+tri_mask, 0, 1)
+    else:
+        result = np.clip(img-tri_mask, 0, 1)
+
+    if mask is not None:
+        result = img*(1-mask) + result*mask
+
+    return result
+
+def _min_resize(x, m):
+    if x.shape[0] < x.shape[1]:
+        s0 = m
+        s1 = int(float(m) / float(x.shape[0]) * float(x.shape[1]))
+    else:
+        s0 = int(float(m) / float(x.shape[1]) * float(x.shape[0]))
+        s1 = m
+    new_max = min(s1, s0)
+    raw_max = min(x.shape[0], x.shape[1])
+    return cv2.resize(x, (s1, s0), interpolation=cv2.INTER_LANCZOS4)
+
+def _d_resize(x, d, fac=1.0):
+    new_min = min(int(d[1] * fac), int(d[0] * fac))
+    raw_min = min(x.shape[0], x.shape[1])
+    if new_min < raw_min:
+        interpolation = cv2.INTER_AREA
+    else:
+        interpolation = cv2.INTER_LANCZOS4
+    y = cv2.resize(x, (int(d[1] * fac), int(d[0] * fac)), interpolation=interpolation)
+    return y
+
+def _get_image_gradient(dist):
+    cols = cv2.filter2D(dist, cv2.CV_32F, np.array([[-1, 0, +1], [-2, 0, +2], [-1, 0, +1]]))
+    rows = cv2.filter2D(dist, cv2.CV_32F, np.array([[-1, -2, -1], [0, 0, 0], [+1, +2, +1]]))
+    return cols, rows
+
+def _generate_lighting_effects(content):
+    h512 = content
+    h256 = cv2.pyrDown(h512)
+    h128 = cv2.pyrDown(h256)
+    h64 = cv2.pyrDown(h128)
+    h32 = cv2.pyrDown(h64)
+    h16 = cv2.pyrDown(h32)
+    c512, r512 = _get_image_gradient(h512)
+    c256, r256 = _get_image_gradient(h256)
+    c128, r128 = _get_image_gradient(h128)
+    c64, r64 = _get_image_gradient(h64)
+    c32, r32 = _get_image_gradient(h32)
+    c16, r16 = _get_image_gradient(h16)
+    c = c16
+    c = _d_resize(cv2.pyrUp(c), c32.shape) * 4.0 + c32
+    c = _d_resize(cv2.pyrUp(c), c64.shape) * 4.0 + c64
+    c = _d_resize(cv2.pyrUp(c), c128.shape) * 4.0 + c128
+    c = _d_resize(cv2.pyrUp(c), c256.shape) * 4.0 + c256
+    c = _d_resize(cv2.pyrUp(c), c512.shape) * 4.0 + c512
+    r = r16
+    r = _d_resize(cv2.pyrUp(r), r32.shape) * 4.0 + r32
+    r = _d_resize(cv2.pyrUp(r), r64.shape) * 4.0 + r64
+    r = _d_resize(cv2.pyrUp(r), r128.shape) * 4.0 + r128
+    r = _d_resize(cv2.pyrUp(r), r256.shape) * 4.0 + r256
+    r = _d_resize(cv2.pyrUp(r), r512.shape) * 4.0 + r512
+    coarse_effect_cols = c
+    coarse_effect_rows = r
+    EPS = 1e-10
+
+    max_effect = np.max((coarse_effect_cols**2 + coarse_effect_rows**2)**0.5, axis=0, keepdims=True, ).max(1, keepdims=True)
+
+    coarse_effect_cols = (coarse_effect_cols + EPS) / (max_effect + EPS)
+    coarse_effect_rows = (coarse_effect_rows + EPS) / (max_effect + EPS)
+
+    return np.stack([ np.zeros_like(coarse_effect_rows), coarse_effect_rows, coarse_effect_cols], axis=-1)
+
+def apply_random_relight(img, mask=None, rnd_state=None):
+    if rnd_state is None:
+        rnd_state = np.random
+
+    def_img = img
+
+    if rnd_state.randint(2) == 0:
+        light_pos_y = 1.0 if rnd_state.randint(2) == 0 else -1.0
+        light_pos_x = rnd_state.uniform()*2-1.0
+    else:
+        light_pos_y = rnd_state.uniform()*2-1.0
+        light_pos_x = 1.0 if rnd_state.randint(2) == 0 else -1.0
+
+    light_source_height = 0.3*rnd_state.uniform()*0.7
+    light_intensity = 1.0+rnd_state.uniform()
+    ambient_intensity = 0.5
+
+    light_source_location = np.array([[[light_source_height, light_pos_y, light_pos_x ]]], dtype=np.float32)
+    light_source_direction = light_source_location / np.sqrt(np.sum(np.square(light_source_location)))
+
+    lighting_effect = _generate_lighting_effects(img)
+    lighting_effect = np.sum(lighting_effect * light_source_direction, axis=-1).clip(0, 1)
+    lighting_effect = np.mean(lighting_effect, axis=-1, keepdims=True)
+
+    result = def_img * (ambient_intensity + lighting_effect * light_intensity) #light_source_color
+    result = np.clip(result, 0, 1)
+
+    if mask is not None:
+        result = def_img*(1-mask) + result*mask
+
+    return result
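
A sketch chaining the new augmentations on a float32 BGR image in [0, 1], the convention every filter here expects; `chance` is a percentage:

    import numpy as np
    from core.imagelib import (apply_random_sharpen, apply_random_jpeg_compress,
                               apply_random_relight)

    img = np.random.rand(256, 256, 3).astype(np.float32)
    img = apply_random_sharpen(img, chance=25, kernel_max_size=5)   # blursharpen mode 1 or 2
    img = apply_random_jpeg_compress(img, chance=25)                # quality drawn from 10..100
    img = apply_random_relight(img)                                 # directional light over 0.5 ambient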

View file

@@ -1,2 +1,2 @@
-from .draw import *
+from .draw import circle_faded, random_circle_faded, bezier, random_bezier_split_faded, random_faded
 from .calc import *

View file

@@ -1,23 +1,36 @@
 """
 Signed distance drawing functions using numpy.
 """
+import math
 import numpy as np
 from numpy import linalg as npla

-def circle_faded( hw, center, fade_dists ):
+def vector2_dot(a,b):
+    return a[...,0]*b[...,0]+a[...,1]*b[...,1]
+
+def vector2_dot2(a):
+    return a[...,0]*a[...,0]+a[...,1]*a[...,1]
+
+def vector2_cross(a,b):
+    return a[...,0]*b[...,1]-a[...,1]*b[...,0]
+
+def circle_faded( wh, center, fade_dists ):
     """
     returns drawn circle in [h,w,1] output range [0..1.0] float32

-    hw          = [h,w]                  resolution
-    center      = [y,x]                  center of circle
+    wh          = [w,h]                  resolution
+    center      = [x,y]                  center of circle
     fade_dists  = [fade_start, fade_end] fade values
     """
-    h,w = hw
+    w,h = wh

     pts = np.empty( (h,w,2), dtype=np.float32 )
-    pts[...,1] = np.arange(h)[None,:]
     pts[...,0] = np.arange(w)[:,None]
+    pts[...,1] = np.arange(h)[None,:]

     pts = pts.reshape ( (h*w, -1) )

     pts_dists = np.abs ( npla.norm(pts-center, axis=-1) )
@@ -30,15 +43,158 @@ def circle_faded( hw, center, fade_dists ):
     pts_dists = np.clip( 1-pts_dists, 0, 1)

     return pts_dists.reshape ( (h,w,1) ).astype(np.float32)

+def bezier( wh, A, B, C ):
+    """
+    returns drawn bezier in [h,w,1] output range float32,
+    every pixel contains signed distance to bezier line
+
+    wh    [w,h]  resolution
+    A,B,C points [x,y]
+    """
+    width,height = wh
+    A = np.float32(A)
+    B = np.float32(B)
+    C = np.float32(C)
+
+    pos = np.empty( (height,width,2), dtype=np.float32 )
+    pos[...,0] = np.arange(width)[:,None]
+    pos[...,1] = np.arange(height)[None,:]
+
+    a = B-A
+    b = A - 2.0*B + C
+    c = a * 2.0
+    d = A - pos
+
+    b_dot = vector2_dot(b,b)
+    if b_dot == 0.0:
+        return np.zeros( (height,width), dtype=np.float32 )
+
+    kk = 1.0 / b_dot
+
+    kx = kk * vector2_dot(a,b)
+    ky = kk * (2.0*vector2_dot(a,a)+vector2_dot(d,b))/3.0;
+    kz = kk * vector2_dot(d,a);
+
+    res = 0.0;
+    sgn = 0.0;
+
+    p = ky - kx*kx;
+    p3 = p*p*p;
+    q = kx*(2.0*kx*kx - 3.0*ky) + kz;
+    h = q*q + 4.0*p3;
+
+    hp_sel = h >= 0.0
+
+    hp_p = h[hp_sel]
+    hp_p = np.sqrt(hp_p)
+
+    hp_x = ( np.stack( (hp_p,-hp_p), -1) -q[hp_sel,None] ) / 2.0
+    hp_uv = np.sign(hp_x) * np.power( np.abs(hp_x), [1.0/3.0, 1.0/3.0] )
+    hp_t = np.clip( hp_uv[...,0] + hp_uv[...,1] - kx, 0.0, 1.0 )
+
+    hp_t = hp_t[...,None]
+    hp_q = d[hp_sel]+(c+b*hp_t)*hp_t
+    hp_res = vector2_dot2(hp_q)
+    hp_sgn = vector2_cross(c+2.0*b*hp_t,hp_q)
+
+    hl_sel = h < 0.0
+
+    hl_q = q[hl_sel]
+    hl_p = p[hl_sel]
+    hl_z = np.sqrt(-hl_p)
+    hl_v = np.arccos( hl_q / (hl_p*hl_z*2.0)) / 3.0
+    hl_m = np.cos(hl_v)
+    hl_n = np.sin(hl_v)*1.732050808;
+
+    hl_t = np.clip( np.stack( (hl_m+hl_m,-hl_n-hl_m,hl_n-hl_m), -1)*hl_z[...,None]-kx, 0.0, 1.0 );
+
+    hl_d = d[hl_sel]
+
+    hl_qx = hl_d+(c+b*hl_t[...,0:1])*hl_t[...,0:1]
+    hl_dx = vector2_dot2(hl_qx)
+    hl_sx = vector2_cross(c+2.0*b*hl_t[...,0:1], hl_qx)
+
+    hl_qy = hl_d+(c+b*hl_t[...,1:2])*hl_t[...,1:2]
+    hl_dy = vector2_dot2(hl_qy)
+    hl_sy = vector2_cross(c+2.0*b*hl_t[...,1:2],hl_qy);
+
+    hl_dx_l_dy = hl_dx<hl_dy
+    hl_dx_ge_dy = hl_dx>=hl_dy
+
+    hl_res = np.empty_like(hl_dx)
+    hl_res[hl_dx_l_dy] = hl_dx[hl_dx_l_dy]
+    hl_res[hl_dx_ge_dy] = hl_dy[hl_dx_ge_dy]
+
+    hl_sgn = np.empty_like(hl_sx)
+    hl_sgn[hl_dx_l_dy] = hl_sx[hl_dx_l_dy]
+    hl_sgn[hl_dx_ge_dy] = hl_sy[hl_dx_ge_dy]
+
+    res = np.empty( (height, width), np.float32 )
+    res[hp_sel] = hp_res
+    res[hl_sel] = hl_res
+
+    sgn = np.empty( (height, width), np.float32 )
+    sgn[hp_sel] = hp_sgn
+    sgn[hl_sel] = hl_sgn
+
+    sgn = np.sign(sgn)
+    res = np.sqrt(res)*sgn
+
+    return res[...,None]
+
+def random_faded(wh):
+    """
+    apply one of them:
+     random_circle_faded
+     random_bezier_split_faded
+    """
+    rnd = np.random.randint(2)
+    if rnd == 0:
+        return random_circle_faded(wh)
+    elif rnd == 1:
+        return random_bezier_split_faded(wh)
+
-def random_circle_faded ( hw, rnd_state=None ):
+def random_circle_faded ( wh, rnd_state=None ):
     if rnd_state is None:
         rnd_state = np.random

-    h,w = hw
-    hw_max = max(h,w)
-    fade_start = rnd_state.randint(hw_max)
-    fade_end = fade_start + rnd_state.randint(hw_max- fade_start)
+    w,h = wh
+    wh_max = max(w,h)
+    fade_start = rnd_state.randint(wh_max)
+    fade_end = fade_start + rnd_state.randint(wh_max- fade_start)

-    return circle_faded (hw, [ rnd_state.randint(h), rnd_state.randint(w) ],
+    return circle_faded (wh, [ rnd_state.randint(h), rnd_state.randint(w) ],
                          [fade_start, fade_end] )
+
+def random_bezier_split_faded( wh ):
+    width, height = wh
+
+    degA = np.random.randint(360)
+    degB = np.random.randint(360)
+    degC = np.random.randint(360)
+
+    deg_2_rad = math.pi / 180.0
+
+    center = np.float32([width / 2.0, height / 2.0])
+
+    radius = max(width, height)
+
+    A = center + radius*np.float32([ math.sin( degA * deg_2_rad), math.cos( degA * deg_2_rad) ] )
+    B = center + np.random.randint(radius)*np.float32([ math.sin( degB * deg_2_rad), math.cos( degB * deg_2_rad) ] )
+    C = center + radius*np.float32([ math.sin( degC * deg_2_rad), math.cos( degC * deg_2_rad) ] )
+
+    x = bezier( (width,height), A, B, C )
+
+    x = x / (1+np.random.randint(radius)) + 0.5
+
+    x = np.clip(x, 0, 1)
+    return x
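
Everything in this module now takes (w, h) resolutions and [x, y] points, matching OpenCV's convention, while output arrays stay (h, w, 1). A small sketch:

    import numpy as np
    from core.imagelib.sd import circle_faded, random_faded

    m = circle_faded((128, 64), [64, 32], [10, 30])   # wh=(128, 64), center [x, y]
    assert m.shape == (64, 128, 1)                    # output is still (h, w, 1)
    r = random_faded((256, 256))                      # circle or bezier split, at random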

View file

@@ -2,7 +2,7 @@ import numpy as np
 import cv2
 from core import randomex

-def gen_warp_params (w, flip, rotation_range=[-2,2], scale_range=[-0.5, 0.5], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05], rnd_state=None ):
+def gen_warp_params (w, flip=False, rotation_range=[-2,2], scale_range=[-0.5, 0.5], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05], rnd_state=None ):
     if rnd_state is None:
         rnd_state = np.random
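
With `flip=False` as the default, call sites that always passed `False` can drop the argument; behavior is otherwise unchanged. For instance (the 256 resolution is arbitrary):

    from core.imagelib.warp import gen_warp_params

    params = gen_warp_params(256)   # same as gen_warp_params(256, False)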

View file

@@ -1,12 +1,19 @@
 import sys
 import ctypes
 import os
+import multiprocessing
+import json
+import time
+from pathlib import Path
+from core.interact import interact as io

 class Device(object):
-    def __init__(self, index, name, total_mem, free_mem, cc=0):
+    def __init__(self, index, tf_dev_type, name, total_mem, free_mem):
         self.index = index
+        self.tf_dev_type = tf_dev_type
         self.name = name
-        self.cc = cc
         self.total_mem = total_mem
         self.total_mem_gb = total_mem / 1024**3
         self.free_mem = free_mem
@@ -82,12 +89,135 @@ class Devices(object):
             result.append (device)
         return Devices(result)

+    @staticmethod
+    def _get_tf_devices_proc(q : multiprocessing.Queue):
+
+        if sys.platform[0:3] == 'win':
+            compute_cache_path = Path(os.environ['APPDATA']) / 'NVIDIA' / ('ComputeCache_ALL')
+            os.environ['CUDA_CACHE_PATH'] = str(compute_cache_path)
+            if not compute_cache_path.exists():
+                io.log_info("Caching GPU kernels...")
+                compute_cache_path.mkdir(parents=True, exist_ok=True)
+
+        import tensorflow
+
+        tf_version = tensorflow.version.VERSION
+        #if tf_version is None:
+        #    tf_version = tensorflow.version.GIT_VERSION
+        if tf_version[0] == 'v':
+            tf_version = tf_version[1:]
+        if tf_version[0] == '2':
+            tf = tensorflow.compat.v1
+        else:
+            tf = tensorflow
+
+        import logging
+        # Disable tensorflow warnings
+        tf_logger = logging.getLogger('tensorflow')
+        tf_logger.setLevel(logging.ERROR)
+
+        from tensorflow.python.client import device_lib
+
+        devices = []
+
+        physical_devices = device_lib.list_local_devices()
+        physical_devices_f = {}
+        for dev in physical_devices:
+            dev_type = dev.device_type
+            dev_tf_name = dev.name
+            dev_tf_name = dev_tf_name[ dev_tf_name.index(dev_type) : ]
+
+            dev_idx = int(dev_tf_name.split(':')[-1])
+
+            if dev_type in ['GPU','DML']:
+                dev_name = dev_tf_name
+
+                dev_desc = dev.physical_device_desc
+                if len(dev_desc) != 0:
+                    if dev_desc[0] == '{':
+                        dev_desc_json = json.loads(dev_desc)
+                        dev_desc_json_name = dev_desc_json.get('name',None)
+                        if dev_desc_json_name is not None:
+                            dev_name = dev_desc_json_name
+                    else:
+                        for param, value in ( v.split(':') for v in dev_desc.split(',') ):
+                            param = param.strip()
+                            value = value.strip()
+                            if param == 'name':
+                                dev_name = value
+                                break
+
+                physical_devices_f[dev_idx] = (dev_type, dev_name, dev.memory_limit)
+
+        q.put(physical_devices_f)
+        time.sleep(0.1)
+
     @staticmethod
     def initialize_main_env():
-        os.environ['NN_DEVICES_INITIALIZED'] = '1'
-        os.environ['NN_DEVICES_COUNT'] = '0'
+        if int(os.environ.get("NN_DEVICES_INITIALIZED", 0)) != 0:
+            return
+
+        if 'CUDA_VISIBLE_DEVICES' in os.environ.keys():
+            os.environ.pop('CUDA_VISIBLE_DEVICES')
+
         os.environ['CUDA_CACHE_MAXSIZE'] = '2147483647'
+        os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '2'
+        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # tf log errors only
+
+        q = multiprocessing.Queue()
+        p = multiprocessing.Process(target=Devices._get_tf_devices_proc, args=(q,), daemon=True)
+        p.start()
+        p.join()
+
+        visible_devices = q.get()
+
+        os.environ['NN_DEVICES_INITIALIZED'] = '1'
+        os.environ['NN_DEVICES_COUNT'] = str(len(visible_devices))
+
+        for i in visible_devices:
+            dev_type, name, total_mem = visible_devices[i]
+
+            os.environ[f'NN_DEVICE_{i}_TF_DEV_TYPE'] = dev_type
+            os.environ[f'NN_DEVICE_{i}_NAME'] = name
+            os.environ[f'NN_DEVICE_{i}_TOTAL_MEM'] = str(total_mem)
+            os.environ[f'NN_DEVICE_{i}_FREE_MEM'] = str(total_mem)
+
+    @staticmethod
+    def getDevices():
+        if Devices.all_devices is None:
+            if int(os.environ.get("NN_DEVICES_INITIALIZED", 0)) != 1:
+                raise Exception("nn devices are not initialized. Run initialize_main_env() in main process.")
+            devices = []
+            for i in range ( int(os.environ['NN_DEVICES_COUNT']) ):
+                devices.append ( Device(index=i,
+                                        tf_dev_type=os.environ[f'NN_DEVICE_{i}_TF_DEV_TYPE'],
+                                        name=os.environ[f'NN_DEVICE_{i}_NAME'],
+                                        total_mem=int(os.environ[f'NN_DEVICE_{i}_TOTAL_MEM']),
+                                        free_mem=int(os.environ[f'NN_DEVICE_{i}_FREE_MEM']), )
+                               )
+            Devices.all_devices = Devices(devices)
+
+        return Devices.all_devices
+
+"""
+        # {'name' : name.split(b'\0', 1)[0].decode(),
+        #  'total_mem' : totalMem.value
+        # }
+
+        return
+
         min_cc = int(os.environ.get("TF_MIN_REQ_CAP", 35))
         libnames = ('libcuda.so', 'libcuda.dylib', 'nvcuda.dll')
         for libname in libnames:
@@ -139,70 +269,4 @@ class Devices(object):
             os.environ[f'NN_DEVICE_{i}_TOTAL_MEM'] = str(device['total_mem'])
             os.environ[f'NN_DEVICE_{i}_FREE_MEM'] = str(device['free_mem'])
             os.environ[f'NN_DEVICE_{i}_CC'] = str(device['cc'])
-
-    @staticmethod
-    def getDevices():
-        if Devices.all_devices is None:
-            if int(os.environ.get("NN_DEVICES_INITIALIZED", 0)) != 1:
-                raise Exception("nn devices are not initialized. Run initialize_main_env() in main process.")
-            devices = []
-            for i in range ( int(os.environ['NN_DEVICES_COUNT']) ):
-                devices.append ( Device(index=i,
-                                        name=os.environ[f'NN_DEVICE_{i}_NAME'],
-                                        total_mem=int(os.environ[f'NN_DEVICE_{i}_TOTAL_MEM']),
-                                        free_mem=int(os.environ[f'NN_DEVICE_{i}_FREE_MEM']),
-                                        cc=int(os.environ[f'NN_DEVICE_{i}_CC']) ))
-            Devices.all_devices = Devices(devices)
-        return Devices.all_devices
-
-"""
-        if Devices.all_devices is None:
-            min_cc = int(os.environ.get("TF_MIN_REQ_CAP", 35))
-
-            libnames = ('libcuda.so', 'libcuda.dylib', 'nvcuda.dll')
-            for libname in libnames:
-                try:
-                    cuda = ctypes.CDLL(libname)
-                except:
-                    continue
-                else:
-                    break
-            else:
-                return Devices([])
-
-            nGpus = ctypes.c_int()
-            name = b' ' * 200
-            cc_major = ctypes.c_int()
-            cc_minor = ctypes.c_int()
-            freeMem = ctypes.c_size_t()
-            totalMem = ctypes.c_size_t()
-
-            result = ctypes.c_int()
-            device = ctypes.c_int()
-            context = ctypes.c_void_p()
-            error_str = ctypes.c_char_p()
-
-            devices = []
-
-            if cuda.cuInit(0) == 0 and \
-                cuda.cuDeviceGetCount(ctypes.byref(nGpus)) == 0:
-                for i in range(nGpus.value):
-                    if cuda.cuDeviceGet(ctypes.byref(device), i) != 0 or \
-                        cuda.cuDeviceGetName(ctypes.c_char_p(name), len(name), device) != 0 or \
-                        cuda.cuDeviceComputeCapability(ctypes.byref(cc_major), ctypes.byref(cc_minor), device) != 0:
-                        continue
-
-                    if cuda.cuCtxCreate_v2(ctypes.byref(context), 0, device) == 0:
-                        if cuda.cuMemGetInfo_v2(ctypes.byref(freeMem), ctypes.byref(totalMem)) == 0:
-                            cc = cc_major.value * 10 + cc_minor.value
-                            if cc >= min_cc:
-                                devices.append ( Device(index=i,
-                                                        name=name.split(b'\0', 1)[0].decode(),
-                                                        total_mem=totalMem.value,
-                                                        free_mem=freeMem.value,
-                                                        cc=cc) )
-                            cuda.cuCtxDetach(context)
-            Devices.all_devices = Devices(devices)
-        return Devices.all_devices
 """

View file

@@ -28,11 +28,12 @@ class XSeg(nn.ModelBase):
                 x = self.frn(x)
                 x = self.tlu(x)
                 return x

+        self.base_ch = base_ch
+
         self.conv01 = ConvBlock(in_ch, base_ch)
         self.conv02 = ConvBlock(base_ch, base_ch)
-        self.bp0 = nn.BlurPool (filt_size=3)
+        self.bp0 = nn.BlurPool (filt_size=4)

         self.conv11 = ConvBlock(base_ch, base_ch*2)
         self.conv12 = ConvBlock(base_ch*2, base_ch*2)
@@ -40,19 +41,30 @@ class XSeg(nn.ModelBase):

         self.conv21 = ConvBlock(base_ch*2, base_ch*4)
         self.conv22 = ConvBlock(base_ch*4, base_ch*4)
-        self.conv23 = ConvBlock(base_ch*4, base_ch*4)
-        self.bp2 = nn.BlurPool (filt_size=3)
+        self.bp2 = nn.BlurPool (filt_size=2)

         self.conv31 = ConvBlock(base_ch*4, base_ch*8)
         self.conv32 = ConvBlock(base_ch*8, base_ch*8)
         self.conv33 = ConvBlock(base_ch*8, base_ch*8)
-        self.bp3 = nn.BlurPool (filt_size=3)
+        self.bp3 = nn.BlurPool (filt_size=2)

         self.conv41 = ConvBlock(base_ch*8, base_ch*8)
         self.conv42 = ConvBlock(base_ch*8, base_ch*8)
         self.conv43 = ConvBlock(base_ch*8, base_ch*8)
-        self.bp4 = nn.BlurPool (filt_size=3)
+        self.bp4 = nn.BlurPool (filt_size=2)
+
+        self.conv51 = ConvBlock(base_ch*8, base_ch*8)
+        self.conv52 = ConvBlock(base_ch*8, base_ch*8)
+        self.conv53 = ConvBlock(base_ch*8, base_ch*8)
+        self.bp5 = nn.BlurPool (filt_size=2)
+
+        self.dense1 = nn.Dense ( 4*4* base_ch*8, 512)
+        self.dense2 = nn.Dense ( 512, 4*4* base_ch*8)
+
+        self.up5 = UpConvBlock (base_ch*8, base_ch*4)
+        self.uconv53 = ConvBlock(base_ch*12, base_ch*8)
+        self.uconv52 = ConvBlock(base_ch*8, base_ch*8)
+        self.uconv51 = ConvBlock(base_ch*8, base_ch*8)

         self.up4 = UpConvBlock (base_ch*8, base_ch*4)
         self.uconv43 = ConvBlock(base_ch*12, base_ch*8)
@@ -65,8 +77,7 @@ class XSeg(nn.ModelBase):
         self.uconv31 = ConvBlock(base_ch*8, base_ch*8)

         self.up2 = UpConvBlock (base_ch*8, base_ch*4)
-        self.uconv23 = ConvBlock(base_ch*8, base_ch*4)
-        self.uconv22 = ConvBlock(base_ch*4, base_ch*4)
+        self.uconv22 = ConvBlock(base_ch*8, base_ch*4)
         self.uconv21 = ConvBlock(base_ch*4, base_ch*4)

         self.up1 = UpConvBlock (base_ch*4, base_ch*2)
@@ -78,8 +89,7 @@ class XSeg(nn.ModelBase):
         self.uconv01 = ConvBlock(base_ch, base_ch)

         self.out_conv = nn.Conv2D (base_ch, out_ch, kernel_size=3, padding='SAME')
-        self.conv_center = ConvBlock(base_ch*8, base_ch*8)

     def forward(self, inp):
         x = inp
@@ -92,8 +102,7 @@ class XSeg(nn.ModelBase):
         x = self.bp1(x)

         x = self.conv21(x)
-        x = self.conv22(x)
-        x = x2 = self.conv23(x)
+        x = x2 = self.conv22(x)
         x = self.bp2(x)

         x = self.conv31(x)
@@ -106,8 +115,21 @@ class XSeg(nn.ModelBase):
         x = x4 = self.conv43(x)
         x = self.bp4(x)

-        x = self.conv_center(x)
+        x = self.conv51(x)
+        x = self.conv52(x)
+        x = x5 = self.conv53(x)
+        x = self.bp5(x)
+
+        x = nn.flatten(x)
+        x = self.dense1(x)
+        x = self.dense2(x)
+        x = nn.reshape_4D (x, 4, 4, self.base_ch*8 )
+
+        x = self.up5(x)
+        x = self.uconv53(tf.concat([x,x5],axis=nn.conv2d_ch_axis))
+        x = self.uconv52(x)
+        x = self.uconv51(x)

         x = self.up4(x)
         x = self.uconv43(tf.concat([x,x4],axis=nn.conv2d_ch_axis))
         x = self.uconv42(x)
@@ -119,8 +141,7 @@ class XSeg(nn.ModelBase):
         x = self.uconv31(x)

         x = self.up2(x)
-        x = self.uconv23(tf.concat([x,x2],axis=nn.conv2d_ch_axis))
-        x = self.uconv22(x)
+        x = self.uconv22(tf.concat([x,x2],axis=nn.conv2d_ch_axis))
         x = self.uconv21(x)

         x = self.up1(x)
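
The rewritten encoder adds a fifth conv/BlurPool stage and a dense 4x4 bottleneck (with a matching up5/uconv5x skip path), so the network now assumes an input resolution that six stride-2 downsamples reduce to 4. A back-of-envelope check:

    res = 256
    for _ in range(6):    # bp0 .. bp5
        res //= 2
    assert res == 4       # matches nn.Dense(4*4*base_ch*8, 512)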

View file

@@ -33,8 +33,8 @@ class nn():
     tf = None
     tf_sess = None
     tf_sess_config = None
-    tf_default_device = None
+    tf_default_device_name = None
     data_format = None
     conv2d_ch_axis = None
     conv2d_spatial_axes = None
@@ -51,9 +51,6 @@ class nn():
             # Manipulate environment variables before import tensorflow

-            if 'CUDA_VISIBLE_DEVICES' in os.environ.keys():
-                os.environ.pop('CUDA_VISIBLE_DEVICES')
-
             first_run = False
             if len(device_config.devices) != 0:
                 if sys.platform[0:3] == 'win':
@@ -68,22 +65,19 @@ class nn():
                         compute_cache_path = Path(os.environ['APPDATA']) / 'NVIDIA' / ('ComputeCache' + devices_str)
                         if not compute_cache_path.exists():
                             first_run = True
-                            compute_cache_path.mkdir(parents=True, exist_ok=True)
                         os.environ['CUDA_CACHE_PATH'] = str(compute_cache_path)

-            os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '2'
-            os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # tf log errors only
-
             if first_run:
                 io.log_info("Caching GPU kernels...")

             import tensorflow

-            tf_version = getattr(tensorflow,'VERSION', None)
-            if tf_version is None:
-                tf_version = tensorflow.version.GIT_VERSION
+            tf_version = tensorflow.version.VERSION
+            #if tf_version is None:
+            #    tf_version = tensorflow.version.GIT_VERSION
             if tf_version[0] == 'v':
                 tf_version = tf_version[1:]
             if tf_version[0] == '2':
                 tf = tensorflow.compat.v1
             else:
@@ -108,13 +102,14 @@ class nn():
             # Configure tensorflow session-config
             if len(device_config.devices) == 0:
-                nn.tf_default_device = "/CPU:0"
                 config = tf.ConfigProto(device_count={'GPU': 0})
+                nn.tf_default_device_name = '/CPU:0'
             else:
-                nn.tf_default_device = "/GPU:0"
+                nn.tf_default_device_name = f'/{device_config.devices[0].tf_dev_type}:0'
+
                 config = tf.ConfigProto(allow_soft_placement=True)
                 config.gpu_options.visible_device_list = ','.join([str(device.index) for device in device_config.devices])

             config.gpu_options.force_gpu_compatible = True
             config.gpu_options.allow_growth = True
             nn.tf_sess_config = config
@@ -202,14 +197,6 @@ class nn():
             nn.tf_sess.close()
             nn.tf_sess = None

-    @staticmethod
-    def get_current_device():
-        # Undocumented access to last tf.device(...)
-        objs = nn.tf.get_default_graph()._device_function_stack.peek_objs()
-        if len(objs) != 0:
-            return objs[0].display_name
-        return nn.tf_default_device
-
     @staticmethod
     def ask_choose_device_idxs(choose_only_one=False, allow_cpu=True, suggest_best_multi_gpu=False, suggest_all_gpu=False):
         devices = Devices.getDevices()

View file

@@ -204,7 +204,7 @@ def random_binomial(shape, p=0.0, dtype=None, seed=None):
         seed = np.random.randint(10e6)
     return array_ops.where(
         random_ops.random_uniform(shape, dtype=tf.float16, seed=seed) < p,
         array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))
 nn.random_binomial = random_binomial

 def gaussian_blur(input, radius=2.0):
@@ -346,7 +346,9 @@ def depth_to_space(x, size):
         x = tf.reshape(x, (-1, oh, ow, oc, ))
         return x
     else:
-        return tf.depth_to_space(x, size, data_format=nn.data_format)
+        cfg = nn.getCurrentDeviceConfig()
+        if not cfg.cpu_only:
+            return tf.depth_to_space(x, size, data_format=nn.data_format)
         b,c,h,w = x.shape.as_list()
         oh, ow = h * size, w * size
         oc = c // (size * size)
@@ -357,11 +359,6 @@ def depth_to_space(x, size):
         return x
 nn.depth_to_space = depth_to_space

-def pixel_norm(x, power = 1.0):
-    return x * power * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=nn.conv2d_spatial_axes, keepdims=True) + 1e-06)
-nn.pixel_norm = pixel_norm
-
 def rgb_to_lab(srgb):
     srgb_pixels = tf.reshape(srgb, [-1, 3])
     linear_mask = tf.cast(srgb_pixels <= 0.04045, dtype=tf.float32)
@@ -404,6 +401,11 @@ def total_variation_mse(images):
     return tot_var
 nn.total_variation_mse = total_variation_mse

+def pixel_norm(x, axes):
+    return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=axes, keepdims=True) + 1e-06)
+nn.pixel_norm = pixel_norm
+
 """
 def tf_suppress_lower_mean(t, eps=0.00001):
     if t.shape.ndims != 1:
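
pixel_norm loses its `power` scale and gains explicit `axes`, so callers choose what to normalize over (e.g. nn.conv2d_spatial_axes for feature maps). A NumPy rendering of the same formula, for intuition only:

    import numpy as np

    def pixel_norm_np(x, axes):
        # x * rsqrt(mean(x^2) + 1e-6)  ==  x / sqrt(mean(x^2) + 1e-6)
        return x / np.sqrt(np.mean(np.square(x), axis=axes, keepdims=True) + 1e-06)

    x = np.random.rand(1, 8, 8, 16).astype(np.float32)
    y = pixel_norm_np(x, axes=(1, 2))   # normalize over spatial axes of an NHWC tensor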

View file

@@ -1,7 +1,12 @@
-import numpy as np
 import math
+
+import cv2
+import numpy as np
+import numpy.linalg as npla
+
 from .umeyama import umeyama

 def get_power_of_two(x):
     i = 0
     while (1 << i) < x:
@@ -23,3 +28,70 @@ def rotationMatrixToEulerAngles(R) :

 def polygon_area(x,y):
     return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
+
+def rotate_point(origin, point, deg):
+    """
+    Rotate a point counterclockwise by a given angle around a given origin.
+
+    The angle should be given in degrees.
+    """
+    ox, oy = origin
+    px, py = point
+
+    rad = deg * math.pi / 180.0
+    qx = ox + math.cos(rad) * (px - ox) - math.sin(rad) * (py - oy)
+    qy = oy + math.sin(rad) * (px - ox) + math.cos(rad) * (py - oy)
+    return np.float32([qx, qy])
+
+def transform_points(points, mat, invert=False):
+    if invert:
+        mat = cv2.invertAffineTransform (mat)
+
+    points = np.expand_dims(points, axis=1)
+    points = cv2.transform(points, mat, points.shape)
+    points = np.squeeze(points)
+    return points
+
+def transform_mat(mat, res, tx, ty, rotation, scale):
+    """
+    transform mat in local space of res
+    scale -> translate -> rotate
+
+    tx,ty       float
+    rotation    int degrees
+    scale       float
+    """
+    lt, rt, lb, ct = transform_points ( np.float32([(0,0),(res,0),(0,res),(res / 2, res/2) ]),mat, True)
+
+    hor_v = (rt-lt).astype(np.float32)
+    hor_size = npla.norm(hor_v)
+    hor_v /= hor_size
+
+    ver_v = (lb-lt).astype(np.float32)
+    ver_size = npla.norm(ver_v)
+    ver_v /= ver_size
+
+    bt_diag_vec = (rt-ct).astype(np.float32)
+    half_diag_len = npla.norm(bt_diag_vec)
+    bt_diag_vec /= half_diag_len
+
+    tb_diag_vec = np.float32( [ -bt_diag_vec[1], bt_diag_vec[0] ] )
+
+    rt = ct + bt_diag_vec*half_diag_len*scale
+    lb = ct - bt_diag_vec*half_diag_len*scale
+    lt = ct - tb_diag_vec*half_diag_len*scale
+
+    rt[0] += tx*hor_size
+    lb[0] += tx*hor_size
+    lt[0] += tx*hor_size
+
+    rt[1] += ty*ver_size
+    lb[1] += ty*ver_size
+    lt[1] += ty*ver_size
+
+    rt = rotate_point(ct, rt, rotation)
+    lb = rotate_point(ct, lb, rotation)
+    lt = rotate_point(ct, lt, rotation)
+
+    return cv2.getAffineTransform( np.float32([lt, rt, lb]), np.float32([ [0,0], [res,0], [0,res] ]) )
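
rotate_point rotates counterclockwise, in degrees, about an origin; transform_mat re-derives an affine mat after applying scale, then a translation expressed as a fraction of the face span, then rotation, all in the local res-by-res space. A hedged sketch; the identity mat is just for illustration:

    import numpy as np
    from core import mathlib

    p = mathlib.rotate_point((0, 0), (1, 0), 90)      # ~[0, 1]
    mat = np.float32([[1, 0, 0], [0, 1, 0]])          # identity affine
    mat2 = mathlib.transform_mat(mat, 256, tx=0.1, ty=0.0, rotation=15, scale=1.2)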

(Binary image file changed; before: 63 KiB; not shown.)

doc/logo_directx.png (new binary file; after: 25 KiB; not shown.)

View file

@@ -161,11 +161,11 @@ class FaceEnhancer(object):
         if not model_path.exists():
             raise Exception("Unable to load FaceEnhancer.npy")

-        with tf.device ('/CPU:0' if place_model_on_cpu else '/GPU:0'):
+        with tf.device ('/CPU:0' if place_model_on_cpu else nn.tf_default_device_name):
             self.model = FaceEnhancer()
             self.model.load_weights (model_path)

-        with tf.device ('/CPU:0' if run_on_cpu else '/GPU:0'):
+        with tf.device ('/CPU:0' if run_on_cpu else nn.tf_default_device_name):
             self.model.build_for_run ([ (tf.float32, nn.get4Dshape (192,192,3) ),
                                         (tf.float32, (None,1,) ),
                                         (tf.float32, (None,1,) ),

View file

@@ -39,7 +39,7 @@ class XSegNet(object):
             self.target_t = tf.placeholder (nn.floatx, nn.get4Dshape(resolution,resolution,1) )

         # Initializing model classes
-        with tf.device ('/CPU:0' if place_model_on_cpu else '/GPU:0'):
+        with tf.device ('/CPU:0' if place_model_on_cpu else nn.tf_default_device_name):
             self.model = nn.XSeg(3, 32, 1, name=name)
             self.model_weights = self.model.get_weights()
             if training:
@@ -53,7 +53,7 @@ class XSegNet(object):
         self.model_filename_list += [ [self.model, f'{model_name}.npy'] ]

         if not training:
-            with tf.device ('/CPU:0' if run_on_cpu else '/GPU:0'):
+            with tf.device ('/CPU:0' if run_on_cpu else nn.tf_default_device_name):
                 _, pred = self.model(self.input_t)

                 def net_run(input_np):

main.py
View file

@@ -127,6 +127,7 @@ if __name__ == "__main__":
                         'silent_start'          : arguments.silent_start,
                         'execute_programs'      : [ [int(x[0]), x[1] ] for x in arguments.execute_program ],
                         'debug'                 : arguments.debug,
+                        'dump_ckpt'             : arguments.dump_ckpt,
                         'flask_preview'         : arguments.flask_preview,
                         }
         from mainscripts import Trainer
@@ -145,6 +146,7 @@ if __name__ == "__main__":
     p.add_argument('--cpu-only', action="store_true", dest="cpu_only", default=False, help="Train on CPU.")
     p.add_argument('--force-gpu-idxs', dest="force_gpu_idxs", default=None, help="Force to choose GPU indexes separated by comma.")
     p.add_argument('--silent-start', action="store_true", dest="silent_start", default=False, help="Silent start. Automatically chooses Best GPU and last used model.")
+    p.add_argument('--dump-ckpt', action="store_true", dest="dump_ckpt", default=False, help="Dump the model to ckpt format.")
     p.add_argument('--flask-preview', action="store_true", dest="flask_preview", default=False,
                    help="Launches a flask server to view the previews in a web browser")
@@ -254,7 +256,17 @@ if __name__ == "__main__":
     p.add_argument('--force-gpu-idxs', dest="force_gpu_idxs", default=None, help="Force to choose GPU indexes separated by comma.")
     p.set_defaults(func=process_faceset_enhancer)

+    p = facesettool_parser.add_parser ("resize", help="Resize DFL faceset.")
+    p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory of aligned faces.")
+
+    def process_faceset_resizer(arguments):
+        osex.set_process_lowest_prio()
+        from mainscripts import FacesetResizer
+        FacesetResizer.process_folder ( Path(arguments.input_dir) )
+    p.set_defaults(func=process_faceset_resizer)
+
     def process_dev_test(arguments):
         osex.set_process_lowest_prio()
         from mainscripts import dev_misc

View file

@@ -97,9 +97,6 @@ class ExtractSubprocessor(Subprocessor):
                 h, w, c = image.shape

-                dflimg = DFLIMG.load (filepath)
-                extract_from_dflimg = (h == w and (dflimg is not None and dflimg.has_data()) )
-
                 if 'rects' in self.type or self.type == 'all':
                     data = ExtractSubprocessor.Cli.rects_stage (data=data,
                                                                 image=image,
@@ -110,7 +107,6 @@ class ExtractSubprocessor(Subprocessor):
                 if 'landmarks' in self.type or self.type == 'all':
                     data = ExtractSubprocessor.Cli.landmarks_stage (data=data,
                                                                     image=image,
-                                                                    extract_from_dflimg=extract_from_dflimg,
                                                                     landmarks_extractor=self.landmarks_extractor,
                                                                     rects_extractor=self.rects_extractor,
                                                                     )
@@ -121,7 +117,6 @@ class ExtractSubprocessor(Subprocessor):
                                                                 face_type=self.face_type,
                                                                 image_size=self.image_size,
                                                                 jpeg_quality=self.jpeg_quality,
-                                                                extract_from_dflimg=extract_from_dflimg,
                                                                 output_debug_path=self.output_debug_path,
                                                                 final_output_path=self.final_output_path,
                                                                 )
@@ -161,7 +156,6 @@ class ExtractSubprocessor(Subprocessor):
         @staticmethod
         def landmarks_stage(data,
                             image,
-                            extract_from_dflimg,
                             landmarks_extractor,
                             rects_extractor,
                             ):
@@ -176,7 +170,7 @@ class ExtractSubprocessor(Subprocessor):
             elif data.rects_rotation == 270:
                 rotated_image = image.swapaxes( 0,1 )[::-1,:,:]

-            data.landmarks = landmarks_extractor.extract (rotated_image, data.rects, rects_extractor if (not extract_from_dflimg and data.landmarks_accurate) else None, is_bgr=True)
+            data.landmarks = landmarks_extractor.extract (rotated_image, data.rects, rects_extractor if (data.landmarks_accurate) else None, is_bgr=True)
             if data.rects_rotation != 0:
                 for i, (rect, lmrks) in enumerate(zip(data.rects, data.landmarks)):
                     new_rect, new_lmrks = rect, lmrks
@@ -207,7 +201,6 @@ class ExtractSubprocessor(Subprocessor):
                         face_type,
                         image_size,
                         jpeg_quality,
-                        extract_from_dflimg = False,
                         output_debug_path=None,
                         final_output_path=None,
                         ):
@@ -219,72 +212,53 @@ class ExtractSubprocessor(Subprocessor):
             if output_debug_path is not None:
                 debug_image = image.copy()

-            if extract_from_dflimg and len(rects) != 1:
-                #if re-extracting from dflimg and more than 1 or zero faces detected - dont process and just copy it
-                print("extract_from_dflimg and len(rects) != 1", filepath )
-                output_filepath = final_output_path / filepath.name
-                if filepath != str(output_file):
-                    shutil.copy ( str(filepath), str(output_filepath) )
-                data.final_output_files.append (output_filepath)
-            else:
-                face_idx = 0
-                for rect, image_landmarks in zip( rects, landmarks ):
-                    if extract_from_dflimg and face_idx > 1:
-                        #cannot extract more than 1 face from dflimg
-                        break
-
-                    if image_landmarks is None:
-                        continue
-
-                    rect = np.array(rect)
-
-                    if face_type == FaceType.MARK_ONLY:
-                        image_to_face_mat = None
-                        face_image = image
-                        face_image_landmarks = image_landmarks
-                    else:
-                        image_to_face_mat = LandmarksProcessor.get_transform_mat (image_landmarks, image_size, face_type)
-
-                        face_image = cv2.warpAffine(image, image_to_face_mat, (image_size, image_size), cv2.INTER_LANCZOS4)
-                        face_image_landmarks = LandmarksProcessor.transform_points (image_landmarks, image_to_face_mat)
-
-                        landmarks_bbox = LandmarksProcessor.transform_points ( [ (0,0), (0,image_size-1), (image_size-1, image_size-1), (image_size-1,0) ], image_to_face_mat, True)
-
-                        rect_area      = mathlib.polygon_area(np.array(rect[[0,2,2,0]]).astype(np.float32), np.array(rect[[1,1,3,3]]).astype(np.float32))
-                        landmarks_area = mathlib.polygon_area(landmarks_bbox[:,0].astype(np.float32), landmarks_bbox[:,1].astype(np.float32) )
-
-                        if not data.manual and face_type <= FaceType.FULL_NO_ALIGN and landmarks_area > 4*rect_area: #get rid of faces which umeyama-landmark-area > 4*detector-rect-area
-                            continue
-
-                    if output_debug_path is not None:
-                        LandmarksProcessor.draw_rect_landmarks (debug_image, rect, image_landmarks, face_type, image_size, transparent_mask=True)
-
-                    output_path = final_output_path
-                    if data.force_output_path is not None:
-                        output_path = data.force_output_path
-
-                    if extract_from_dflimg and filepath.suffix == '.jpg':
-                        #if extracting from dflimg and jpg copy it in order not to lose quality
-                        output_filepath = output_path / filepath.name
-                        if filepath != output_filepath:
-                            shutil.copy ( str(filepath), str(output_filepath) )
-                    else:
-                        output_filepath = output_path / f"{filepath.stem}_{face_idx}.jpg"
-                        cv2_imwrite(output_filepath, face_image, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality ] )
-
-                    dflimg = DFLJPG.load(output_filepath)
-                    dflimg.set_face_type(FaceType.toString(face_type))
-                    dflimg.set_landmarks(face_image_landmarks.tolist())
-                    dflimg.set_source_filename(filepath.name)
-                    dflimg.set_source_rect(rect)
-                    dflimg.set_source_landmarks(image_landmarks.tolist())
-                    dflimg.set_image_to_face_mat(image_to_face_mat)
-                    dflimg.save()
-
-                    data.final_output_files.append (output_filepath)
-                    face_idx += 1
-                data.faces_detected = face_idx
+            face_idx = 0
+            for rect, image_landmarks in zip( rects, landmarks ):
+                if image_landmarks is None:
+                    continue
+
+                rect = np.array(rect)
+
+                if face_type == FaceType.MARK_ONLY:
+                    image_to_face_mat = None
+                    face_image = image
+                    face_image_landmarks = image_landmarks
+                else:
+                    image_to_face_mat = LandmarksProcessor.get_transform_mat (image_landmarks, image_size, face_type)
+
+                    face_image = cv2.warpAffine(image, image_to_face_mat, (image_size, image_size), cv2.INTER_LANCZOS4)
+                    face_image_landmarks = LandmarksProcessor.transform_points (image_landmarks, image_to_face_mat)
+
+                    landmarks_bbox = LandmarksProcessor.transform_points ( [ (0,0), (0,image_size-1), (image_size-1, image_size-1), (image_size-1,0) ], image_to_face_mat, True)
+
+                    rect_area      = mathlib.polygon_area(np.array(rect[[0,2,2,0]]).astype(np.float32), np.array(rect[[1,1,3,3]]).astype(np.float32))
+                    landmarks_area = mathlib.polygon_area(landmarks_bbox[:,0].astype(np.float32), landmarks_bbox[:,1].astype(np.float32) )
+
+                    if not data.manual and face_type <= FaceType.FULL_NO_ALIGN and landmarks_area > 4*rect_area: #get rid of faces which umeyama-landmark-area > 4*detector-rect-area
+                        continue
+
+                if output_debug_path is not None:
+                    LandmarksProcessor.draw_rect_landmarks (debug_image, rect, image_landmarks, face_type, image_size, transparent_mask=True)
+
+                output_path = final_output_path
+                if data.force_output_path is not None:
+                    output_path = data.force_output_path
+
+                output_filepath = output_path / f"{filepath.stem}_{face_idx}.jpg"
+                cv2_imwrite(output_filepath, face_image, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality ] )
+
+                dflimg = DFLJPG.load(output_filepath)
+                dflimg.set_face_type(FaceType.toString(face_type))
+                dflimg.set_landmarks(face_image_landmarks.tolist())
+                dflimg.set_source_filename(filepath.name)
+                dflimg.set_source_rect(rect)
+                dflimg.set_source_landmarks(image_landmarks.tolist())
+                dflimg.set_image_to_face_mat(image_to_face_mat)
+                dflimg.save()
+
+                data.final_output_files.append (output_filepath)
+                face_idx += 1
+            data.faces_detected = face_idx

             if output_debug_path is not None:
                 cv2_imwrite( output_debug_path / (filepath.stem+'.jpg'), debug_image, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )

View file

@ -0,0 +1,209 @@
import multiprocessing
import shutil
import traceback
from pathlib import Path
import cv2
from core import pathex
from core.cv2ex import *
from core.interact import interact as io
from core.joblib import Subprocessor
from DFLIMG import *
from facelib import FaceType, LandmarksProcessor
class FacesetResizerSubprocessor(Subprocessor):
#override
def __init__(self, image_paths, output_dirpath, image_size, face_type=None):
self.image_paths = image_paths
self.output_dirpath = output_dirpath
self.image_size = image_size
self.face_type = face_type
self.result = []
super().__init__('FacesetResizer', FacesetResizerSubprocessor.Cli, 600)
#override
def on_clients_initialized(self):
io.progress_bar (None, len (self.image_paths))
#override
def on_clients_finalized(self):
io.progress_bar_close()
#override
def process_info_generator(self):
base_dict = {'output_dirpath':self.output_dirpath, 'image_size':self.image_size, 'face_type':self.face_type}
for device_idx in range( min(8, multiprocessing.cpu_count()) ):
client_dict = base_dict.copy()
device_name = f'CPU #{device_idx}'
client_dict['device_name'] = device_name
yield device_name, {}, client_dict
#override
def get_data(self, host_dict):
if len (self.image_paths) > 0:
return self.image_paths.pop(0)
#override
def on_data_return (self, host_dict, data):
self.image_paths.insert(0, data)
#override
def on_result (self, host_dict, data, result):
io.progress_bar_inc(1)
if result[0] == 1:
self.result +=[ (result[1], result[2]) ]
#override
def get_result(self):
return self.result
class Cli(Subprocessor.Cli):
#override
def on_initialize(self, client_dict):
self.output_dirpath = client_dict['output_dirpath']
self.image_size = client_dict['image_size']
self.face_type = client_dict['face_type']
self.log_info (f"Running on { client_dict['device_name'] }")
#override
def process_data(self, filepath):
try:
dflimg = DFLIMG.load (filepath)
if dflimg is None or not dflimg.has_data():
self.log_err (f"{filepath.name} is not a dfl image file")
else:
img = cv2_imread(filepath)
h,w = img.shape[:2]
if h != w:
raise Exception(f'w != h in {filepath}')
image_size = self.image_size
face_type = self.face_type
output_filepath = self.output_dirpath / filepath.name
if face_type is not None:
lmrks = dflimg.get_landmarks()
mat = LandmarksProcessor.get_transform_mat(lmrks, image_size, face_type)
img = cv2.warpAffine(img, mat, (image_size, image_size), flags=cv2.INTER_LANCZOS4 )
img = np.clip(img, 0, 255).astype(np.uint8)
cv2_imwrite ( str(output_filepath), img, [int(cv2.IMWRITE_JPEG_QUALITY), 100] )
dfl_dict = dflimg.get_dict()
dflimg = DFLIMG.load (output_filepath)
dflimg.set_dict(dfl_dict)
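# Note: the embedded XSeg mask is stored at its own fixed resolution (256px here),
# so the landmarks are first scaled into that space before the mask is re-cropped
# to the new face type and re-binarized at the 0.5 threshold.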
xseg_mask = dflimg.get_xseg_mask()
if xseg_mask is not None:
xseg_res = 256
xseg_lmrks = lmrks.copy()
xseg_lmrks *= (xseg_res / w)
xseg_mat = LandmarksProcessor.get_transform_mat(xseg_lmrks, xseg_res, face_type)
xseg_mask = cv2.warpAffine(xseg_mask, xseg_mat, (xseg_res, xseg_res), flags=cv2.INTER_LANCZOS4 )
xseg_mask[xseg_mask < 0.5] = 0
xseg_mask[xseg_mask >= 0.5] = 1
dflimg.set_xseg_mask(xseg_mask)
seg_ie_polys = dflimg.get_seg_ie_polys()
for poly in seg_ie_polys.get_polys():
poly_pts = poly.get_pts()
poly_pts = LandmarksProcessor.transform_points(poly_pts, mat)
poly.set_points(poly_pts)
dflimg.set_seg_ie_polys(seg_ie_polys)
lmrks = LandmarksProcessor.transform_points(lmrks, mat)
dflimg.set_landmarks(lmrks)
image_to_face_mat = dflimg.get_image_to_face_mat()
if image_to_face_mat is not None:
image_to_face_mat = LandmarksProcessor.get_transform_mat ( dflimg.get_source_landmarks(), image_size, face_type )
dflimg.set_image_to_face_mat(image_to_face_mat)
dflimg.set_face_type( FaceType.toString(face_type) )
dflimg.save()
else:
dfl_dict = dflimg.get_dict()
scale = w / image_size
img = cv2.resize(img, (image_size, image_size), interpolation=cv2.INTER_LANCZOS4)
cv2_imwrite ( str(output_filepath), img, [int(cv2.IMWRITE_JPEG_QUALITY), 100] )
dflimg = DFLIMG.load (output_filepath)
dflimg.set_dict(dfl_dict)
lmrks = dflimg.get_landmarks()
lmrks /= scale
dflimg.set_landmarks(lmrks)
seg_ie_polys = dflimg.get_seg_ie_polys()
seg_ie_polys.mult_points( 1.0 / scale)
dflimg.set_seg_ie_polys(seg_ie_polys)
image_to_face_mat = dflimg.get_image_to_face_mat()
if image_to_face_mat is not None:
face_type = FaceType.fromString ( dflimg.get_face_type() )
image_to_face_mat = LandmarksProcessor.get_transform_mat ( dflimg.get_source_landmarks(), image_size, face_type )
dflimg.set_image_to_face_mat(image_to_face_mat)
dflimg.save()
return (1, filepath, output_filepath)
except:
self.log_err (f"Exception occured while processing file {filepath}. Error: {traceback.format_exc()}")
return (0, filepath, None)
def process_folder ( dirpath):
image_size = io.input_int(f"New image size", 512, valid_range=[256,2048])
face_type = io.input_str ("Change face type", 'same', ['h','mf','f','wf','head','same']).lower()
if face_type == 'same':
face_type = None
else:
face_type = {'h' : FaceType.HALF,
'mf' : FaceType.MID_FULL,
'f' : FaceType.FULL,
'wf' : FaceType.WHOLE_FACE,
'head' : FaceType.HEAD}[face_type]
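# face_type 'same' (None) keeps the stored face type: images are then simply
# resized, with landmarks and seg polys rescaled by the same factor.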
output_dirpath = dirpath.parent / (dirpath.name + '_resized')
output_dirpath.mkdir (exist_ok=True, parents=True)
dirpath_parts = '/'.join( dirpath.parts[-2:])
output_dirpath_parts = '/'.join( output_dirpath.parts[-2:] )
io.log_info (f"Resizing faceset in {dirpath_parts}")
io.log_info ( f"Processing to {output_dirpath_parts}")
output_images_paths = pathex.get_image_paths(output_dirpath)
if len(output_images_paths) > 0:
for filename in output_images_paths:
Path(filename).unlink()
image_paths = [Path(x) for x in pathex.get_image_paths( dirpath )]
result = FacesetResizerSubprocessor ( image_paths, output_dirpath, image_size, face_type).run()
is_merge = io.input_bool (f"\r\nMerge {output_dirpath_parts} to {dirpath_parts} ?", True)
if is_merge:
io.log_info (f"Copying processed files to {dirpath_parts}")
for (filepath, output_filepath) in result:
try:
shutil.copy (output_filepath, filepath)
except:
pass
io.log_info (f"Removing {output_dirpath_parts}")
shutil.rmtree(output_dirpath)

View file

@ -1,4 +1,4 @@
 import os
 import sys
 import traceback
 import queue
@ -15,23 +15,23 @@ import cv2
 import models
 from core.interact import interact as io

-def trainerThread(s2c, c2s, e,
-                  socketio=None,
-                  model_class_name=None,
-                  saved_models_path=None,
-                  training_data_src_path=None,
-                  training_data_dst_path=None,
-                  pretraining_data_path=None,
-                  pretrained_model_path=None,
-                  no_preview=False,
-                  force_model_name=None,
-                  force_gpu_idxs=None,
-                  cpu_only=None,
-                  silent_start=False,
-                  execute_programs=None,
-                  debug=False,
-                  **kwargs):
+def trainerThread (s2c, c2s, e,
+                   socketio=None,
+                   model_class_name = None,
+                   saved_models_path = None,
+                   training_data_src_path = None,
+                   training_data_dst_path = None,
+                   pretraining_data_path = None,
+                   pretrained_model_path = None,
+                   no_preview=False,
+                   force_model_name=None,
+                   force_gpu_idxs=None,
+                   cpu_only=None,
+                   silent_start=False,
+                   execute_programs = None,
+                   debug=False,
+                   dump_ckpt=False,
+                   **kwargs):
     while True:
         try:
             start_time = time.time()
@ -46,22 +46,29 @@ def trainerThread(s2c, c2s, e,
             if not saved_models_path.exists():
                 saved_models_path.mkdir(exist_ok=True, parents=True)

+            if dump_ckpt:
+                cpu_only=True
+
             model = models.import_model(model_class_name)(
-                is_training=True,
+                is_training=not dump_ckpt,
                 saved_models_path=saved_models_path,
                 training_data_src_path=training_data_src_path,
                 training_data_dst_path=training_data_dst_path,
                 pretraining_data_path=pretraining_data_path,
                 pretrained_model_path=pretrained_model_path,
                 no_preview=no_preview,
                 force_model_name=force_model_name,
                 force_gpu_idxs=force_gpu_idxs,
                 cpu_only=cpu_only,
                 silent_start=silent_start,
-                debug=debug,
-            )
+                debug=debug)
+
+            if dump_ckpt:
+                e.set()
+                model.dump_ckpt()
+                break

             is_reached_goal = model.is_reached_iter_goal()
             shared_state = {'after_save': False}

View file

@ -10,8 +10,8 @@ from core.cv2ex import *
 from core.interact import interact as io
 from core.leras import nn
 from DFLIMG import *
-from facelib import XSegNet
+from facelib import XSegNet, LandmarksProcessor, FaceType
+import pickle

 def apply_xseg(input_path, model_path):
     if not input_path.exists():
@ -20,17 +20,42 @@ def apply_xseg(input_path, model_path):
     if not model_path.exists():
         raise ValueError(f'{model_path} not found. Please ensure it exists.')

+    face_type = None
+
+    model_dat = model_path / 'XSeg_data.dat'
+    if model_dat.exists():
+        dat = pickle.loads( model_dat.read_bytes() )
+        dat_options = dat.get('options', None)
+        if dat_options is not None:
+            face_type = dat_options.get('face_type', None)
+
+    if face_type is None:
+        face_type = io.input_str ("XSeg model face type", 'same', ['h','mf','f','wf','head','same'], help_message="Specify the face type of the trained XSeg model. For example, if the XSeg model was trained as WF but the faceset is HEAD, specify WF to apply XSeg only to the WF part of HEAD. Default is 'same'").lower()
+        if face_type == 'same':
+            face_type = None
+
+    if face_type is not None:
+        face_type = {'h'    : FaceType.HALF,
+                     'mf'   : FaceType.MID_FULL,
+                     'f'    : FaceType.FULL,
+                     'wf'   : FaceType.WHOLE_FACE,
+                     'head' : FaceType.HEAD}[face_type]
+
     io.log_info(f'Applying trained XSeg model to {input_path.name}/ folder.')

     device_config = nn.DeviceConfig.ask_choose_device(choose_only_one=True)
     nn.initialize(device_config)

     xseg = XSegNet(name='XSeg',
                    load_weights=True,
                    weights_file_root=model_path,
                    data_format=nn.data_format,
                    raise_on_no_model_files=True)
-    res = xseg.get_resolution()
+    xseg_res = xseg.get_resolution()

     images_paths = pathex.get_image_paths(input_path, return_Path_class=True)
@ -42,15 +67,36 @@ def apply_xseg(input_path, model_path):
         img = cv2_imread(filepath).astype(np.float32) / 255.0
         h,w,c = img.shape

-        if w != res:
-            img = cv2.resize( img, (res,res), interpolation=cv2.INTER_CUBIC )
-
-        if len(img.shape) == 2:
-            img = img[...,None]
-
-        mask = xseg.extract(img)
-        mask[mask < 0.5]=0
-        mask[mask >= 0.5]=1
+        img_face_type = FaceType.fromString( dflimg.get_face_type() )
+        if face_type is not None and img_face_type != face_type:
+            lmrks = dflimg.get_source_landmarks()
+
+            fmat = LandmarksProcessor.get_transform_mat(lmrks, w, face_type)
+            imat = LandmarksProcessor.get_transform_mat(lmrks, w, img_face_type)
+
+            g_p = LandmarksProcessor.transform_points (np.float32([(0,0),(w,0),(0,w) ]), fmat, True)
+            g_p2 = LandmarksProcessor.transform_points (g_p, imat)
+
+            mat = cv2.getAffineTransform( g_p2, np.float32([(0,0),(w,0),(0,w) ]) )
+
+            img = cv2.warpAffine(img, mat, (w, w), cv2.INTER_LANCZOS4)
+            img = cv2.resize(img, (xseg_res, xseg_res), interpolation=cv2.INTER_LANCZOS4)
+        else:
+            if w != xseg_res:
+                img = cv2.resize( img, (xseg_res,xseg_res), interpolation=cv2.INTER_LANCZOS4 )
+
+        if len(img.shape) == 2:
+            img = img[...,None]
+
+        mask = xseg.extract(img)
+
+        if face_type is not None and img_face_type != face_type:
+            mask = cv2.resize(mask, (w, w), interpolation=cv2.INTER_LANCZOS4)
+            mask = cv2.warpAffine( mask, mat, (w,w), np.zeros( (h,w,c), dtype=np.float), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4)
+            mask = cv2.resize(mask, (xseg_res, xseg_res), interpolation=cv2.INTER_LANCZOS4)
+        mask[mask < 0.5]=0
+        mask[mask >= 0.5]=1

         dflimg.set_xseg_mask(mask)
         dflimg.save()
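Note on the re-crop above: fmat maps the source landmarks into the XSeg model's face-type crop, and imat into the image's stored face type; pushing the crop corners through the inverse of fmat and then through imat yields the affine that re-crops the stored face into the model's face type before segmentation, and the same matrix is applied inverse-warped to bring the mask back.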
@ -67,7 +113,8 @@ def fetch_xseg(input_path):
     images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

-    files_copied = 0
+    files_copied = []
     for filepath in io.progress_bar_generator(images_paths, "Processing"):
         dflimg = DFLIMG.load(filepath)
         if dflimg is None or not dflimg.has_data():
@ -77,10 +124,16 @@ def fetch_xseg(input_path):
         ie_polys = dflimg.get_seg_ie_polys()

         if ie_polys.has_polys():
-            files_copied += 1
+            files_copied.append(filepath)
             shutil.copy ( str(filepath), str(output_path / filepath.name) )

-    io.log_info(f'Files copied: {files_copied}')
+    io.log_info(f'Files copied: {len(files_copied)}')
+
+    is_delete = io.input_bool (f"\r\nDelete original files?", True)
+    if is_delete:
+        for filepath in files_copied:
+            Path(filepath).unlink()

 def remove_xseg(input_path):
     if not input_path.exists():

View file

@ -142,7 +142,9 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,
         elif 'raw' in cfg.mode:
             if cfg.mode == 'raw-rgb':
-                out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC )
+                out_img_face = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.empty_like(img_bgr), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC)
+                out_img_face_mask = cv2.warpAffine( np.ones_like(prd_face_bgr), face_output_mat, img_size, np.empty_like(img_bgr), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC)
+                out_img = img_bgr*(1-out_img_face_mask) + out_img_face*out_img_face_mask
                 out_merging_mask_a = img_face_mask_a
             elif cfg.mode == 'raw-predict':
                 out_img = prd_face_bgr
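In raw-rgb mode the predicted face is now composited over the original frame through an explicitly warped all-ones mask, rather than warped directly into a copy of the frame, which gives an interpolated edge around the pasted region instead of a hard one.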

View file

@ -185,7 +185,9 @@ class ModelBase(object):
         self.write_preview_history = self.options.get('write_preview_history', False)
         self.target_iter = self.options.get('target_iter',0)
         self.random_flip = self.options.get('random_flip',True)
+        self.random_src_flip = self.options.get('random_src_flip', False)
+        self.random_dst_flip = self.options.get('random_dst_flip', True)

         self.on_initialize()

         self.options['batch_size'] = self.batch_size
@ -297,6 +299,14 @@ class ModelBase(object):
     def ask_random_flip(self):
         default_random_flip = self.load_or_def_option('random_flip', True)
         self.options['random_flip'] = io.input_bool("Flip faces randomly", default_random_flip, help_message="The predicted face will look more natural without this option, but the src faceset should then cover all the face directions the dst faceset has.")

+    def ask_random_src_flip(self):
+        default_random_src_flip = self.load_or_def_option('random_src_flip', False)
+        self.options['random_src_flip'] = io.input_bool("Flip SRC faces randomly", default_random_src_flip, help_message="Random horizontal flip of the SRC faceset. Covers more angles, but the face may look less natural.")
+
+    def ask_random_dst_flip(self):
+        default_random_dst_flip = self.load_or_def_option('random_dst_flip', True)
+        self.options['random_dst_flip'] = io.input_bool("Flip DST faces randomly", default_random_dst_flip, help_message="Random horizontal flip of the DST faceset. Makes generalization of src->dst better, if src random flip is not enabled.")
+
     def ask_batch_size(self, suggest_batch_size=None, range=None):
         default_batch_size = self.load_or_def_option('batch_size', suggest_batch_size or self.batch_size)

models/Model_AMP/Model.py Normal file
View file

@ -0,0 +1,810 @@
import multiprocessing
import operator
from functools import partial
import numpy as np
from core import mathlib
from core.interact import interact as io
from core.leras import nn
from facelib import FaceType
from models import ModelBase
from samplelib import *
from core.cv2ex import *
class AMPModel(ModelBase):
#override
def on_initialize_options(self):
device_config = nn.getCurrentDeviceConfig()
lowest_vram = 2
if len(device_config.devices) != 0:
lowest_vram = device_config.devices.get_worst_device().total_mem_gb
if lowest_vram >= 4:
suggest_batch_size = 8
else:
suggest_batch_size = 4
yn_str = {True:'y',False:'n'}
min_res = 64
max_res = 640
default_resolution = self.options['resolution'] = self.load_or_def_option('resolution', 224)
default_face_type = self.options['face_type'] = self.load_or_def_option('face_type', 'wf')
default_models_opt_on_gpu = self.options['models_opt_on_gpu'] = self.load_or_def_option('models_opt_on_gpu', True)
default_ae_dims = self.options['ae_dims'] = self.load_or_def_option('ae_dims', 256)
default_e_dims = self.options['e_dims'] = self.load_or_def_option('e_dims', 64)
default_d_dims = self.options['d_dims'] = self.options.get('d_dims', None)
default_d_mask_dims = self.options['d_mask_dims'] = self.options.get('d_mask_dims', None)
default_morph_factor = self.options['morph_factor'] = self.options.get('morph_factor', 0.33)
default_masked_training = self.options['masked_training'] = self.load_or_def_option('masked_training', True)
default_eyes_mouth_prio = self.options['eyes_mouth_prio'] = self.load_or_def_option('eyes_mouth_prio', True)
default_uniform_yaw = self.options['uniform_yaw'] = self.load_or_def_option('uniform_yaw', False)
lr_dropout = self.load_or_def_option('lr_dropout', 'n')
lr_dropout = {True:'y', False:'n'}.get(lr_dropout, lr_dropout) #backward comp
default_lr_dropout = self.options['lr_dropout'] = lr_dropout
default_random_warp = self.options['random_warp'] = self.load_or_def_option('random_warp', True)
default_ct_mode = self.options['ct_mode'] = self.load_or_def_option('ct_mode', 'none')
default_clipgrad = self.options['clipgrad'] = self.load_or_def_option('clipgrad', False)
default_pretrain = self.options['pretrain'] = self.load_or_def_option('pretrain', False)
ask_override = self.ask_override()
if self.is_first_run() or ask_override:
self.ask_autobackup_hour()
self.ask_write_preview_history()
self.ask_target_iter()
self.ask_random_src_flip()
self.ask_random_dst_flip()
self.ask_batch_size(suggest_batch_size)
if self.is_first_run():
resolution = io.input_int("Resolution", default_resolution, add_info="64-640", help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 32 .")
resolution = np.clip ( (resolution // 32) * 32, min_res, max_res)
self.options['resolution'] = resolution
self.options['face_type'] = io.input_str ("Face type", default_face_type, ['wf','head'], help_message="whole face / head").lower()
default_d_dims = self.options['d_dims'] = self.load_or_def_option('d_dims', 64)
default_d_mask_dims = default_d_dims // 3
default_d_mask_dims += default_d_mask_dims % 2
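# e.g. d_dims=64 -> 64//3 = 21 -> +1 to make it even -> default d_mask_dims = 22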
default_d_mask_dims = self.options['d_mask_dims'] = self.load_or_def_option('d_mask_dims', default_d_mask_dims)
if self.is_first_run():
self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dimensions", default_ae_dims, add_info="32-1024", help_message="All face information will packed to AE dims. If amount of AE dims are not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )
e_dims = np.clip ( io.input_int("Encoder dimensions", default_e_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
self.options['e_dims'] = e_dims + e_dims % 2
d_dims = np.clip ( io.input_int("Decoder dimensions", default_d_dims, add_info="16-256", help_message="More dims help to recognize more facial features and achieve sharper result, but require more VRAM. You can fine-tune model size to fit your GPU." ), 16, 256 )
self.options['d_dims'] = d_dims + d_dims % 2
d_mask_dims = np.clip ( io.input_int("Decoder mask dimensions", default_d_mask_dims, add_info="16-256", help_message="Typical mask dimensions = decoder dimensions / 3. If you manually cut out obstacles from the dst mask, you can increase this parameter to achieve better quality." ), 16, 256 )
self.options['d_mask_dims'] = d_mask_dims + d_mask_dims % 2
morph_factor = np.clip ( io.input_number ("Morph factor.", default_morph_factor, add_info="0.1 .. 0.5", help_message="The smaller the value, the more src-like facial expressions will appear. The larger the value, the less space there is to train a large dst faceset in the neural network. Typical fine value is 0.33"), 0.1, 0.5 )
self.options['morph_factor'] = morph_factor
if self.is_first_run() or ask_override:
if self.options['face_type'] == 'wf' or self.options['face_type'] == 'head':
self.options['masked_training'] = io.input_bool ("Masked training", default_masked_training, help_message="This option is available only for 'whole_face' or 'head' type. Masked training clips the training area to the full_face mask or XSeg mask, so the network trains the faces properly.")
self.options['eyes_mouth_prio'] = io.input_bool ("Eyes and mouth priority", default_eyes_mouth_prio, help_message='Helps to fix eye problems during training like "alien eyes" and wrong eye direction. Also makes the teeth more detailed.')
self.options['uniform_yaw'] = io.input_bool ("Uniform yaw distribution of samples", default_uniform_yaw, help_message='Helps to fix blurry side faces caused by their small number in the faceset.')
default_gan_power = self.options['gan_power'] = self.load_or_def_option('gan_power', 0.0)
default_gan_patch_size = self.options['gan_patch_size'] = self.load_or_def_option('gan_patch_size', self.options['resolution'] // 8)
default_gan_dims = self.options['gan_dims'] = self.load_or_def_option('gan_dims', 16)
if self.is_first_run() or ask_override:
self.options['models_opt_on_gpu'] = io.input_bool ("Place models and optimizer on GPU", default_models_opt_on_gpu, help_message="When you train on one GPU, by default model and optimizer weights are placed on GPU to accelerate the process. You can place them on CPU to free up extra VRAM, thus allowing bigger dimensions.")
self.options['lr_dropout'] = io.input_str ("Use learning rate dropout", default_lr_dropout, ['n','y','cpu'], help_message="When the face is trained enough, you can enable this option to get extra sharpness and reduce subpixel shake in fewer iterations. Enable it before disabling random warp and before GAN. \nn - disabled.\ny - enabled\ncpu - enabled on CPU. This avoids using extra VRAM, sacrificing about 20% of iteration time.")
self.options['random_warp'] = io.input_bool ("Enable random warp of samples", default_random_warp, help_message="Random warp is required to generalize facial expressions of both faces. When the face is trained enough, you can disable it to get extra sharpness and reduce subpixel shake in fewer iterations.")
self.options['gan_power'] = np.clip ( io.input_number ("GAN power", default_gan_power, add_info="0.0 .. 1.0", help_message="Forces the neural network to learn small details of the face. Enable it only when the face is trained enough with lr_dropout(on) and random_warp(off), and do not disable it afterwards. The higher the value, the higher the chance of artifacts. Typical fine value is 0.1"), 0.0, 1.0 )
if self.options['gan_power'] != 0.0:
gan_patch_size = np.clip ( io.input_int("GAN patch size", default_gan_patch_size, add_info="3-640", help_message="The higher patch size, the higher the quality, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is resolution / 8." ), 3, 640 )
self.options['gan_patch_size'] = gan_patch_size
gan_dims = np.clip ( io.input_int("GAN dimensions", default_gan_dims, add_info="4-64", help_message="The dimensions of the GAN network. The higher dimensions, the more VRAM is required. You can get sharper edges even at the lowest setting. Typical fine value is 16." ), 4, 64 )
self.options['gan_dims'] = gan_dims
self.options['ct_mode'] = io.input_str (f"Color transfer for src faceset", default_ct_mode, ['none','rct','lct','mkl','idt','sot'], help_message="Change color distribution of src samples close to dst samples. Try all modes to find the best.")
self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.")
self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with large amount of various faces. After that, model can be used to train the fakes more quickly. Forces random_warp=N, random_flips=Y, gan_power=0.0, lr_dropout=N, uniform_yaw=Y")
self.gan_model_changed = (default_gan_patch_size != self.options['gan_patch_size']) or (default_gan_dims != self.options['gan_dims'])
self.pretrain_just_disabled = (default_pretrain == True and self.options['pretrain'] == False)
#override
def on_initialize(self):
device_config = nn.getCurrentDeviceConfig()
devices = device_config.devices
self.model_data_format = "NCHW"
nn.initialize(data_format=self.model_data_format)
tf = nn.tf
self.resolution = resolution = self.options['resolution']
lowest_dense_res = self.lowest_dense_res = resolution // 32
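# e.g. the default resolution of 224 gives a 224//32 = 7x7 spatial grid for the dense latent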
class Downscale(nn.ModelBase):
def __init__(self, in_ch, out_ch, kernel_size=5, **kwargs ):
self.in_ch = in_ch
self.out_ch = out_ch
self.kernel_size = kernel_size
super().__init__(**kwargs)
def on_build(self, *args, **kwargs ):
self.conv1 = nn.Conv2D( self.in_ch, self.out_ch, kernel_size=self.kernel_size, strides=2, padding='SAME')
def forward(self, x):
x = self.conv1(x)
x = tf.nn.leaky_relu(x, 0.1)
return x
def get_out_ch(self):
return self.out_ch
class Upscale(nn.ModelBase):
def on_build(self, in_ch, out_ch, kernel_size=3 ):
self.conv1 = nn.Conv2D( in_ch, out_ch*4, kernel_size=kernel_size, padding='SAME')
def forward(self, x):
x = self.conv1(x)
x = tf.nn.leaky_relu(x, 0.1)
x = nn.depth_to_space(x, 2)
return x
class ResidualBlock(nn.ModelBase):
def on_build(self, ch, kernel_size=3 ):
self.conv1 = nn.Conv2D( ch, ch, kernel_size=kernel_size, padding='SAME')
self.conv2 = nn.Conv2D( ch, ch, kernel_size=kernel_size, padding='SAME')
def forward(self, inp):
x = self.conv1(inp)
x = tf.nn.leaky_relu(x, 0.2)
x = self.conv2(x)
x = tf.nn.leaky_relu(inp+x, 0.2)
return x
class Encoder(nn.ModelBase):
def on_build(self, in_ch, e_ch, ae_ch):
self.down1 = Downscale(in_ch, e_ch, kernel_size=5)
self.res1 = ResidualBlock(e_ch)
self.down2 = Downscale(e_ch, e_ch*2, kernel_size=5)
self.down3 = Downscale(e_ch*2, e_ch*4, kernel_size=5)
self.down4 = Downscale(e_ch*4, e_ch*8, kernel_size=5)
self.down5 = Downscale(e_ch*8, e_ch*8, kernel_size=5)
self.res5 = ResidualBlock(e_ch*8)
self.dense1 = nn.Dense( lowest_dense_res*lowest_dense_res*e_ch*8, ae_ch )
def forward(self, inp):
x = inp
x = self.down1(x)
x = self.res1(x)
x = self.down2(x)
x = self.down3(x)
x = self.down4(x)
x = self.down5(x)
x = self.res5(x)
x = nn.flatten(x)
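# pixel_norm below rescales the flattened code to unit average magnitude before
# the dense bottleneck, keeping the latent scale stable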
x = nn.pixel_norm(x, axes=-1)
x = self.dense1(x)
return x
class Inter(nn.ModelBase):
def __init__(self, ae_ch, ae_out_ch, **kwargs):
self.ae_ch, self.ae_out_ch = ae_ch, ae_out_ch
super().__init__(**kwargs)
def on_build(self):
ae_ch, ae_out_ch = self.ae_ch, self.ae_out_ch
self.dense2 = nn.Dense( ae_ch, lowest_dense_res * lowest_dense_res * ae_out_ch )
def forward(self, inp):
x = inp
x = self.dense2(x)
x = nn.reshape_4D (x, lowest_dense_res, lowest_dense_res, self.ae_out_ch)
return x
def get_out_ch(self):
return self.ae_out_ch
class Decoder(nn.ModelBase):
def on_build(self, in_ch, d_ch, d_mask_ch ):
self.upscale0 = Upscale(in_ch, d_ch*8, kernel_size=3)
self.upscale1 = Upscale(d_ch*8, d_ch*8, kernel_size=3)
self.upscale2 = Upscale(d_ch*8, d_ch*4, kernel_size=3)
self.upscale3 = Upscale(d_ch*4, d_ch*2, kernel_size=3)
self.res0 = ResidualBlock(d_ch*8, kernel_size=3)
self.res1 = ResidualBlock(d_ch*8, kernel_size=3)
self.res2 = ResidualBlock(d_ch*4, kernel_size=3)
self.res3 = ResidualBlock(d_ch*2, kernel_size=3)
self.upscalem0 = Upscale(in_ch, d_mask_ch*8, kernel_size=3)
self.upscalem1 = Upscale(d_mask_ch*8, d_mask_ch*8, kernel_size=3)
self.upscalem2 = Upscale(d_mask_ch*8, d_mask_ch*4, kernel_size=3)
self.upscalem3 = Upscale(d_mask_ch*4, d_mask_ch*2, kernel_size=3)
self.upscalem4 = Upscale(d_mask_ch*2, d_mask_ch*1, kernel_size=3)
self.out_convm = nn.Conv2D( d_mask_ch*1, 1, kernel_size=1, padding='SAME')
self.out_conv = nn.Conv2D( d_ch*2, 3, kernel_size=1, padding='SAME')
self.out_conv1 = nn.Conv2D( d_ch*2, 3, kernel_size=3, padding='SAME')
self.out_conv2 = nn.Conv2D( d_ch*2, 3, kernel_size=3, padding='SAME')
self.out_conv3 = nn.Conv2D( d_ch*2, 3, kernel_size=3, padding='SAME')
def forward(self, inp):
z = inp
x = self.upscale0(z)
x = self.res0(x)
x = self.upscale1(x)
x = self.res1(x)
x = self.upscale2(x)
x = self.res2(x)
x = self.upscale3(x)
x = self.res3(x)
x = tf.nn.sigmoid( nn.depth_to_space(tf.concat( (self.out_conv(x),
self.out_conv1(x),
self.out_conv2(x),
self.out_conv3(x)), nn.conv2d_ch_axis), 2) )
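# the four 3-channel convs are concatenated and rearranged by depth_to_space,
# so each conv effectively predicts one pixel of every 2x2 block of the final
# 2x-upscaled BGR output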
m = self.upscalem0(z)
m = self.upscalem1(m)
m = self.upscalem2(m)
m = self.upscalem3(m)
m = self.upscalem4(m)
m = tf.nn.sigmoid(self.out_convm(m))
return x, m
self.face_type = {'wf' : FaceType.WHOLE_FACE,
'head' : FaceType.HEAD}[ self.options['face_type'] ]
if 'eyes_prio' in self.options:
self.options.pop('eyes_prio')
eyes_mouth_prio = self.options['eyes_mouth_prio']
ae_dims = self.ae_dims = self.options['ae_dims']
e_dims = self.options['e_dims']
d_dims = self.options['d_dims']
d_mask_dims = self.options['d_mask_dims']
morph_factor = self.options['morph_factor']
pretrain = self.pretrain = self.options['pretrain']
if self.pretrain_just_disabled:
self.set_iter(0)
self.gan_power = gan_power = 0.0 if self.pretrain else self.options['gan_power']
random_warp = False if self.pretrain else self.options['random_warp']
random_src_flip = self.random_src_flip if not self.pretrain else True
random_dst_flip = self.random_dst_flip if not self.pretrain else True
if self.pretrain:
self.options_show_override['gan_power'] = 0.0
self.options_show_override['random_warp'] = False
self.options_show_override['lr_dropout'] = 'n'
self.options_show_override['uniform_yaw'] = True
masked_training = self.options['masked_training']
ct_mode = self.options['ct_mode']
if ct_mode == 'none':
ct_mode = None
models_opt_on_gpu = False if len(devices) == 0 else self.options['models_opt_on_gpu']
models_opt_device = nn.tf_default_device_name if models_opt_on_gpu and self.is_training else '/CPU:0'
optimizer_vars_on_cpu = models_opt_device=='/CPU:0'
input_ch=3
bgr_shape = self.bgr_shape = nn.get4Dshape(resolution,resolution,input_ch)
mask_shape = nn.get4Dshape(resolution,resolution,1)
self.model_filename_list = []
with tf.device ('/CPU:0'):
# Placeholders on CPU
self.warped_src = tf.placeholder (nn.floatx, bgr_shape, name='warped_src')
self.warped_dst = tf.placeholder (nn.floatx, bgr_shape, name='warped_dst')
self.target_src = tf.placeholder (nn.floatx, bgr_shape, name='target_src')
self.target_dst = tf.placeholder (nn.floatx, bgr_shape, name='target_dst')
self.target_srcm = tf.placeholder (nn.floatx, mask_shape, name='target_srcm')
self.target_srcm_em = tf.placeholder (nn.floatx, mask_shape, name='target_srcm_em')
self.target_dstm = tf.placeholder (nn.floatx, mask_shape, name='target_dstm')
self.target_dstm_em = tf.placeholder (nn.floatx, mask_shape, name='target_dstm_em')
self.morph_value_t = tf.placeholder (nn.floatx, (1,), name='morph_value_t')
# Initializing model classes
with tf.device (models_opt_device):
self.encoder = Encoder(in_ch=input_ch, e_ch=e_dims, ae_ch=ae_dims, name='encoder')
self.inter_src = Inter(ae_ch=ae_dims, ae_out_ch=ae_dims, name='inter_src')
self.inter_dst = Inter(ae_ch=ae_dims, ae_out_ch=ae_dims, name='inter_dst')
self.decoder = Decoder(in_ch=ae_dims, d_ch=d_dims, d_mask_ch=d_mask_dims, name='decoder')
self.model_filename_list += [ [self.encoder, 'encoder.npy'],
[self.inter_src, 'inter_src.npy'],
[self.inter_dst , 'inter_dst.npy'],
[self.decoder , 'decoder.npy'] ]
if self.is_training:
if gan_power != 0:
self.GAN = nn.UNetPatchDiscriminator(patch_size=self.options['gan_patch_size'], in_ch=input_ch, base_ch=self.options['gan_dims'], name="GAN")
self.model_filename_list += [ [self.GAN, 'GAN.npy'] ]
# Initialize optimizers
lr=5e-5
lr_dropout = 0.3 if self.options['lr_dropout'] in ['y','cpu'] and not self.pretrain else 1.0
clipnorm = 1.0 if self.options['clipgrad'] else 0.0
self.all_weights = self.encoder.get_weights() + self.inter_src.get_weights() + self.inter_dst.get_weights() + self.decoder.get_weights()
if pretrain:
self.trainable_weights = self.encoder.get_weights() + self.inter_dst.get_weights() + self.decoder.get_weights()
else:
self.trainable_weights = self.encoder.get_weights() + self.inter_src.get_weights() + self.inter_dst.get_weights() + self.decoder.get_weights()
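# during pretraining inter_src is excluded from the trainable set (only encoder,
# inter_dst and decoder learn), although the optimizer still tracks all weights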
self.src_dst_opt = nn.AdaBelief(lr=lr, lr_dropout=lr_dropout, clipnorm=clipnorm, name='src_dst_opt')
self.src_dst_opt.initialize_variables (self.all_weights, vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu')
self.model_filename_list += [ (self.src_dst_opt, 'src_dst_opt.npy') ]
if gan_power != 0:
self.GAN_opt = nn.AdaBelief(lr=lr, lr_dropout=lr_dropout, clipnorm=clipnorm, name='GAN_opt')
self.GAN_opt.initialize_variables ( self.GAN.get_weights(), vars_on_cpu=optimizer_vars_on_cpu, lr_dropout_on_cpu=self.options['lr_dropout']=='cpu')#+self.D_src_x2.get_weights()
self.model_filename_list += [ (self.GAN_opt, 'GAN_opt.npy') ]
if self.is_training:
# Adjust batch size for multiple GPU
gpu_count = max(1, len(devices) )
bs_per_gpu = max(1, self.get_batch_size() // gpu_count)
self.set_batch_size( gpu_count*bs_per_gpu)
# Compute losses per GPU
gpu_pred_src_src_list = []
gpu_pred_dst_dst_list = []
gpu_pred_src_dst_list = []
gpu_pred_src_srcm_list = []
gpu_pred_dst_dstm_list = []
gpu_pred_src_dstm_list = []
gpu_src_losses = []
gpu_dst_losses = []
gpu_G_loss_gvs = []
gpu_GAN_loss_gvs = []
gpu_D_code_loss_gvs = []
gpu_D_src_dst_loss_gvs = []
for gpu_id in range(gpu_count):
with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ):
with tf.device(f'/CPU:0'):
# slice on CPU, otherwise all batch data will be transferred to GPU first
batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu )
gpu_warped_src = self.warped_src [batch_slice,:,:,:]
gpu_warped_dst = self.warped_dst [batch_slice,:,:,:]
gpu_target_src = self.target_src [batch_slice,:,:,:]
gpu_target_dst = self.target_dst [batch_slice,:,:,:]
gpu_target_srcm = self.target_srcm[batch_slice,:,:,:]
gpu_target_srcm_em = self.target_srcm_em[batch_slice,:,:,:]
gpu_target_dstm = self.target_dstm[batch_slice,:,:,:]
gpu_target_dstm_em = self.target_dstm_em[batch_slice,:,:,:]
# process model tensors
gpu_src_code = self.encoder (gpu_warped_src)
gpu_dst_code = self.encoder (gpu_warped_dst)
if pretrain:
gpu_src_inter_src_code = self.inter_src (gpu_src_code)
gpu_dst_inter_dst_code = self.inter_dst (gpu_dst_code)
gpu_src_code = gpu_src_inter_src_code * nn.random_binomial( [bs_per_gpu, gpu_src_inter_src_code.shape.as_list()[1], 1,1] , p=morph_factor)
gpu_dst_code = gpu_src_dst_code = gpu_dst_inter_dst_code * nn.random_binomial( [bs_per_gpu, gpu_dst_inter_dst_code.shape.as_list()[1], 1,1] , p=0.25)
else:
gpu_src_inter_src_code = self.inter_src (gpu_src_code)
gpu_src_inter_dst_code = self.inter_dst (gpu_src_code)
gpu_dst_inter_src_code = self.inter_src (gpu_dst_code)
gpu_dst_inter_dst_code = self.inter_dst (gpu_dst_code)
inter_rnd_binomial = nn.random_binomial( [bs_per_gpu, gpu_src_inter_src_code.shape.as_list()[1], 1,1] , p=morph_factor)
gpu_src_code = gpu_src_inter_src_code * inter_rnd_binomial + gpu_src_inter_dst_code * (1-inter_rnd_binomial)
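# each latent channel of the src code is drawn from inter_src with probability
# morph_factor and from inter_dst otherwise, so the decoder learns a per-channel
# blend of the two identity spaces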
gpu_dst_code = gpu_dst_inter_dst_code
ae_dims_slice = tf.cast(ae_dims*self.morph_value_t[0], tf.int32)
gpu_src_dst_code = tf.concat( (tf.slice(gpu_dst_inter_src_code, [0,0,0,0], [-1, ae_dims_slice , lowest_dense_res, lowest_dense_res]),
tf.slice(gpu_dst_inter_dst_code, [0,ae_dims_slice,0,0], [-1,ae_dims-ae_dims_slice, lowest_dense_res,lowest_dense_res]) ), 1 )
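# morphing: the first floor(morph_value*ae_dims) latent channels are taken from
# inter_src and the remainder from inter_dst, so morph 0.0 reproduces dst and
# morph 1.0 gives the full src identity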
gpu_pred_src_src, gpu_pred_src_srcm = self.decoder(gpu_src_code)
gpu_pred_dst_dst, gpu_pred_dst_dstm = self.decoder(gpu_dst_code)
gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder(gpu_src_dst_code)
gpu_pred_src_src_list.append(gpu_pred_src_src)
gpu_pred_dst_dst_list.append(gpu_pred_dst_dst)
gpu_pred_src_dst_list.append(gpu_pred_src_dst)
gpu_pred_src_srcm_list.append(gpu_pred_src_srcm)
gpu_pred_dst_dstm_list.append(gpu_pred_dst_dstm)
gpu_pred_src_dstm_list.append(gpu_pred_src_dstm)
gpu_target_srcm_blur = nn.gaussian_blur(gpu_target_srcm, max(1, resolution // 32) )
gpu_target_srcm_blur = tf.clip_by_value(gpu_target_srcm_blur, 0, 0.5) * 2
gpu_target_dstm_blur = nn.gaussian_blur(gpu_target_dstm, max(1, resolution // 32) )
gpu_target_dstm_blur = tf.clip_by_value(gpu_target_dstm_blur, 0, 0.5) * 2
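# clipping the blurred mask at 0.5 and doubling keeps the interior at a solid 1.0,
# leaving a soft falloff only on the outer half of the blurred edge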
gpu_target_dst_anti_masked = gpu_target_dst*(1.0-gpu_target_dstm_blur)
gpu_target_src_anti_masked = gpu_target_src*(1.0-gpu_target_srcm_blur)
gpu_target_src_masked_opt = gpu_target_src*gpu_target_srcm_blur if masked_training else gpu_target_src
gpu_target_dst_masked_opt = gpu_target_dst*gpu_target_dstm_blur if masked_training else gpu_target_dst
gpu_pred_src_src_masked_opt = gpu_pred_src_src*gpu_target_srcm_blur if masked_training else gpu_pred_src_src
gpu_pred_src_src_anti_masked = gpu_pred_src_src*(1.0-gpu_target_srcm_blur)
gpu_pred_dst_dst_masked_opt = gpu_pred_dst_dst*gpu_target_dstm_blur if masked_training else gpu_pred_dst_dst
gpu_pred_dst_dst_anti_masked = gpu_pred_dst_dst*(1.0-gpu_target_dstm_blur)
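# reconstruction loss below: multi-scale DSSIM (one scale under 256px, two scales
# at 256px and above) plus a pixelwise L2 term, with extra penalties for the
# eyes/mouth region and the predicted mask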
if resolution < 256:
gpu_dst_loss = tf.reduce_mean ( 10*nn.dssim(gpu_target_dst_masked_opt, gpu_pred_dst_dst_masked_opt, max_val=1.0, filter_size=int(resolution/11.6) ), axis=[1])
else:
gpu_dst_loss = tf.reduce_mean ( 5*nn.dssim(gpu_target_dst_masked_opt, gpu_pred_dst_dst_masked_opt, max_val=1.0, filter_size=int(resolution/11.6) ), axis=[1])
gpu_dst_loss += tf.reduce_mean ( 5*nn.dssim(gpu_target_dst_masked_opt, gpu_pred_dst_dst_masked_opt, max_val=1.0, filter_size=int(resolution/23.2) ), axis=[1])
gpu_dst_loss += tf.reduce_mean ( 10*tf.square( gpu_target_dst_masked_opt- gpu_pred_dst_dst_masked_opt ), axis=[1,2,3])
if eyes_mouth_prio:
gpu_dst_loss += tf.reduce_mean ( 300*tf.abs ( gpu_target_dst*gpu_target_dstm_em - gpu_pred_dst_dst*gpu_target_dstm_em ), axis=[1,2,3])
gpu_dst_loss += tf.reduce_mean ( 10*tf.square( gpu_target_dstm - gpu_pred_dst_dstm ),axis=[1,2,3] )
gpu_dst_loss += 0.1*tf.reduce_mean(tf.square(gpu_pred_dst_dst_anti_masked-gpu_target_dst_anti_masked),axis=[1,2,3] )
gpu_dst_losses += [gpu_dst_loss]
if not pretrain:
if resolution < 256:
gpu_src_loss = tf.reduce_mean ( 10*nn.dssim(gpu_target_src_masked_opt, gpu_pred_src_src_masked_opt, max_val=1.0, filter_size=int(resolution/11.6)), axis=[1])
else:
gpu_src_loss = tf.reduce_mean ( 5*nn.dssim(gpu_target_src_masked_opt, gpu_pred_src_src_masked_opt, max_val=1.0, filter_size=int(resolution/11.6)), axis=[1])
gpu_src_loss += tf.reduce_mean ( 5*nn.dssim(gpu_target_src_masked_opt, gpu_pred_src_src_masked_opt, max_val=1.0, filter_size=int(resolution/23.2)), axis=[1])
gpu_src_loss += tf.reduce_mean ( 10*tf.square ( gpu_target_src_masked_opt - gpu_pred_src_src_masked_opt ), axis=[1,2,3])
if eyes_mouth_prio:
gpu_src_loss += tf.reduce_mean ( 300*tf.abs ( gpu_target_src*gpu_target_srcm_em - gpu_pred_src_src*gpu_target_srcm_em ), axis=[1,2,3])
gpu_src_loss += tf.reduce_mean ( 10*tf.square( gpu_target_srcm - gpu_pred_src_srcm ),axis=[1,2,3] )
else:
gpu_src_loss = gpu_dst_loss
gpu_src_losses += [gpu_src_loss]
if pretrain:
gpu_G_loss = gpu_dst_loss
else:
gpu_G_loss = gpu_src_loss + gpu_dst_loss
def DLossOnes(logits):
return tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(logits), logits=logits), axis=[1,2,3])
def DLossZeros(logits):
return tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(logits), logits=logits), axis=[1,2,3])
if gan_power != 0:
gpu_pred_src_src_d, gpu_pred_src_src_d2 = self.GAN(gpu_pred_src_src_masked_opt)
gpu_pred_dst_dst_d, gpu_pred_dst_dst_d2 = self.GAN(gpu_pred_dst_dst_masked_opt)
gpu_target_src_d, gpu_target_src_d2 = self.GAN(gpu_target_src_masked_opt)
gpu_target_dst_d, gpu_target_dst_d2 = self.GAN(gpu_target_dst_masked_opt)
gpu_D_src_dst_loss = (DLossOnes (gpu_target_src_d) + DLossOnes (gpu_target_src_d2) + \
DLossZeros(gpu_pred_src_src_d) + DLossZeros(gpu_pred_src_src_d2) + \
DLossOnes (gpu_target_dst_d) + DLossOnes (gpu_target_dst_d2) + \
DLossZeros(gpu_pred_dst_dst_d) + DLossZeros(gpu_pred_dst_dst_d2)
) * ( 1.0 / 8)
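# the discriminator sees two logit outputs per image over real/fake x src/dst,
# i.e. 8 cross-entropy terms, hence the 1/8 averaging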
gpu_D_src_dst_loss_gvs += [ nn.gradients (gpu_D_src_dst_loss, self.GAN.get_weights() ) ]
gpu_G_loss += (DLossOnes(gpu_pred_src_src_d) + DLossOnes(gpu_pred_src_src_d2) + \
DLossOnes(gpu_pred_dst_dst_d) + DLossOnes(gpu_pred_dst_dst_d2)
) * gan_power
if masked_training:
# Minimal src-src background reconstruction with total_variation_mse to suppress random bright dots from the GAN
gpu_G_loss += 0.000001*nn.total_variation_mse(gpu_pred_src_src)
gpu_G_loss += 0.02*tf.reduce_mean(tf.square(gpu_pred_src_src_anti_masked-gpu_target_src_anti_masked),axis=[1,2,3] )
gpu_G_loss_gvs += [ nn.gradients ( gpu_G_loss, self.trainable_weights ) ]
# Average losses and gradients, and create optimizer update ops
with tf.device(f'/CPU:0'):
pred_src_src = nn.concat(gpu_pred_src_src_list, 0)
pred_dst_dst = nn.concat(gpu_pred_dst_dst_list, 0)
pred_src_dst = nn.concat(gpu_pred_src_dst_list, 0)
pred_src_srcm = nn.concat(gpu_pred_src_srcm_list, 0)
pred_dst_dstm = nn.concat(gpu_pred_dst_dstm_list, 0)
pred_src_dstm = nn.concat(gpu_pred_src_dstm_list, 0)
with tf.device (models_opt_device):
src_loss = tf.concat(gpu_src_losses, 0)
dst_loss = tf.concat(gpu_dst_losses, 0)
src_dst_loss_gv_op = self.src_dst_opt.get_update_op (nn.average_gv_list (gpu_G_loss_gvs))
if gan_power != 0:
src_D_src_dst_loss_gv_op = self.GAN_opt.get_update_op (nn.average_gv_list(gpu_D_src_dst_loss_gvs) )
#GAN_loss_gv_op = self.src_dst_opt.get_update_op (nn.average_gv_list(gpu_GAN_loss_gvs) )
# Initializing training and view functions
def src_dst_train(warped_src, target_src, target_srcm, target_srcm_em, \
warped_dst, target_dst, target_dstm, target_dstm_em, ):
s, d, _ = nn.tf_sess.run ( [ src_loss, dst_loss, src_dst_loss_gv_op],
feed_dict={self.warped_src :warped_src,
self.target_src :target_src,
self.target_srcm:target_srcm,
self.target_srcm_em:target_srcm_em,
self.warped_dst :warped_dst,
self.target_dst :target_dst,
self.target_dstm:target_dstm,
self.target_dstm_em:target_dstm_em,
})
return s, d
self.src_dst_train = src_dst_train
if gan_power != 0:
def D_src_dst_train(warped_src, target_src, target_srcm, target_srcm_em, \
warped_dst, target_dst, target_dstm, target_dstm_em, ):
nn.tf_sess.run ([src_D_src_dst_loss_gv_op], feed_dict={self.warped_src :warped_src,
self.target_src :target_src,
self.target_srcm:target_srcm,
self.target_srcm_em:target_srcm_em,
self.warped_dst :warped_dst,
self.target_dst :target_dst,
self.target_dstm:target_dstm,
self.target_dstm_em:target_dstm_em})
self.D_src_dst_train = D_src_dst_train
def AE_view(warped_src, warped_dst, morph_value):
return nn.tf_sess.run ( [pred_src_src, pred_dst_dst, pred_dst_dstm, pred_src_dst, pred_src_dstm],
feed_dict={self.warped_src:warped_src, self.warped_dst:warped_dst, self.morph_value_t:[morph_value] })
self.AE_view = AE_view
else:
#Initializing merge function
with tf.device( nn.tf_default_device_name if len(devices) != 0 else f'/CPU:0'):
gpu_dst_code = self.encoder (self.warped_dst)
gpu_dst_inter_src_code = self.inter_src ( gpu_dst_code)
gpu_dst_inter_dst_code = self.inter_dst ( gpu_dst_code)
ae_dims_slice = tf.cast(ae_dims*self.morph_value_t[0], tf.int32)
gpu_src_dst_code = tf.concat( ( tf.slice(gpu_dst_inter_src_code, [0,0,0,0], [-1, ae_dims_slice , lowest_dense_res, lowest_dense_res]),
tf.slice(gpu_dst_inter_dst_code, [0,ae_dims_slice,0,0], [-1,ae_dims-ae_dims_slice, lowest_dense_res,lowest_dense_res]) ), 1 )
gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder(gpu_src_dst_code)
_, gpu_pred_dst_dstm = self.decoder(gpu_dst_inter_dst_code)
def AE_merge(warped_dst, morph_value):
return nn.tf_sess.run ( [gpu_pred_src_dst, gpu_pred_dst_dstm, gpu_pred_src_dstm], feed_dict={self.warped_dst:warped_dst, self.morph_value_t:[morph_value] })
self.AE_merge = AE_merge
# Loading/initializing all models/optimizers weights
for model, filename in io.progress_bar_generator(self.model_filename_list, "Initializing models"):
if self.pretrain_just_disabled:
do_init = False
if model == self.inter_src or model == self.inter_dst:
do_init = True
else:
do_init = self.is_first_run()
if self.is_training and gan_power != 0 and model == self.GAN:
if self.gan_model_changed:
do_init = True
if not do_init:
do_init = not model.load_weights( self.get_strpath_storage_for_file(filename) )
if do_init:
model.init_weights()
###############
# initializing sample generators
if self.is_training:
training_data_src_path = self.training_data_src_path if not self.pretrain else self.get_pretraining_data_path()
training_data_dst_path = self.training_data_dst_path if not self.pretrain else self.get_pretraining_data_path()
random_ct_samples_path=training_data_dst_path if ct_mode is not None and not self.pretrain else None
cpu_count = min(multiprocessing.cpu_count(), 8)
src_generators_count = cpu_count // 2
dst_generators_count = cpu_count // 2
if ct_mode is not None:
src_generators_count = int(src_generators_count * 1.5)
self.set_training_data_generators ([
SampleGeneratorFace(training_data_src_path, random_ct_samples_path=random_ct_samples_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
sample_process_options=SampleProcessor.Options(random_flip=random_src_flip),
output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':random_warp, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.EYES_MOUTH, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
],
uniform_yaw_distribution=self.options['uniform_yaw'] or self.pretrain,
generators_count=src_generators_count ),
SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
sample_process_options=SampleProcessor.Options(random_flip=random_dst_flip),
output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':random_warp, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.EYES_MOUTH, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
],
uniform_yaw_distribution=self.options['uniform_yaw'] or self.pretrain,
generators_count=dst_generators_count )
])
self.last_src_samples_loss = []
self.last_dst_samples_loss = []
if self.pretrain_just_disabled:
self.update_sample_for_preview(force_new=True)
def dump_ckpt(self):
tf = nn.tf
with tf.device (nn.tf_default_device_name):
warped_dst = tf.placeholder (nn.floatx, (None, self.resolution, self.resolution, 3), name='in_face')
warped_dst = tf.transpose(warped_dst, (0,3,1,2))
morph_value = tf.placeholder (nn.floatx, (1,), name='morph_value')
gpu_dst_code = self.encoder (warped_dst)
gpu_dst_inter_src_code = self.inter_src ( gpu_dst_code)
gpu_dst_inter_dst_code = self.inter_dst ( gpu_dst_code)
ae_dims_slice = tf.cast(self.ae_dims*morph_value[0], tf.int32)
gpu_src_dst_code = tf.concat( (tf.slice(gpu_dst_inter_src_code, [0,0,0,0], [-1, ae_dims_slice , self.lowest_dense_res, self.lowest_dense_res]),
tf.slice(gpu_dst_inter_dst_code, [0,ae_dims_slice,0,0], [-1,self.ae_dims-ae_dims_slice, self.lowest_dense_res,self.lowest_dense_res]) ), 1 )
gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder(gpu_src_dst_code)
_, gpu_pred_dst_dstm = self.decoder(gpu_dst_inter_dst_code)
gpu_pred_src_dst = tf.transpose(gpu_pred_src_dst, (0,2,3,1))
gpu_pred_dst_dstm = tf.transpose(gpu_pred_dst_dstm, (0,2,3,1))
gpu_pred_src_dstm = tf.transpose(gpu_pred_src_dstm, (0,2,3,1))
tf.identity(gpu_pred_dst_dstm, name='out_face_mask')
tf.identity(gpu_pred_src_dst, name='out_celeb_face')
tf.identity(gpu_pred_src_dstm, name='out_celeb_face_mask')
output_graph_def = tf.graph_util.convert_variables_to_constants(
nn.tf_sess,
tf.get_default_graph().as_graph_def(),
['out_face_mask','out_celeb_face','out_celeb_face_mask']
)
pb_filepath = self.get_strpath_storage_for_file('.pb')
with tf.gfile.GFile(pb_filepath, "wb") as f:
f.write(output_graph_def.SerializeToString())
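# The frozen graph exposes 'in_face' / 'morph_value' as inputs and
# 'out_face_mask' / 'out_celeb_face' / 'out_celeb_face_mask' as outputs.
# A minimal loading sketch (TF1-style API; the filename and resolution are
# assumptions, not fixed by this code):
#   import numpy as np
#   import tensorflow.compat.v1 as tf
#   with tf.gfile.GFile('AMP_model.pb', 'rb') as f:
#       gd = tf.GraphDef()
#       gd.ParseFromString(f.read())
#   with tf.Session() as sess:
#       tf.import_graph_def(gd, name='')
#       face = np.zeros((1, 224, 224, 3), np.float32)  # NHWC, values in [0,1]
#       bgr, = sess.run(['out_celeb_face:0'],
#                       feed_dict={'in_face:0': face, 'morph_value:0': [1.0]})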
#override
def get_model_filename_list(self):
return self.model_filename_list
#override
def onSave(self):
for model, filename in io.progress_bar_generator(self.get_model_filename_list(), "Saving", leave=False):
model.save_weights ( self.get_strpath_storage_for_file(filename) )
#override
def should_save_preview_history(self):
return (not io.is_colab() and self.iter % ( 10*(max(1,self.resolution // 64)) ) == 0) or \
(io.is_colab() and self.iter % 100 == 0)
#override
def onTrainOneIter(self):
bs = self.get_batch_size()
( (warped_src, target_src, target_srcm, target_srcm_em), \
(warped_dst, target_dst, target_dstm, target_dstm_em) ) = self.generate_next_samples()
src_loss, dst_loss = self.src_dst_train (warped_src, target_src, target_srcm, target_srcm_em, warped_dst, target_dst, target_dstm, target_dstm_em)
for i in range(bs):
self.last_src_samples_loss.append ( (target_src[i], target_srcm[i], target_srcm_em[i], src_loss[i] ) )
self.last_dst_samples_loss.append ( (target_dst[i], target_dstm[i], target_dstm_em[i], dst_loss[i] ) )
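# hard-example mining: once 16 batches of per-sample losses have accumulated,
# re-run one training step on the bs highest-loss src and dst samples, then reset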
if len(self.last_src_samples_loss) >= bs*16:
src_samples_loss = sorted(self.last_src_samples_loss, key=operator.itemgetter(3), reverse=True)
dst_samples_loss = sorted(self.last_dst_samples_loss, key=operator.itemgetter(3), reverse=True)
target_src = np.stack( [ x[0] for x in src_samples_loss[:bs] ] )
target_srcm = np.stack( [ x[1] for x in src_samples_loss[:bs] ] )
target_srcm_em = np.stack( [ x[2] for x in src_samples_loss[:bs] ] )
target_dst = np.stack( [ x[0] for x in dst_samples_loss[:bs] ] )
target_dstm = np.stack( [ x[1] for x in dst_samples_loss[:bs] ] )
target_dstm_em = np.stack( [ x[2] for x in dst_samples_loss[:bs] ] )
src_loss, dst_loss = self.src_dst_train (target_src, target_src, target_srcm, target_srcm_em, target_dst, target_dst, target_dstm, target_dstm_em)
self.last_src_samples_loss = []
self.last_dst_samples_loss = []
if self.gan_power != 0:
self.D_src_dst_train (warped_src, target_src, target_srcm, target_srcm_em, warped_dst, target_dst, target_dstm, target_dstm_em)
return ( ('src_loss', np.mean(src_loss) ), ('dst_loss', np.mean(dst_loss) ), )
#override
def onGetPreview(self, samples):
( (warped_src, target_src, target_srcm, target_srcm_em),
(warped_dst, target_dst, target_dstm, target_dstm_em) ) = samples
S, D, SS, DD, DDM_000, _, _ = [ np.clip( nn.to_data_format(x,"NHWC", self.model_data_format), 0.0, 1.0) for x in ([target_src,target_dst] + self.AE_view (target_src, target_dst, 0.0) ) ]
_, _, DDM_025, SD_025, SDM_025 = [ np.clip( nn.to_data_format(x,"NHWC", self.model_data_format), 0.0, 1.0) for x in self.AE_view (target_src, target_dst, 0.25) ]
_, _, DDM_050, SD_050, SDM_050 = [ np.clip( nn.to_data_format(x,"NHWC", self.model_data_format), 0.0, 1.0) for x in self.AE_view (target_src, target_dst, 0.50) ]
_, _, DDM_065, SD_065, SDM_065 = [ np.clip( nn.to_data_format(x,"NHWC", self.model_data_format), 0.0, 1.0) for x in self.AE_view (target_src, target_dst, 0.65) ]
_, _, DDM_075, SD_075, SDM_075 = [ np.clip( nn.to_data_format(x,"NHWC", self.model_data_format), 0.0, 1.0) for x in self.AE_view (target_src, target_dst, 0.75) ]
_, _, DDM_100, SD_100, SDM_100 = [ np.clip( nn.to_data_format(x,"NHWC", self.model_data_format), 0.0, 1.0) for x in self.AE_view (target_src, target_dst, 1.00) ]
(DDM_000,
DDM_025, SDM_025,
DDM_050, SDM_050,
DDM_065, SDM_065,
DDM_075, SDM_075,
DDM_100, SDM_100) = [ np.repeat (x, (3,), -1) for x in (DDM_000,
DDM_025, SDM_025,
DDM_050, SDM_050,
DDM_065, SDM_065,
DDM_075, SDM_075,
DDM_100, SDM_100) ]
target_srcm, target_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format) for x in ([target_srcm, target_dstm] )]
n_samples = min(4, self.get_batch_size(), 800 // self.resolution )
result = []
i = np.random.randint(n_samples)
st = [ np.concatenate ((S[i], D[i], DD[i]*DDM_000[i]), axis=1) ]
st += [ np.concatenate ((SS[i], DD[i], SD_075[i] ), axis=1) ]
result += [ ('AMP morph 0.75', np.concatenate (st, axis=0 )), ]
st = [ np.concatenate ((DD[i], SD_025[i], SD_050[i]), axis=1) ]
st += [ np.concatenate ((SD_065[i], SD_075[i], SD_100[i]), axis=1) ]
result += [ ('AMP morph list', np.concatenate (st, axis=0 )), ]
st = [ np.concatenate ((DD[i], SD_025[i]*DDM_025[i]*SDM_025[i], SD_050[i]*DDM_050[i]*SDM_050[i]), axis=1) ]
st += [ np.concatenate ((SD_065[i]*DDM_065[i]*SDM_065[i], SD_075[i]*DDM_075[i]*SDM_075[i], SD_100[i]*DDM_100[i]*SDM_100[i]), axis=1) ]
result += [ ('AMP morph list masked', np.concatenate (st, axis=0 )), ]
return result
def predictor_func (self, face, morph_value):
face = nn.to_data_format(face[None,...], self.model_data_format, "NHWC")
bgr, mask_dst_dstm, mask_src_dstm = [ nn.to_data_format(x,"NHWC", self.model_data_format).astype(np.float32) for x in self.AE_merge (face, morph_value) ]
return bgr[0], mask_src_dstm[0][...,0], mask_dst_dstm[0][...,0]
#override
def get_MergerConfig(self):
morph_factor = np.clip ( io.input_number ("Morph factor", 0.75, add_info="0.0 .. 1.0"), 0.0, 1.0 )
def predictor_morph(face):
return self.predictor_func(face, morph_factor)
import merger
return predictor_morph, (self.options['resolution'], self.options['resolution'], 3), merger.MergerConfigMasked(face_type=self.face_type, default_mode = 'overlay')
Model = AMPModel
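
Hypothetical merge-time usage of the AMP predictor above, assuming `model` is an already-constructed, trained AMPModel instance; the zero array is a stand-in for a real aligned face crop:

import numpy as np

predictor, (H, W, C), merger_cfg = model.get_MergerConfig()  # prompts once for the morph factor
face = np.zeros((H, W, C), np.float32)                       # stand-in aligned face, NHWC in [0,1]
bgr, mask_src_dstm, mask_dst_dstm = predictor(face)          # morphed face plus its two masks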

View file

@@ -0,0 +1 @@
from .Model import Model

View file

@@ -31,7 +31,7 @@ class QModel(ModelBase):
masked_training = True
models_opt_on_gpu = len(devices) >= 1 and all([dev.total_mem_gb >= 4 for dev in devices])
-models_opt_device = '/GPU:0' if models_opt_on_gpu and self.is_training else '/CPU:0'
+models_opt_device = nn.tf_default_device_name if models_opt_on_gpu and self.is_training else '/CPU:0'
optimizer_vars_on_cpu = models_opt_device=='/CPU:0'
input_ch = 3
@@ -96,7 +96,7 @@ class QModel(ModelBase):
gpu_src_dst_loss_gvs = []
for gpu_id in range(gpu_count):
-with tf.device( f'/GPU:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ):
+with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ):
batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu )
with tf.device(f'/CPU:0'):
# slice on CPU, otherwise all batch data will be transfered to GPU first
@@ -190,7 +190,7 @@ class QModel(ModelBase):
self.AE_view = AE_view
else:
# Initializing merge function
-with tf.device( f'/GPU:0' if len(devices) != 0 else f'/CPU:0'):
+with tf.device( nn.tf_default_device_name if len(devices) != 0 else f'/CPU:0'):
gpu_dst_code = self.inter(self.encoder(self.warped_dst))
gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder_src(gpu_dst_code)
_, gpu_pred_dst_dstm = self.decoder_dst(gpu_dst_code)
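
The repeated device-string change in this and the following models is what enables non-CUDA backends: hard-coded '/GPU:N' names exist only on CUDA builds, so the code now asks each device for its TensorFlow type. A standalone sketch of the pattern, where Device is a stand-in for the core.leras device objects:

# Sketch of the device-naming pattern above; Device is a stand-in for the
# core.leras device objects, which expose tf_dev_type ('GPU' on CUDA builds,
# 'DML' on tensorflow-directml builds).
class Device:
    def __init__(self, index, tf_dev_type):
        self.index = index
        self.tf_dev_type = tf_dev_type

devices = [Device(0, 'DML'), Device(1, 'DML')]
tf_default_device_name = f'/{devices[0].tf_dev_type}:0' if devices else '/CPU:0'

for gpu_id in range(len(devices)):
    # f'/GPU:{gpu_id}' would be wrong on DirectML, where devices are '/DML:N'.
    dev_name = f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if devices else '/CPU:0'
    print(dev_name)  # /DML:0, /DML:1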

View file

@@ -70,7 +70,8 @@ class SAEHDModel(ModelBase):
self.ask_autobackup_hour()
self.ask_write_preview_history()
self.ask_target_iter()
-self.ask_random_flip()
+self.ask_random_src_flip()
+self.ask_random_dst_flip()
self.ask_batch_size(suggest_batch_size)
if self.is_first_run():
@@ -191,7 +192,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
self.options['random_color'] = io.input_bool ("Random color", default_random_color, help_message="Samples are randomly rotated around the L axis in LAB colorspace, helps generalize training")
self.options['clipgrad'] = io.input_bool ("Enable gradient clipping", default_clipgrad, help_message="Gradient clipping reduces chance of model collapse, sacrificing speed of training.")
-self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with large amount of various faces. After that, model can be used to train the fakes more quickly.")
+self.options['pretrain'] = io.input_bool ("Enable pretraining mode", default_pretrain, help_message="Pretrain the model with large amount of various faces. After that, model can be used to train the fakes more quickly. Forces random_warp=N, random_flips=Y, gan_power=0.0, lr_dropout=N, styles=0.0, uniform_yaw=Y")
if self.options['pretrain'] and self.get_pretraining_data_path() is None:
raise Exception("pretraining_data_path is not defined")
@@ -225,6 +226,8 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
archi_type, archi_opts = archi_split
elif len(archi_split) == 1:
archi_type, archi_opts = archi_split[0], None
+self.archi_type = archi_type
ae_dims = self.options['ae_dims']
e_dims = self.options['e_dims']
@@ -238,7 +241,9 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
self.gan_power = gan_power = 0.0 if self.pretrain else self.options['gan_power']
random_warp = False if self.pretrain else self.options['random_warp']
+random_src_flip = self.random_src_flip if not self.pretrain else True
+random_dst_flip = self.random_dst_flip if not self.pretrain else True
if self.pretrain:
self.options_show_override['gan_power'] = 0.0
self.options_show_override['random_warp'] = False
@@ -251,28 +256,29 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
ct_mode = self.options['ct_mode']
if ct_mode == 'none':
ct_mode = None
models_opt_on_gpu = False if len(devices) == 0 else self.options['models_opt_on_gpu']
-models_opt_device = '/GPU:0' if models_opt_on_gpu and self.is_training else '/CPU:0'
+models_opt_device = nn.tf_default_device_name if models_opt_on_gpu and self.is_training else '/CPU:0'
optimizer_vars_on_cpu = models_opt_device=='/CPU:0'
input_ch=3
-bgr_shape = nn.get4Dshape(resolution,resolution,input_ch)
+bgr_shape = self.bgr_shape = nn.get4Dshape(resolution,resolution,input_ch)
mask_shape = nn.get4Dshape(resolution,resolution,1)
self.model_filename_list = []
with tf.device ('/CPU:0'):
#Place holders on CPU
-self.warped_src = tf.placeholder (nn.floatx, bgr_shape)
-self.warped_dst = tf.placeholder (nn.floatx, bgr_shape)
-self.target_src = tf.placeholder (nn.floatx, bgr_shape)
-self.target_dst = tf.placeholder (nn.floatx, bgr_shape)
-self.target_srcm = tf.placeholder (nn.floatx, mask_shape)
-self.target_srcm_em = tf.placeholder (nn.floatx, mask_shape)
-self.target_dstm = tf.placeholder (nn.floatx, mask_shape)
-self.target_dstm_em = tf.placeholder (nn.floatx, mask_shape)
+self.warped_src = tf.placeholder (nn.floatx, bgr_shape, name='warped_src')
+self.warped_dst = tf.placeholder (nn.floatx, bgr_shape, name='warped_dst')
+self.target_src = tf.placeholder (nn.floatx, bgr_shape, name='target_src')
+self.target_dst = tf.placeholder (nn.floatx, bgr_shape, name='target_dst')
+self.target_srcm = tf.placeholder (nn.floatx, mask_shape, name='target_srcm')
+self.target_srcm_em = tf.placeholder (nn.floatx, mask_shape, name='target_srcm_em')
+self.target_dstm = tf.placeholder (nn.floatx, mask_shape, name='target_dstm')
+self.target_dstm_em = tf.placeholder (nn.floatx, mask_shape, name='target_dstm_em')
# Initializing model classes
model_archi = nn.DeepFakeArchi(resolution, opts=archi_opts)
@@ -361,7 +367,6 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
bs_per_gpu = max(1, self.get_batch_size() // gpu_count)
self.set_batch_size( gpu_count*bs_per_gpu)
# Compute losses per GPU
gpu_pred_src_src_list = []
gpu_pred_dst_dst_list = []
@@ -375,9 +380,9 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
gpu_G_loss_gvs = []
gpu_D_code_loss_gvs = []
gpu_D_src_dst_loss_gvs = []
for gpu_id in range(gpu_count):
-with tf.device( f'/GPU:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ):
+with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ):
with tf.device(f'/CPU:0'):
# slice on CPU, otherwise all batch data will be transfered to GPU first
batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu )
@@ -679,7 +684,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
self.AE_view = AE_view
else:
# Initializing merge function
-with tf.device( f'/GPU:0' if len(devices) != 0 else f'/CPU:0'):
+with tf.device( nn.tf_default_device_name if len(devices) != 0 else f'/CPU:0'):
if 'df' in archi_type:
gpu_dst_code = self.inter(self.encoder(self.warped_dst))
gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder_src(gpu_dst_code)
@@ -722,7 +727,10 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
if do_init:
model.init_weights()
+###############
# initializing sample generators
if self.is_training:
training_data_src_path = self.training_data_src_path if not self.pretrain else self.get_pretraining_data_path()
@@ -744,7 +752,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
self.set_training_data_generators ([
SampleGeneratorFace(training_data_src_path, random_ct_samples_path=random_ct_samples_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
-sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
+sample_process_options=SampleProcessor.Options(random_flip=random_src_flip),
output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':random_warp, 'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
@@ -754,7 +762,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
generators_count=src_generators_count ),
SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
-sample_process_options=SampleProcessor.Options(random_flip=self.random_flip),
+sample_process_options=SampleProcessor.Options(random_flip=random_dst_flip),
output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':random_warp, 'transform':True, 'channel_type' : channel_type, 'ct_mode': fs_aug, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : channel_type, 'ct_mode': fs_aug, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
{'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False , 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
@@ -769,7 +777,44 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
if self.pretrain_just_disabled:
self.update_sample_for_preview(force_new=True)
def dump_ckpt(self):
tf = nn.tf
with tf.device ('/CPU:0'):
warped_dst = tf.placeholder (nn.floatx, (None, self.resolution, self.resolution, 3), name='in_face')
warped_dst = tf.transpose(warped_dst, (0,3,1,2))
if 'df' in self.archi_type:
gpu_dst_code = self.inter(self.encoder(warped_dst))
gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder_src(gpu_dst_code)
_, gpu_pred_dst_dstm = self.decoder_dst(gpu_dst_code)
elif 'liae' in self.archi_type:
gpu_dst_code = self.encoder (warped_dst)
gpu_dst_inter_B_code = self.inter_B (gpu_dst_code)
gpu_dst_inter_AB_code = self.inter_AB (gpu_dst_code)
gpu_dst_code = tf.concat([gpu_dst_inter_B_code,gpu_dst_inter_AB_code], nn.conv2d_ch_axis)
gpu_src_dst_code = tf.concat([gpu_dst_inter_AB_code,gpu_dst_inter_AB_code], nn.conv2d_ch_axis)
gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder(gpu_src_dst_code)
_, gpu_pred_dst_dstm = self.decoder(gpu_dst_code)
gpu_pred_src_dst = tf.transpose(gpu_pred_src_dst, (0,2,3,1))
gpu_pred_dst_dstm = tf.transpose(gpu_pred_dst_dstm, (0,2,3,1))
gpu_pred_src_dstm = tf.transpose(gpu_pred_src_dstm, (0,2,3,1))
saver = tf.train.Saver()
tf.identity(gpu_pred_dst_dstm, name='out_face_mask')
tf.identity(gpu_pred_src_dst, name='out_celeb_face')
tf.identity(gpu_pred_src_dstm, name='out_celeb_face_mask')
saver.save(nn.tf_sess, self.get_strpath_storage_for_file('.ckpt') )
#override
def get_model_filename_list(self):
return self.model_filename_list
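
A sketch (not part of the commit) of consuming the checkpoint written by dump_ckpt above: with the same graph already built, restore the weights and run the named endpoints. The 'model.ckpt' path and 224 resolution are placeholders:

import numpy as np

tf = nn.tf
sess = nn.tf_sess                                     # session the graph was built in
tf.train.Saver().restore(sess, 'model.ckpt')          # placeholder checkpoint path

graph = sess.graph
in_face = graph.get_tensor_by_name('in_face:0')       # NHWC float input in [0,1]
out_celeb = graph.get_tensor_by_name('out_celeb_face:0')
out_mask = graph.get_tensor_by_name('out_celeb_face_mask:0')

face = np.zeros((1, 224, 224, 3), np.float32)         # placeholder face batch
celeb, mask = sess.run([out_celeb, out_mask], {in_face: face})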

View file

@@ -52,7 +52,7 @@ class XSegModel(ModelBase):
'head' : FaceType.HEAD}[ self.options['face_type'] ]
place_model_on_cpu = len(devices) == 0
-models_opt_device = '/CPU:0' if place_model_on_cpu else '/GPU:0'
+models_opt_device = '/CPU:0' if place_model_on_cpu else nn.tf_default_device_name
bgr_shape = nn.get4Dshape(resolution,resolution,3)
mask_shape = nn.get4Dshape(resolution,resolution,1)
@@ -83,7 +83,7 @@ class XSegModel(ModelBase):
for gpu_id in range(gpu_count):
-with tf.device( f'/GPU:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ):
+with tf.device(f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ):
with tf.device(f'/CPU:0'):
# slice on CPU, otherwise all batch data will be transfered to GPU first
batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu )
@@ -95,6 +95,7 @@ class XSegModel(ModelBase):
gpu_pred_list.append(gpu_pred_t)
gpu_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=gpu_target_t, logits=gpu_pred_logits_t), axis=[1,2,3])
gpu_losses += [gpu_loss]
gpu_loss_gvs += [ nn.gradients ( gpu_loss, self.model.get_weights() ) ]

View file

@@ -1,12 +1,12 @@
tqdm
numpy==1.19.3
-h5py==2.9.0
+h5py==2.10.0
opencv-python==4.1.0.25
ffmpeg-python==0.1.17
scikit-image==0.14.2
scipy==1.4.1
colorama
-tensorflow-gpu==2.3.1
+tensorflow-gpu==2.4.0
pyqt5
Flask==1.1.1
flask-socketio==4.2.1
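
The pins above move h5py from 2.9.0 to 2.10.0 and tensorflow-gpu from 2.3.1 to 2.4.0. An optional environment check, not part of the commit, assuming Python 3.8+ for importlib.metadata:

from importlib.metadata import version

for pkg, pinned in [('h5py', '2.10.0'), ('tensorflow-gpu', '2.4.0')]:
    installed = version(pkg)                # raises PackageNotFoundError if missing
    status = 'OK' if installed == pinned else f'expected {pinned}'
    print(f'{pkg} {installed} ({status})')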

View file

@@ -84,17 +84,18 @@ class PackedFaceset():
of.write ( struct.pack("Q", offset) )
of.seek(0,2)
of.close()
-for filename in io.progress_bar_generator(image_paths, "Deleting files"):
-    Path(filename).unlink()
-if as_person_faceset:
-    for dir_name in io.progress_bar_generator(dir_names, "Deleting dirs"):
-        dir_path = samples_path / dir_name
-        try:
-            shutil.rmtree(dir_path)
-        except:
-            io.log_info (f"unable to remove: {dir_path} ")
+if io.input_bool(f"Delete original files?", True):
+    for filename in io.progress_bar_generator(image_paths, "Deleting files"):
+        Path(filename).unlink()
+    if as_person_faceset:
+        for dir_name in io.progress_bar_generator(dir_names, "Deleting dirs"):
+            dir_path = samples_path / dir_name
+            try:
+                shutil.rmtree(dir_path)
+            except:
+                io.log_info (f"unable to remove: {dir_path} ")
@staticmethod
def unpack(samples_path):

View file

@@ -6,7 +6,7 @@ from enum import IntEnum
import cv2
import numpy as np
+from pathlib import Path
from core import imagelib, mplib, pathex
from core.imagelib import sd
from core.cv2ex import *
@@ -31,7 +31,7 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase):
if len(seg_sample_idxs) == 0:
raise Exception(f"No segmented faces found.")
else:
io.log_info(f"Using {len(seg_sample_idxs)} xseg labeled samples.")
else:
io.log_info(f"Using {len(seg_sample_idxs)} segmented samples.")
@@ -40,11 +40,11 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase):
else:
self.generators_count = max(1, generators_count)
+args = (samples, seg_sample_idxs, resolution, face_type, data_format)
if self.debug:
-self.generators = [ThisThreadGenerator ( self.batch_func, (samples, seg_sample_idxs, resolution, face_type, data_format) )]
+self.generators = [ThisThreadGenerator ( self.batch_func, args )]
else:
-self.generators = [SubprocessGenerator ( self.batch_func, (samples, seg_sample_idxs, resolution, face_type, data_format), start_now=False ) \
-for i in range(self.generators_count) ]
+self.generators = [SubprocessGenerator ( self.batch_func, args, start_now=False ) for i in range(self.generators_count) ]
SubprocessGenerator.start_in_parallel( self.generators )
@@ -77,17 +77,19 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase):
ty_range=[-0.05, 0.05]
random_bilinear_resize_chance, random_bilinear_resize_max_size_per = 25,75
+sharpen_chance, sharpen_kernel_max_size = 25, 5
motion_blur_chance, motion_blur_mb_max_size = 25, 5
gaussian_blur_chance, gaussian_blur_kernel_max_size = 25, 5
+random_jpeg_compress_chance = 25
def gen_img_mask(sample):
img = sample.load_bgr()
h,w,c = img.shape
if sample.seg_ie_polys.has_polys():
mask = np.zeros ((h,w,1), dtype=np.float32)
sample.seg_ie_polys.overlay_mask(mask)
elif sample.has_xseg_mask():
mask = sample.get_xseg_mask()
mask[mask < 0.5] = 0.0
mask[mask >= 0.5] = 1.0
@@ -121,7 +123,6 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase):
img, mask = gen_img_mask(sample)
if np.random.randint(2) == 0:
if len(bg_shuffle_idxs) == 0:
bg_shuffle_idxs = seg_sample_idxs.copy()
np.random.shuffle(bg_shuffle_idxs)
@@ -130,29 +131,57 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase):
bg_img, bg_mask = gen_img_mask(bg_sample)
bg_wp = imagelib.gen_warp_params(resolution, True, rotation_range=[-180,180], scale_range=[-0.10, 0.10], tx_range=[-0.10, 0.10], ty_range=[-0.10, 0.10] )
-bg_img = imagelib.warp_by_params (bg_wp, bg_img, can_warp=False, can_transform=True, can_flip=True, border_replicate=False)
+bg_img = imagelib.warp_by_params (bg_wp, bg_img, can_warp=False, can_transform=True, can_flip=True, border_replicate=True)
bg_mask = imagelib.warp_by_params (bg_wp, bg_mask, can_warp=False, can_transform=True, can_flip=True, border_replicate=False)
+bg_img = bg_img*(1-bg_mask)
+if np.random.randint(2) == 0:
+    bg_img = imagelib.apply_random_hsv_shift(bg_img)
+else:
+    bg_img = imagelib.apply_random_rgb_levels(bg_img)
-c_mask = (1-bg_mask) * (1-mask)
-img = img*(1-c_mask) + bg_img * c_mask
+c_mask = 1.0 - (1-bg_mask) * (1-mask)
+rnd = 0.15 + np.random.uniform()*0.85
+img = img*(c_mask) + img*(1-c_mask)*rnd + bg_img*(1-c_mask)*(1-rnd)
warp_params = imagelib.gen_warp_params(resolution, random_flip, rotation_range=rotation_range, scale_range=scale_range, tx_range=tx_range, ty_range=ty_range )
-img = imagelib.warp_by_params (warp_params, img, can_warp=True, can_transform=True, can_flip=True, border_replicate=False)
+img = imagelib.warp_by_params (warp_params, img, can_warp=True, can_transform=True, can_flip=True, border_replicate=True)
mask = imagelib.warp_by_params (warp_params, mask, can_warp=True, can_transform=True, can_flip=True, border_replicate=False)
img = np.clip(img.astype(np.float32), 0, 1)
mask[mask < 0.5] = 0.0
mask[mask >= 0.5] = 1.0
mask = np.clip(mask, 0, 1)
+if np.random.randint(2) == 0:
+    # random face flare
+    krn = np.random.randint( resolution//4, resolution )
+    krn = krn - krn % 2 + 1
+    img = img + cv2.GaussianBlur(img*mask, (krn,krn), 0)
+if np.random.randint(2) == 0:
+    # random bg flare
+    krn = np.random.randint( resolution//4, resolution )
+    krn = krn - krn % 2 + 1
+    img = img + cv2.GaussianBlur(img*(1-mask), (krn,krn), 0)
if np.random.randint(2) == 0:
img = imagelib.apply_random_hsv_shift(img, mask=sd.random_circle_faded ([resolution,resolution]))
else:
img = imagelib.apply_random_rgb_levels(img, mask=sd.random_circle_faded ([resolution,resolution]))
+if np.random.randint(2) == 0:
+    img = imagelib.apply_random_sharpen( img, sharpen_chance, sharpen_kernel_max_size, mask=sd.random_circle_faded ([resolution,resolution]))
+else:
+    img = imagelib.apply_random_motion_blur( img, motion_blur_chance, motion_blur_mb_max_size, mask=sd.random_circle_faded ([resolution,resolution]))
+    img = imagelib.apply_random_gaussian_blur( img, gaussian_blur_chance, gaussian_blur_kernel_max_size, mask=sd.random_circle_faded ([resolution,resolution]))
+if np.random.randint(2) == 0:
+    img = imagelib.apply_random_nearest_resize( img, random_bilinear_resize_chance, random_bilinear_resize_max_size_per, mask=sd.random_circle_faded ([resolution,resolution]))
+else:
+    img = imagelib.apply_random_bilinear_resize( img, random_bilinear_resize_chance, random_bilinear_resize_max_size_per, mask=sd.random_circle_faded ([resolution,resolution]))
+img = np.clip(img, 0, 1)
-img = imagelib.apply_random_motion_blur( img, motion_blur_chance, motion_blur_mb_max_size, mask=sd.random_circle_faded ([resolution,resolution]))
-img = imagelib.apply_random_gaussian_blur( img, gaussian_blur_chance, gaussian_blur_kernel_max_size, mask=sd.random_circle_faded ([resolution,resolution]))
-img = imagelib.apply_random_bilinear_resize( img, random_bilinear_resize_chance, random_bilinear_resize_max_size_per, mask=sd.random_circle_faded ([resolution,resolution]))
+img = imagelib.apply_random_jpeg_compress( img, random_jpeg_compress_chance, mask=sd.random_circle_faded ([resolution,resolution]))
if data_format == "NCHW":
img = np.transpose(img, (2,0,1) )
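
The compositing change above is easiest to see in isolation: c_mask is now the union of the two face masks, and outside that union the sample is mixed with the background at a random opacity instead of being hard-replaced. A standalone numpy sketch with illustrative shapes:

import numpy as np

h = w = 4
img = np.random.rand(h, w, 3).astype(np.float32)             # face sample
bg_img = np.random.rand(h, w, 3).astype(np.float32)          # warped background sample
mask = np.zeros((h, w, 1), np.float32); mask[1:3, 1:3] = 1.0 # sample's face mask
bg_mask = np.zeros((h, w, 1), np.float32)                    # background's face mask

c_mask = 1.0 - (1 - bg_mask) * (1 - mask)                    # 1 where either face is present
rnd = 0.15 + np.random.uniform() * 0.85                      # random opacity for the mix
out = img * c_mask + img * (1 - c_mask) * rnd + bg_img * (1 - c_mask) * (1 - rnd)

assert out.shape == img.shape                                # faces kept, background blended in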
@@ -221,4 +250,48 @@ class SegmentedSampleFilterSubprocessor(Subprocessor):
if self.count_xseg_mask:
return idx, self.samples[idx].has_xseg_mask()
else:
return idx, self.samples[idx].seg_ie_polys.get_pts_count() != 0
"""
bg_path = None
for path in paths:
bg_path = Path(path) / 'backgrounds'
if bg_path.exists():
break
if bg_path is None:
io.log_info(f'Random backgrounds will not be used. Place no face jpg images to aligned\backgrounds folder. ')
bg_pathes = None
else:
bg_pathes = pathex.get_image_paths(bg_path, image_extensions=['.jpg'], return_Path_class=True)
io.log_info(f'Using {len(bg_pathes)} random backgrounds from {bg_path}')
if bg_pathes is not None:
bg_path = bg_pathes[ np.random.randint(len(bg_pathes)) ]
bg_img = cv2_imread(bg_path)
if bg_img is not None:
bg_img = bg_img.astype(np.float32) / 255.0
bg_img = imagelib.normalize_channels(bg_img, 3)
bg_img = imagelib.random_crop(bg_img, resolution, resolution)
bg_img = cv2.resize(bg_img, (resolution, resolution), interpolation=cv2.INTER_LINEAR)
if np.random.randint(2) == 0:
bg_img = imagelib.apply_random_hsv_shift(bg_img)
else:
bg_img = imagelib.apply_random_rgb_levels(bg_img)
bg_wp = imagelib.gen_warp_params(resolution, True, rotation_range=[-180,180], scale_range=[0,0], tx_range=[0,0], ty_range=[0,0])
bg_img = imagelib.warp_by_params (bg_wp, bg_img, can_warp=False, can_transform=True, can_flip=True, border_replicate=True)
bg = img*(1-mask)
fg = img*mask
c_mask = sd.random_circle_faded ([resolution,resolution])
bg = ( bg_img*c_mask + bg*(1-c_mask) )*(1-mask)
img = fg+bg
else:
"""