Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-08-14 02:37:00 -07:00)
New script:

5.XSeg) data_dst/src mask for XSeg trainer - fetch.bat
  Copies faces containing XSeg polygons to the aligned_xseg\ dir.
  Useful only if you want to collect labeled faces and reuse them in other fakes.

A trained XSeg mask can now be used in the SAEHD training process. This means the default 'full_face' mask obtained from landmarks will be replaced with the mask obtained from the trained XSeg model. To do this, use
5.XSeg.optional) trained mask for data_dst/data_src - apply.bat
5.XSeg.optional) trained mask for data_dst/data_src - remove.bat
Normally you don't need this; use it if you want to use 'face_style' and 'bg_style' with obstructions.

XSeg trainer: you can now choose the type of face.
XSeg trainer: you can now restart training in "override settings".
Merger: XSeg-* modes can now be used with all face types.

The old MaskEditor, FANSEG models, and FAN-x merger modes have therefore been removed: the new XSeg solution is better, simpler, and more convenient, and costs only about an hour of manual masking for a regular deepfake.
parent e5bad483ca
commit 6d3607a13d
30 changed files with 279 additions and 1520 deletions
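For readers who want the gist of the new fetch step without reading the full diff, here is a minimal sketch of what it boils down to. It is written against helpers that appear later in this commit (pathex.get_image_paths, DFLIMG.load, get_seg_ie_polys, has_polys); the function name collect_labeled_faces is illustrative only — the real implementation is fetch_xseg() in the XSeg utility module further down.

# Sketch of the "fetch" step: copy every aligned face whose embedded metadata
# already contains XSeg polygons into a sibling <aligned>_xseg folder, so the
# labeled faces can be reused in other projects.
import shutil
from pathlib import Path

from core import pathex
from DFLIMG import DFLIMG


def collect_labeled_faces(input_dir):          # illustrative name, not in the repo
    input_path = Path(input_dir)
    output_path = input_path.parent / (input_path.name + '_xseg')
    output_path.mkdir(exist_ok=True, parents=True)

    copied = 0
    for filepath in pathex.get_image_paths(input_path, return_Path_class=True):
        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            continue  # not a DFL face image, skip it

        polys = dflimg.get_seg_ie_polys()
        if polys is not None and polys.has_polys():
            # the face carries hand-labeled XSeg polygons -> keep a copy
            shutil.copy(str(filepath), str(output_path / filepath.name))
            copied += 1
    return copied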
@@ -14,7 +14,7 @@ import numpy as np
import facelib
from core import imagelib
from core import mathlib
from facelib import FaceType, LandmarksProcessor, TernausNet
from facelib import FaceType, LandmarksProcessor
from core.interact import interact as io
from core.joblib import Subprocessor
from core.leras import nn
@@ -1,571 +0,0 @@
import os
import sys
import time
import traceback
from pathlib import Path

import cv2
import numpy as np
import numpy.linalg as npl

from core import imagelib
from DFLIMG import *
from facelib import LandmarksProcessor
from core.imagelib import IEPolys
from core.interact import interact as io
from core import pathex
from core.cv2ex import *


class MaskEditor:
    STATE_NONE=0
    STATE_MASKING=1

    def __init__(self, img, prev_images, next_images, mask=None, ie_polys=None, get_status_lines_func=None):
        self.img = imagelib.normalize_channels (img,3)
        h, w, c = img.shape

        if h != w and w != 256:
            #to support any square res, scale img,mask and ie_polys to 256, then scale ie_polys back on .get_ie_polys()
            raise Exception ("MaskEditor does not support image size != 256x256")

        ph, pw = h // 4, w // 4 #pad wh

        self.prev_images = prev_images
        self.next_images = next_images

        if mask is not None:
            self.mask = imagelib.normalize_channels (mask,3)
        else:
            self.mask = np.zeros ( (h,w,3) )
        self.get_status_lines_func = get_status_lines_func

        self.state_prop = self.STATE_NONE

        self.w, self.h = w, h
        self.pw, self.ph = pw, ph
        self.pwh = np.array([self.pw, self.ph])
        self.pwh2 = np.array([self.pw*2, self.ph*2])
        self.sw, self.sh = w+pw*2, h+ph*2
        self.prwh = 64 #preview wh

        if ie_polys is None:
            ie_polys = IEPolys()
        self.ie_polys = ie_polys

        self.polys_mask = None
        self.preview_images = None

        self.mouse_x = self.mouse_y = 9999
        self.screen_status_block = None
        self.screen_status_block_dirty = True
        self.screen_changed = True

    def set_state(self, state):
        self.state = state

    @property
    def state(self):
        return self.state_prop

    @state.setter
    def state(self, value):
        self.state_prop = value
        if value == self.STATE_MASKING:
            self.ie_polys.dirty = True

    def get_mask(self):
        if self.ie_polys.switch_dirty():
            self.screen_status_block_dirty = True
            self.ie_mask = img = self.mask.copy()

            self.ie_polys.overlay_mask(img)

            return img
        return self.ie_mask

    def get_screen_overlay(self):
        img = np.zeros ( (self.sh, self.sw, 3) )

        if self.state == self.STATE_MASKING:
            mouse_xy = self.mouse_xy.copy() + self.pwh
            l = self.ie_polys.n_list()
            if l.n > 0:
                p = l.cur_point().copy() + self.pwh
                color = (0,1,0) if l.type == 1 else (0,0,1)
                cv2.line(img, tuple(p), tuple(mouse_xy), color )

        return img

    def undo_to_begin_point(self):
        while not self.undo_point():
            pass

    def undo_point(self):
        self.screen_changed = True
        if self.state == self.STATE_NONE:
            if self.ie_polys.n > 0:
                self.state = self.STATE_MASKING

        if self.state == self.STATE_MASKING:
            if self.ie_polys.n_list().n_dec() == 0 and \
               self.ie_polys.n_dec() == 0:
                self.state = self.STATE_NONE
            else:
                return False

        return True

    def redo_to_end_point(self):
        while not self.redo_point():
            pass

    def redo_point(self):
        self.screen_changed = True
        if self.state == self.STATE_NONE:
            if self.ie_polys.n_max > 0:
                self.state = self.STATE_MASKING
                if self.ie_polys.n == 0:
                    self.ie_polys.n_inc()

        if self.state == self.STATE_MASKING:
            while True:
                l = self.ie_polys.n_list()
                if l.n_inc() == l.n_max:
                    if self.ie_polys.n == self.ie_polys.n_max:
                        break
                    self.ie_polys.n_inc()
                else:
                    return False

        return True

    def combine_screens(self, screens):

        screens_len = len(screens)

        new_screens = []
        for screen, padded_overlay in screens:
            screen_img = np.zeros( (self.sh, self.sw, 3), dtype=np.float32 )

            screen = imagelib.normalize_channels (screen, 3)
            h,w,c = screen.shape

            screen_img[self.ph:-self.ph, self.pw:-self.pw, :] = screen

            if padded_overlay is not None:
                screen_img = screen_img + padded_overlay

            screen_img = np.clip(screen_img*255, 0, 255).astype(np.uint8)
            new_screens.append(screen_img)

        return np.concatenate (new_screens, axis=1)

    def get_screen_status_block(self, w, c):
        if self.screen_status_block_dirty:
            self.screen_status_block_dirty = False
            lines = [
                'Polys current/max = %d/%d' % (self.ie_polys.n, self.ie_polys.n_max),
                ]
            if self.get_status_lines_func is not None:
                lines += self.get_status_lines_func()

            lines_count = len(lines)


            h_line = 21
            h = lines_count * h_line
            img = np.ones ( (h,w,c) ) * 0.1

            for i in range(lines_count):
                img[ i*h_line:(i+1)*h_line, 0:w] += \
                    imagelib.get_text_image ( (h_line,w,c), lines[i], color=[0.8]*c )

            self.screen_status_block = np.clip(img*255, 0, 255).astype(np.uint8)

        return self.screen_status_block

    def set_screen_status_block_dirty(self):
        self.screen_status_block_dirty = True

    def set_screen_changed(self):
        self.screen_changed = True

    def switch_screen_changed(self):
        result = self.screen_changed
        self.screen_changed = False
        return result

    def make_screen(self):
        screen_overlay = self.get_screen_overlay()
        final_mask = self.get_mask()

        masked_img = self.img*final_mask*0.5 + self.img*(1-final_mask)

        pink = np.full ( (self.h, self.w, 3), (1,0,1) )
        pink_masked_img = self.img*final_mask + pink*(1-final_mask)




        screens = [ (self.img, screen_overlay),
                    (masked_img, screen_overlay),
                    (pink_masked_img, screen_overlay),
                    ]
        screens = self.combine_screens(screens)

        if self.preview_images is None:
            sh,sw,sc = screens.shape

            prh, prw = self.prwh, self.prwh

            total_w = sum ([ img.shape[1] for (t,img) in self.prev_images ]) + \
                      sum ([ img.shape[1] for (t,img) in self.next_images ])

            total_images_len = len(self.prev_images) + len(self.next_images)

            max_hor_images_count = sw // prw
            max_side_images_count = (max_hor_images_count - 1) // 2

            prev_images = self.prev_images[-max_side_images_count:]
            next_images = self.next_images[:max_side_images_count]

            border = 2

            max_wh_bordered = (prw-border*2, prh-border*2)

            prev_images = [ (t, cv2.resize( imagelib.normalize_channels(img, 3), max_wh_bordered )) for t,img in prev_images ]
            next_images = [ (t, cv2.resize( imagelib.normalize_channels(img, 3), max_wh_bordered )) for t,img in next_images ]

            for images in [prev_images, next_images]:
                for i, (t, img) in enumerate(images):
                    new_img = np.zeros ( (prh,prw, sc) )
                    new_img[border:-border,border:-border] = img

                    if t == 2:
                        cv2.line (new_img, ( prw//2, int(prh//1.5) ), (int(prw/1.5), prh ) , (0,1,0), thickness=2 )
                        cv2.line (new_img, ( int(prw/1.5), prh ), ( prw, prh // 2 ) , (0,1,0), thickness=2 )
                    elif t == 1:
                        cv2.line (new_img, ( prw//2, prh//2 ), ( prw, prh ) , (0,0,1), thickness=2 )
                        cv2.line (new_img, ( prw//2, prh ), ( prw, prh // 2 ) , (0,0,1), thickness=2 )

                    images[i] = new_img


            preview_images = []
            if len(prev_images) > 0:
                preview_images += [ np.concatenate (prev_images, axis=1) ]

            img = np.full ( (prh,prw, sc), (0,0,1), dtype=np.float )
            img[border:-border,border:-border] = cv2.resize( self.img, max_wh_bordered )

            preview_images += [ img ]

            if len(next_images) > 0:
                preview_images += [ np.concatenate (next_images, axis=1) ]

            preview_images = np.concatenate ( preview_images, axis=1 )

            left_pad = sw // 2 - len(prev_images) * prw - prw // 2
            right_pad = sw // 2 - len(next_images) * prw - prw // 2

            preview_images = np.concatenate ([np.zeros ( (preview_images.shape[0], left_pad, preview_images.shape[2]) ),
                                              preview_images,
                                              np.zeros ( (preview_images.shape[0], right_pad, preview_images.shape[2]) )
                                              ], axis=1)
            self.preview_images = np.clip(preview_images * 255, 0, 255 ).astype(np.uint8)

        status_img = self.get_screen_status_block( screens.shape[1], screens.shape[2] )

        result = np.concatenate ( [self.preview_images, screens, status_img], axis=0 )

        return result

    def mask_finish(self, n_clip=True):
        if self.state == self.STATE_MASKING:
            self.screen_changed = True
            if self.ie_polys.n_list().n <= 2:
                self.ie_polys.n_dec()
            self.state = self.STATE_NONE
            if n_clip:
                self.ie_polys.n_clip()

    def set_mouse_pos(self,x,y):
        if self.preview_images is not None:
            y -= self.preview_images.shape[0]

        mouse_x = x % (self.sw) - self.pw
        mouse_y = y % (self.sh) - self.ph



        if mouse_x != self.mouse_x or mouse_y != self.mouse_y:
            self.mouse_xy = np.array( [mouse_x, mouse_y] )
            self.mouse_x, self.mouse_y = self.mouse_xy
            self.screen_changed = True

    def mask_point(self, type):
        self.screen_changed = True
        if self.state == self.STATE_MASKING and \
           self.ie_polys.n_list().type != type:
            self.mask_finish()

        elif self.state == self.STATE_NONE:
            self.state = self.STATE_MASKING
            self.ie_polys.add(type)

        if self.state == self.STATE_MASKING:
            self.ie_polys.n_list().add (self.mouse_x, self.mouse_y)

    def get_ie_polys(self):
        return self.ie_polys

    def set_ie_polys(self, saved_ie_polys):
        self.state = self.STATE_NONE
        self.ie_polys = saved_ie_polys
        self.redo_to_end_point()
        self.mask_finish()


def mask_editor_main(input_dir, confirmed_dir=None, skipped_dir=None, no_default_mask=False):
    input_path = Path(input_dir)

    confirmed_path = Path(confirmed_dir)
    skipped_path = Path(skipped_dir)

    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    if not confirmed_path.exists():
        confirmed_path.mkdir(parents=True)

    if not skipped_path.exists():
        skipped_path.mkdir(parents=True)

    if not no_default_mask:
        eyebrows_expand_mod = np.clip ( io.input_int ("Default eyebrows expand modifier?", 100, add_info="0..400"), 0, 400 ) / 100.0
    else:
        eyebrows_expand_mod = None

    wnd_name = "MaskEditor tool"
    io.named_window (wnd_name)
    io.capture_mouse(wnd_name)
    io.capture_keys(wnd_name)

    cached_images = {}

    image_paths = [ Path(x) for x in pathex.get_image_paths(input_path)]
    done_paths = []
    done_images_types = {}
    image_paths_total = len(image_paths)
    saved_ie_polys = IEPolys()
    zoom_factor = 1.0
    preview_images_count = 9
    target_wh = 256

    do_prev_count = 0
    do_save_move_count = 0
    do_save_count = 0
    do_skip_move_count = 0
    do_skip_count = 0

    def jobs_count():
        return do_prev_count + do_save_move_count + do_save_count + do_skip_move_count + do_skip_count

    is_exit = False
    while not is_exit:

        if len(image_paths) > 0:
            filepath = image_paths.pop(0)
        else:
            filepath = None

        next_image_paths = image_paths[0:preview_images_count]
        next_image_paths_names = [ path.name for path in next_image_paths ]
        prev_image_paths = done_paths[-preview_images_count:]
        prev_image_paths_names = [ path.name for path in prev_image_paths ]

        for key in list( cached_images.keys() ):
            if key not in prev_image_paths_names and \
               key not in next_image_paths_names:
                cached_images.pop(key)

        for paths in [prev_image_paths, next_image_paths]:
            for path in paths:
                if path.name not in cached_images:
                    cached_images[path.name] = cv2_imread(str(path)) / 255.0

        if filepath is not None:
            dflimg = DFLIMG.load (filepath)

            if dflimg is None or not dflimg.has_data():
                io.log_err ("%s is not a dfl image file" % (filepath.name) )
                continue
            else:
                lmrks = dflimg.get_landmarks()
                ie_polys = IEPolys.load(dflimg.get_ie_polys())

                if filepath.name in cached_images:
                    img = cached_images[filepath.name]
                else:
                    img = cached_images[filepath.name] = cv2_imread(str(filepath)) / 255.0


                if no_default_mask:
                    mask = np.zeros ( (target_wh,target_wh,3) )
                else:
                    mask = LandmarksProcessor.get_image_hull_mask( img.shape, lmrks, eyebrows_expand_mod=eyebrows_expand_mod)
        else:
            img = np.zeros ( (target_wh,target_wh,3) )
            mask = np.ones ( (target_wh,target_wh,3) )
            ie_polys = None

        def get_status_lines_func():
            return ['Progress: %d / %d . Current file: %s' % (len(done_paths), image_paths_total, str(filepath.name) if filepath is not None else "end" ),
                    '[Left mouse button] - mark include mask.',
                    '[Right mouse button] - mark exclude mask.',
                    '[Middle mouse button] - finish current poly.',
                    '[Mouse wheel] - undo/redo poly or point. [+ctrl] - undo to begin/redo to end',
                    '[r] - applies edits made to last saved image.',
                    '[q] - prev image. [w] - skip and move to %s. [e] - save and move to %s. ' % (skipped_path.name, confirmed_path.name),
                    '[z] - prev image. [x] - skip. [c] - save. ',
                    'hold [shift] - speed up the frame counter by 10.',
                    '[-/+] - window zoom [esc] - quit',
                    ]

        try:
            ed = MaskEditor(img,
                            [ (done_images_types[name], cached_images[name]) for name in prev_image_paths_names ],
                            [ (0, cached_images[name]) for name in next_image_paths_names ],
                            mask, ie_polys, get_status_lines_func)
        except Exception as e:
            print(e)
            continue

        next = False
        while not next:
            io.process_messages(0.005)

            if jobs_count() == 0:
                for (x,y,ev,flags) in io.get_mouse_events(wnd_name):
                    x, y = int (x / zoom_factor), int(y / zoom_factor)
                    ed.set_mouse_pos(x, y)
                    if filepath is not None:
                        if ev == io.EVENT_LBUTTONDOWN:
                            ed.mask_point(1)
                        elif ev == io.EVENT_RBUTTONDOWN:
                            ed.mask_point(0)
                        elif ev == io.EVENT_MBUTTONDOWN:
                            ed.mask_finish()
                        elif ev == io.EVENT_MOUSEWHEEL:
                            if flags & 0x80000000 != 0:
                                if flags & 0x8 != 0:
                                    ed.undo_to_begin_point()
                                else:
                                    ed.undo_point()
                            else:
                                if flags & 0x8 != 0:
                                    ed.redo_to_end_point()
                                else:
                                    ed.redo_point()

                for key, chr_key, ctrl_pressed, alt_pressed, shift_pressed in io.get_key_events(wnd_name):
                    if chr_key == 'q' or chr_key == 'z':
                        do_prev_count = 1 if not shift_pressed else 10
                    elif chr_key == '-':
                        zoom_factor = np.clip (zoom_factor-0.1, 0.1, 4.0)
                        ed.set_screen_changed()
                    elif chr_key == '+':
                        zoom_factor = np.clip (zoom_factor+0.1, 0.1, 4.0)
                        ed.set_screen_changed()
                    elif key == 27: #esc
                        is_exit = True
                        next = True
                        break
                    elif filepath is not None:
                        if chr_key == 'e':
                            saved_ie_polys = ed.ie_polys
                            do_save_move_count = 1 if not shift_pressed else 10
                        elif chr_key == 'c':
                            saved_ie_polys = ed.ie_polys
                            do_save_count = 1 if not shift_pressed else 10
                        elif chr_key == 'w':
                            do_skip_move_count = 1 if not shift_pressed else 10
                        elif chr_key == 'x':
                            do_skip_count = 1 if not shift_pressed else 10
                        elif chr_key == 'r' and saved_ie_polys != None:
                            ed.set_ie_polys(saved_ie_polys)

            if do_prev_count > 0:
                do_prev_count -= 1
                if len(done_paths) > 0:
                    if filepath is not None:
                        image_paths.insert(0, filepath)

                    filepath = done_paths.pop(-1)
                    done_images_types[filepath.name] = 0

                    if filepath.parent != input_path:
                        new_filename_path = input_path / filepath.name
                        filepath.rename ( new_filename_path )
                        image_paths.insert(0, new_filename_path)
                    else:
                        image_paths.insert(0, filepath)

                    next = True
            elif filepath is not None:
                if do_save_move_count > 0:
                    do_save_move_count -= 1

                    ed.mask_finish()
                    dflimg.set_ie_polys(ed.get_ie_polys())
                    dflimg.set_eyebrows_expand_mod(eyebrows_expand_mod)
                    dflimg.save()

                    done_paths += [ confirmed_path / filepath.name ]
                    done_images_types[filepath.name] = 2
                    filepath.rename(done_paths[-1])

                    next = True
                elif do_save_count > 0:
                    do_save_count -= 1

                    ed.mask_finish()
                    dflimg.set_ie_polys(ed.get_ie_polys())
                    dflimg.set_eyebrows_expand_mod(eyebrows_expand_mod)
                    dflimg.save()

                    done_paths += [ filepath ]
                    done_images_types[filepath.name] = 2

                    next = True
                elif do_skip_move_count > 0:
                    do_skip_move_count -= 1

                    done_paths += [ skipped_path / filepath.name ]
                    done_images_types[filepath.name] = 1
                    filepath.rename(done_paths[-1])

                    next = True
                elif do_skip_count > 0:
                    do_skip_count -= 1

                    done_paths += [ filepath ]
                    done_images_types[filepath.name] = 1

                    next = True
            else:
                do_save_move_count = do_save_count = do_skip_move_count = do_skip_count = 0

            if jobs_count() == 0:
                if ed.switch_screen_changed():
                    screen = ed.make_screen()
                    if zoom_factor != 1.0:
                        h,w,c = screen.shape
                        screen = cv2.resize ( screen, ( int(w*zoom_factor), int(h*zoom_factor) ) )
                    io.show_image (wnd_name, screen )


            io.process_messages(0.005)

    io.destroy_all_windows()
@@ -12,7 +12,7 @@ from core.interact import interact as io
from core.joblib import MPClassFuncOnDemand, MPFunc
from core.leras import nn
from DFLIMG import DFLIMG
from facelib import FaceEnhancer, FaceType, LandmarksProcessor, TernausNet, XSegNet
from facelib import FaceEnhancer, FaceType, LandmarksProcessor, XSegNet
from merger import FrameInfo, MergerConfig, InteractiveMergerSubprocessor

def main (model_class_name=None,

@@ -55,12 +55,6 @@ def main (model_class_name=None,
        predictor_func = MPFunc(predictor_func)

        run_on_cpu = len(nn.getCurrentDeviceConfig().devices) == 0
        fanseg_full_face_256_extract_func = MPClassFuncOnDemand(TernausNet, 'extract',
                                            name=f'FANSeg_{FaceType.toString(FaceType.FULL)}',
                                            resolution=256,
                                            place_model_on_cpu=True,
                                            run_on_cpu=run_on_cpu)

        xseg_256_extract_func = MPClassFuncOnDemand(XSegNet, 'extract',
                                            name='XSeg',
                                            resolution=256,

@@ -199,7 +193,6 @@ def main (model_class_name=None,
                        predictor_func = predictor_func,
                        predictor_input_shape = predictor_input_shape,
                        face_enhancer_func = face_enhancer_func,
                        fanseg_full_face_256_extract_func = fanseg_full_face_256_extract_func,
                        xseg_256_extract_func = xseg_256_extract_func,
                        merger_config = cfg,
                        frames = frames,
@@ -6,7 +6,6 @@ import sys
import tempfile
from functools import cmp_to_key
from pathlib import Path
from shutil import copyfile

import cv2
import numpy as np

@@ -35,7 +34,7 @@ class BlurEstimatorSubprocessor(Subprocessor):
            else:
                image = cv2_imread( str(filepath) )
                return [ str(filepath), estimate_sharpness(image) ]



        #override
        def get_data_name (self, data):

@@ -146,7 +145,7 @@ def sort_by_face_pitch(input_path):
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

    return img_list, trash_img_list


def sort_by_face_source_rect_size(input_path):
    io.log_info ("Sorting by face rect size...")
    img_list = []

@@ -163,15 +162,15 @@ def sort_by_face_source_rect_size(input_path):

        source_rect = dflimg.get_source_rect()
        rect_area = mathlib.polygon_area(np.array(source_rect[[0,2,2,0]]).astype(np.float32), np.array(source_rect[[1,1,3,3]]).astype(np.float32))


        img_list.append( [str(filepath), rect_area ] )

    io.log_info ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

    return img_list, trash_img_list


    return img_list, trash_img_list



class HistSsimSubprocessor(Subprocessor):
    class Cli(Subprocessor.Cli):

@@ -444,13 +443,13 @@ class FinalLoaderSubprocessor(Subprocessor):
                    raise Exception ("Unable to load %s" % (filepath.name) )

                gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)


                if self.faster:
                    source_rect = dflimg.get_source_rect()
                    sharpness = mathlib.polygon_area(np.array(source_rect[[0,2,2,0]]).astype(np.float32), np.array(source_rect[[1,1,3,3]]).astype(np.float32))
                else:
                    sharpness = estimate_sharpness(gray)


                pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll ( dflimg.get_landmarks(), size=dflimg.get_shape()[1] )

                hist = cv2.calcHist([gray], [0], None, [256], [0, 256])

@@ -586,12 +585,12 @@ class FinalHistDissimSubprocessor(Subprocessor):
    def get_result(self):
        return self.result

def sort_best_faster(input_path):
def sort_best_faster(input_path):
    return sort_best(input_path, faster=True)


def sort_best(input_path, faster=False):
    target_count = io.input_int ("Target number of faces?", 2000)


    io.log_info ("Performing sort by best faces.")
    if faster:
        io.log_info("Using faster algorithm. Faces will be sorted by source-rect-area instead of blur.")

@@ -630,7 +629,7 @@ def sort_best(input_path, faster=False):

    imgs_per_grad += total_lack // grads



    sharpned_imgs_per_grad = imgs_per_grad*10
    for g in io.progress_bar_generator ( range (grads), "Sort by blur"):
        img_list = yaws_sample_list[g]

@@ -770,7 +769,7 @@ def sort_by_absdiff(input_path):

    outputs_full = []
    outputs_remain = []


    for i in range(batch_size):
        diff_t = tf.reduce_sum( tf.abs(i_t-j_t[i]), axis=[1,2,3] )
        outputs_full.append(diff_t)
@@ -5,7 +5,6 @@ import cv2

from DFLIMG import *
from facelib import LandmarksProcessor, FaceType
from core.imagelib import IEPolys
from core.interact import interact as io
from core import pathex
from core.cv2ex import *

@@ -100,7 +99,7 @@ def add_landmarks_debug_images(input_path):
                rect = dflimg.get_source_rect()
                LandmarksProcessor.draw_rect_landmarks(img, rect, face_landmarks, FaceType.FULL )
            else:
                LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True, ie_polys=IEPolys.load(dflimg.get_ie_polys()) )
                LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True )


@@ -160,42 +159,3 @@ def recover_original_aligned_filename(input_path):
            fs.rename (fd)
        except:
            io.log_err ('fail to rename %s' % (fs.name) )


"""
def convert_png_to_jpg_file (filepath):
    filepath = Path(filepath)

    if filepath.suffix != '.png':
        return

    dflpng = DFLPNG.load (str(filepath) )
    if dflpng is None:
        io.log_err ("%s is not a dfl png image file" % (filepath.name) )
        return

    dfl_dict = dflpng.get_dict()

    img = cv2_imread (str(filepath))
    new_filepath = str(filepath.parent / (filepath.stem + '.jpg'))
    cv2_imwrite ( new_filepath, img, [int(cv2.IMWRITE_JPEG_QUALITY), 100])

    DFLJPG.x( new_filepath,
              face_type=dfl_dict.get('face_type', None),
              landmarks=dfl_dict.get('landmarks', None),
              ie_polys=dfl_dict.get('ie_polys', None),
              source_filename=dfl_dict.get('source_filename', None),
              source_rect=dfl_dict.get('source_rect', None),
              source_landmarks=dfl_dict.get('source_landmarks', None) )

    filepath.unlink()

def convert_png_to_jpg_folder (input_path):
    input_path = Path(input_path)

    io.log_info ("Converting PNG to JPG...\r\n")

    for filepath in io.progress_bar_generator( pathex.get_image_paths(input_path), "Converting"):
        filepath = Path(filepath)
        convert_png_to_jpg_file(filepath)
"""
@@ -1,109 +1,96 @@
import traceback
import json
import shutil
import traceback
from pathlib import Path

import numpy as np

from core import pathex
from core.imagelib import IEPolys
from core.cv2ex import *
from core.interact import interact as io
from core.leras import nn
from DFLIMG import *
from facelib import XSegNet


def merge(input_dir):
    input_path = Path(input_dir)
def apply_xseg(input_path, model_path):
    if not input_path.exists():
        raise ValueError('input_dir not found. Please ensure it exists.')
        raise ValueError(f'{input_path} not found. Please ensure it exists.')

    if not model_path.exists():
        raise ValueError(f'{model_path} not found. Please ensure it exists.')

    io.log_info(f'Applying trained XSeg model to {input_path.name}/ folder.')

    device_config = nn.DeviceConfig.ask_choose_device(choose_only_one=True)
    nn.initialize(device_config)

    xseg = XSegNet(name='XSeg',
                   load_weights=True,
                   weights_file_root=model_path,
                   data_format=nn.data_format,
                   raise_on_no_model_files=True)
    res = xseg.get_resolution()

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    images_processed = 0

    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        json_filepath = filepath.parent / (filepath.stem+'.json')
        if json_filepath.exists():
            dflimg = DFLIMG.load(filepath)
            if dflimg is not None and dflimg.has_data():
                try:
                    json_dict = json.loads(json_filepath.read_text())

                    seg_ie_polys = IEPolys()
                    total_points = 0

                    #include polys first
                    for shape in json_dict['shapes']:
                        if shape['shape_type'] == 'polygon' and \
                           shape['label'] != '0':
                            seg_ie_poly = seg_ie_polys.add(1)

                            for x,y in shape['points']:
                                seg_ie_poly.add( int(x), int(y) )
                                total_points += 1

                    #exclude polys
                    for shape in json_dict['shapes']:
                        if shape['shape_type'] == 'polygon' and \
                           shape['label'] == '0':
                            seg_ie_poly = seg_ie_polys.add(0)

                            for x,y in shape['points']:
                                seg_ie_poly.add( int(x), int(y) )
                                total_points += 1

                    if total_points == 0:
                        io.log_info(f"No points found in {json_filepath}, skipping.")
                        continue

                    dflimg.set_seg_ie_polys ( seg_ie_polys.dump() )
                    dflimg.save()

                    json_filepath.unlink()

                    images_processed += 1
                except:
                    io.log_err(f"err {filepath}, {traceback.format_exc()}")
                    return

    io.log_info(f"Images processed: {images_processed}")

def split(input_dir ):
    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('input_dir not found. Please ensure it exists.')

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    images_processed = 0
    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        json_filepath = filepath.parent / (filepath.stem+'.json')


        dflimg = DFLIMG.load(filepath)
        if dflimg is not None and dflimg.has_data():
            try:
                seg_ie_polys = dflimg.get_seg_ie_polys()
                if seg_ie_polys is not None:
                    json_dict = {}
                    json_dict['version'] = "4.2.9"
                    json_dict['flags'] = {}
                    json_dict['shapes'] = []
                    json_dict['imagePath'] = filepath.name
                    json_dict['imageData'] = None

                    for poly_type, points_list in seg_ie_polys:
                        shape_dict = {}
                        shape_dict['label'] = str(poly_type)
                        shape_dict['points'] = points_list
                        shape_dict['group_id'] = None
                        shape_dict['shape_type'] = 'polygon'
                        shape_dict['flags'] = {}
                        json_dict['shapes'].append( shape_dict )
        if dflimg is None or not dflimg.has_data():
            io.log_info(f'{filepath} is not a DFLIMG')
            continue

        img = cv2_imread(filepath).astype(np.float32) / 255.0
        h,w,c = img.shape
        if w != res:
            img = cv2.resize( img, (res,res), interpolation=cv2.INTER_CUBIC )
            if len(img.shape) == 2:
                img = img[...,None]

        mask = xseg.extract(img)
        mask[mask < 0.5]=0
        mask[mask >= 0.5]=1

        dflimg.set_xseg_mask(mask)
        dflimg.save()

                    json_filepath.write_text( json.dumps (json_dict,indent=4) )
def remove_xseg(input_path):
    if not input_path.exists():
        raise ValueError(f'{input_path} not found. Please ensure it exists.')

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            io.log_info(f'{filepath} is not a DFLIMG')
            continue

        dflimg.set_xseg_mask(None)
        dflimg.save()

def fetch_xseg(input_path):
    if not input_path.exists():
        raise ValueError(f'{input_path} not found. Please ensure it exists.')

    output_path = input_path.parent / (input_path.name + '_xseg')
    output_path.mkdir(exist_ok=True, parents=True)

    io.log_info(f'Copying faces containing XSeg polygons to {output_path.name}/ folder.')

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    files_copied = 0
    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            io.log_info(f'{filepath} is not a DFLIMG')
            continue

        ie_polys = dflimg.get_seg_ie_polys()

                    dflimg.set_seg_ie_polys(None)
                    dflimg.save()
                    images_processed += 1
            except:
                io.log_err(f"err {filepath}, {traceback.format_exc()}")
                return

    io.log_info(f"Images processed: {images_processed}")
        if ie_polys.has_polys():
            files_copied += 1
            shutil.copy ( str(filepath), str(output_path / filepath.name) )

    io.log_info(f'Files copied: {files_copied}')
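For orientation, a hedged sketch of how the three new helpers above might be driven directly from Python. The module path mainscripts.XSegUtil and the workspace folder names are assumptions, not something this diff shows; the supported entry points are the 5.XSeg .bat files described in the commit message.

# Hypothetical driver for the new XSeg utility helpers shown above.
# Folder and model paths are placeholders; apply_xseg() asks for a device and
# initializes nn itself, so no extra setup is needed here.
from pathlib import Path

from mainscripts import XSegUtil  # assumed module location of the file above

faces_dir = Path('workspace/data_dst/aligned')   # hypothetical path
xseg_model_dir = Path('workspace/model')         # hypothetical path

# Write the trained XSeg mask into every face's metadata ("apply.bat").
XSegUtil.apply_xseg(faces_dir, xseg_model_dir)

# Collect faces that carry hand-labeled polygons ("fetch.bat").
XSegUtil.fetch_xseg(faces_dir)

# Strip applied XSeg masks again ("remove.bat").
XSegUtil.remove_xseg(faces_dir)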
@@ -8,7 +8,6 @@ import numpy as np

from core import imagelib, pathex
from core.cv2ex import *
from core.imagelib import IEPolys
from core.interact import interact as io
from core.joblib import Subprocessor
from core.leras import nn

@@ -412,31 +411,3 @@ def dev_segmented_trash(input_dir):
        except:
            io.log_info ('fail to trashing %s' % (src.name) )


    """
    #mark only
    for data in extract_data:
        filepath = data.filepath
        output_filepath = output_path / (filepath.stem+'.jpg')

        img = cv2_imread(filepath)
        img = imagelib.normalize_channels(img, 3)
        cv2_imwrite(output_filepath, img, [int(cv2.IMWRITE_JPEG_QUALITY), 100] )

        json_dict = images_jsons[filepath]

        ie_polys = IEPolys()
        for shape in json_dict['shapes']:
            ie_poly = ie_polys.add(1)
            for x,y in shape['points']:
                ie_poly.add( int(x), int(y) )


        DFLJPG.x(output_filepath, face_type=FaceType.toString(FaceType.MARK_ONLY),
                 landmarks=data.landmarks[0],
                 ie_polys=ie_polys,
                 source_filename=filepath.name,
                 source_rect=data.rects[0],
                 source_landmarks=data.landmarks[0]
                 )
    """