manual extractor: increased FPS

sort by final: you can now specify the target number of images
converter: fixed seamless mask and a seamlessClone exception
huge refactoring
iperov 2019-02-28 11:56:31 +04:00
parent 7db469a1da
commit 438213e97c
30 changed files with 1834 additions and 1718 deletions


@@ -3,21 +3,15 @@ import copy
You can implement your own Converter, check example ConverterMasked.py
'''
class ConverterBase(object):
MODE_FACE = 0
MODE_IMAGE = 1
MODE_IMAGE_WITH_LANDMARKS = 2
class Converter(object):
TYPE_FACE = 0 #calls convert_face
TYPE_IMAGE = 1 #calls convert_image without landmarks
TYPE_IMAGE_WITH_LANDMARKS = 2 #calls convert_image with landmarks
#overridable
def __init__(self, predictor):
self.predictor = predictor
#overridable
def get_mode(self):
#MODE_FACE calls convert_face
#MODE_IMAGE calls convert_image without landmarks
#MODE_IMAGE_WITH_LANDMARKS calls convert_image with landmarks
return ConverterBase.MODE_FACE
def __init__(self, predictor_func, type):
self.predictor_func = predictor_func
self.type = type
#overridable
def convert_face (self, img_bgr, img_face_landmarks, debug):
@@ -40,7 +34,7 @@ class ConverterBase(object):
def copy(self):
return copy.copy(self)
def copy_and_set_predictor(self, predictor):
def copy_and_set_predictor(self, predictor_func):
result = self.copy()
result.predictor = predictor
result.predictor_func = predictor_func
return result
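For context: the refactor above replaces the overridable get_mode() with a type value fixed at construction time. A minimal sketch of what a subclass looks like after this change (the subclass name and predictor are illustrative, not part of the commit):

class MyFaceConverter(Converter):
    def __init__(self, predictor_func):
        # the base class stores the predictor callable and the dispatch type
        super().__init__(predictor_func, Converter.TYPE_FACE)

    # TYPE_FACE means the host will call convert_face()
    def convert_face(self, img_bgr, img_face_landmarks, debug):
        return self.predictor_func(img_bgr)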


@@ -1,4 +1,4 @@
from models import ConverterBase
from .Converter import Converter
from facelib import LandmarksProcessor
from facelib import FaceType
@@ -7,38 +7,33 @@ import numpy as np
from utils import image_utils
'''
predictor:
predictor_func:
input: [predictor_input_size, predictor_input_size, BGR]
output: [predictor_input_size, predictor_input_size, BGR]
'''
class ConverterImage(ConverterBase):
class ConverterImage(Converter):
#override
def __init__(self, predictor,
def __init__(self, predictor_func,
predictor_input_size=0,
output_size=0,
**in_options):
output_size=0):
super().__init__(predictor)
super().__init__(predictor_func, Converter.TYPE_IMAGE)
self.predictor_input_size = predictor_input_size
self.output_size = output_size
#override
def get_mode(self):
return ConverterBase.MODE_IMAGE
#override
def dummy_predict(self):
self.predictor ( np.zeros ( (self.predictor_input_size, self.predictor_input_size,3), dtype=np.float32) )
self.predictor_func ( np.zeros ( (self.predictor_input_size, self.predictor_input_size,3), dtype=np.float32) )
#override
def convert_image (self, img_bgr, img_landmarks, debug):
img_size = img_bgr.shape[1], img_bgr.shape[0]
predictor_input_bgr = cv2.resize ( img_bgr, (self.predictor_input_size, self.predictor_input_size), cv2.INTER_LANCZOS4 )
predicted_bgr = self.predictor ( predictor_input_bgr )
predicted_bgr = self.predictor_func ( predictor_input_bgr )
output = cv2.resize ( predicted_bgr, (self.output_size, self.output_size), cv2.INTER_LANCZOS4 )
if debug:


@@ -1,10 +1,10 @@
from models import ConverterBase
from .Converter import Converter
from facelib import LandmarksProcessor
from facelib import FaceType
import cv2
import numpy as np
from utils import image_utils
from utils.console_utils import *
from interact import interact as io
'''
default_mode = {1:'overlay',
@@ -14,10 +14,10 @@ default_mode = {1:'overlay',
5:'seamless-hist-match',
6:'raw'}
'''
class ConverterMasked(ConverterBase):
class ConverterMasked(Converter):
#override
def __init__(self, predictor,
def __init__(self, predictor_func,
predictor_input_size=0,
output_size=0,
face_type=FaceType.FULL,
@@ -26,16 +26,15 @@ class ConverterMasked(ConverterBase):
base_blur_mask_modifier = 0,
default_erode_mask_modifier = 0,
default_blur_mask_modifier = 0,
clip_hborder_mask_per = 0,
**in_options):
clip_hborder_mask_per = 0):
super().__init__(predictor)
super().__init__(predictor_func, Converter.TYPE_FACE)
self.predictor_input_size = predictor_input_size
self.output_size = output_size
self.face_type = face_type
self.clip_hborder_mask_per = clip_hborder_mask_per
mode = input_int ("Choose mode: (1) overlay, (2) hist match, (3) hist match bw, (4) seamless, (5) seamless hist match, (6) raw. Default - %d : " % (default_mode) , default_mode)
mode = io.input_int ("Choose mode: (1) overlay, (2) hist match, (3) hist match bw, (4) seamless, (5) seamless hist match, (6) raw. Default - %d : " % (default_mode) , default_mode)
mode_dict = {1:'overlay',
2:'hist-match',
@@ -47,7 +46,7 @@ class ConverterMasked(ConverterBase):
self.mode = mode_dict.get (mode, mode_dict[default_mode] )
if self.mode == 'raw':
mode = input_int ("Choose raw mode: (1) rgb, (2) rgb+mask (default), (3) mask only, (4) predicted only : ", 2)
mode = io.input_int ("Choose raw mode: (1) rgb, (2) rgb+mask (default), (3) mask only, (4) predicted only : ", 2)
self.raw_mode = {1:'rgb',
2:'rgb-mask',
3:'mask-only',
@@ -55,37 +54,33 @@ class ConverterMasked(ConverterBase):
if self.mode != 'raw':
if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
self.masked_hist_match = input_bool("Masked hist match? (y/n skip:y) : ", True)
self.masked_hist_match = io.input_bool("Masked hist match? (y/n skip:y) : ", True)
if self.mode == 'hist-match' or self.mode == 'hist-match-bw' or self.mode == 'seamless-hist-match':
self.hist_match_threshold = np.clip ( input_int("Hist match threshold [0..255] (skip:255) : ", 255), 0, 255)
self.hist_match_threshold = np.clip ( io.input_int("Hist match threshold [0..255] (skip:255) : ", 255), 0, 255)
self.use_predicted_mask = input_bool("Use predicted mask? (y/n skip:y) : ", True)
self.use_predicted_mask = io.input_bool("Use predicted mask? (y/n skip:y) : ", True)
if self.mode != 'raw':
self.erode_mask_modifier = base_erode_mask_modifier + np.clip ( input_int ("Choose erode mask modifier [-200..200] (skip:%d) : " % (default_erode_mask_modifier), default_erode_mask_modifier), -200, 200)
self.blur_mask_modifier = base_blur_mask_modifier + np.clip ( input_int ("Choose blur mask modifier [-200..200] (skip:%d) : " % (default_blur_mask_modifier), default_blur_mask_modifier), -200, 200)
self.erode_mask_modifier = base_erode_mask_modifier + np.clip ( io.input_int ("Choose erode mask modifier [-200..200] (skip:%d) : " % (default_erode_mask_modifier), default_erode_mask_modifier), -200, 200)
self.blur_mask_modifier = base_blur_mask_modifier + np.clip ( io.input_int ("Choose blur mask modifier [-200..200] (skip:%d) : " % (default_blur_mask_modifier), default_blur_mask_modifier), -200, 200)
self.seamless_erode_mask_modifier = 0
if self.mode == 'seamless' or self.mode == 'seamless-hist-match':
self.seamless_erode_mask_modifier = np.clip ( input_int ("Choose seamless erode mask modifier [-100..100] (skip:0) : ", 0), -100, 100)
self.seamless_erode_mask_modifier = np.clip ( io.input_int ("Choose seamless erode mask modifier [-100..100] (skip:0) : ", 0), -100, 100)
self.output_face_scale = np.clip ( 1.0 + input_int ("Choose output face scale modifier [-50..50] (skip:0) : ", 0)*0.01, 0.5, 1.5)
self.color_transfer_mode = input_str ("Apply color transfer to predicted face? Choose mode ( rct/lct skip:None ) : ", None, ['rct','lct'])
self.output_face_scale = np.clip ( 1.0 + io.input_int ("Choose output face scale modifier [-50..50] (skip:0) : ", 0)*0.01, 0.5, 1.5)
self.color_transfer_mode = io.input_str ("Apply color transfer to predicted face? Choose mode ( rct/lct skip:None ) : ", None, ['rct','lct'])
if self.mode != 'raw':
self.final_image_color_degrade_power = np.clip ( input_int ("Degrade color power of final image [0..100] (skip:0) : ", 0), 0, 100)
self.alpha = input_bool("Export png with alpha channel? (y/n skip:n) : ", False)
self.final_image_color_degrade_power = np.clip ( io.input_int ("Degrade color power of final image [0..100] (skip:0) : ", 0), 0, 100)
self.alpha = io.input_bool("Export png with alpha channel? (y/n skip:n) : ", False)
print ("")
#override
def get_mode(self):
return ConverterBase.MODE_FACE
io.log_info ("")
#override
def dummy_predict(self):
self.predictor ( np.zeros ( (self.predictor_input_size,self.predictor_input_size,4), dtype=np.float32 ) )
self.predictor_func ( np.zeros ( (self.predictor_input_size,self.predictor_input_size,4), dtype=np.float32 ) )
#override
def convert_face (self, img_bgr, img_face_landmarks, debug):
@@ -106,7 +101,7 @@ class ConverterMasked(ConverterBase):
predictor_input_mask_a_0 = cv2.resize (dst_face_mask_a_0, (self.predictor_input_size,self.predictor_input_size))
predictor_input_mask_a = np.expand_dims (predictor_input_mask_a_0, -1)
predicted_bgra = self.predictor ( np.concatenate( (predictor_input_bgr, predictor_input_mask_a), -1) )
predicted_bgra = self.predictor_func ( np.concatenate( (predictor_input_bgr, predictor_input_mask_a), -1) )
prd_face_bgr = np.clip (predicted_bgra[:,:,0:3], 0, 1.0 )
prd_face_mask_a_0 = np.clip (predicted_bgra[:,:,3], 0.0, 1.0)
@@ -129,11 +124,13 @@ class ConverterMasked(ConverterBase):
img_face_mask_aaa [ img_face_mask_aaa <= 0.1 ] = 0.0
img_face_mask_flatten_aaa = img_face_mask_aaa.copy()
img_face_mask_flatten_aaa[img_face_mask_flatten_aaa > 0.9] = 1.0
maxregion = np.argwhere(img_face_mask_flatten_aaa==1.0)
if self.mode == 'seamless' or self.mode == 'seamless-hist-match':
img_face_seamless_mask_aaa = img_face_mask_aaa.copy()
img_face_seamless_mask_aaa[img_face_seamless_mask_aaa > 0.9] = 1.0
img_face_seamless_mask_aaa[img_face_seamless_mask_aaa <= 0.9] = 0.0
maxregion = np.argwhere(img_face_mask_aaa > 0.9)
out_img = img_bgr.copy()
if self.mode == 'raw':
@@ -155,24 +152,24 @@ class ConverterMasked(ConverterBase):
maxy,maxx = maxregion.max(axis=0)[:2]
if debug:
print ("maxregion.size: %d, minx:%d, maxx:%d miny:%d, maxy:%d" % (maxregion.size, minx, maxx, miny, maxy ) )
io.log_info ("maxregion.size: %d, minx:%d, maxx:%d miny:%d, maxy:%d" % (maxregion.size, minx, maxx, miny, maxy ) )
lenx = maxx - minx
leny = maxy - miny
if lenx >= 4 and leny >= 4:
masky = int(minx+(lenx//2))
maskx = int(miny+(leny//2))
maskx = int(minx+(lenx//2))
masky = int(miny+(leny//2))
lowest_len = min (lenx, leny)
if debug:
print ("lowest_len = %f" % (lowest_len) )
io.log_info ("lowest_len = %f" % (lowest_len) )
img_mask_blurry_aaa = img_face_mask_aaa
if self.erode_mask_modifier != 0:
ero = int( lowest_len * ( 0.126 - lowest_len * 0.00004551365 ) * 0.01*self.erode_mask_modifier )
if debug:
print ("erode_size = %d" % (ero) )
io.log_info ("erode_size = %d" % (ero) )
if ero > 0:
img_mask_blurry_aaa = cv2.erode(img_mask_blurry_aaa, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ero,ero)), iterations = 1 )
elif ero < 0:
@@ -181,12 +178,13 @@ class ConverterMasked(ConverterBase):
if self.seamless_erode_mask_modifier != 0:
ero = int( lowest_len * ( 0.126 - lowest_len * 0.00004551365 ) * 0.01*self.seamless_erode_mask_modifier )
if debug:
print ("seamless_erode_size = %d" % (ero) )
io.log_info ("seamless_erode_size = %d" % (ero) )
if ero > 0:
img_face_mask_flatten_aaa = cv2.erode(img_face_mask_flatten_aaa, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ero,ero)), iterations = 1 )
img_face_seamless_mask_aaa = cv2.erode(img_face_seamless_mask_aaa, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ero,ero)), iterations = 1 )
elif ero < 0:
img_face_mask_flatten_aaa = cv2.dilate(img_face_mask_flatten_aaa, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(-ero,-ero)), iterations = 1 )
img_face_seamless_mask_aaa = cv2.dilate(img_face_seamless_mask_aaa, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(-ero,-ero)), iterations = 1 )
img_face_seamless_mask_aaa = np.clip (img_face_seamless_mask_aaa, 0, 1)
if self.clip_hborder_mask_per > 0: #clip hborder before blur
prd_hborder_rect_mask_a = np.ones ( prd_face_mask_a.shape, dtype=np.float32)
prd_border_size = int ( prd_hborder_rect_mask_a.shape[1] * self.clip_hborder_mask_per )
@@ -205,7 +203,7 @@ class ConverterMasked(ConverterBase):
if self.blur_mask_modifier > 0:
blur = int( lowest_len * 0.10 * 0.01*self.blur_mask_modifier )
if debug:
print ("blur_size = %d" % (blur) )
io.log_info ("blur_size = %d" % (blur) )
if blur > 0:
img_mask_blurry_aaa = cv2.blur(img_mask_blurry_aaa, (blur, blur) )
@@ -276,8 +274,12 @@ class ConverterMasked(ConverterBase):
if debug:
debugs += [out_img.copy()]
out_img = cv2.seamlessClone( (out_img*255).astype(np.uint8), (img_bgr*255).astype(np.uint8), (img_face_mask_flatten_aaa*255).astype(np.uint8), (masky,maskx) , cv2.NORMAL_CLONE )
out_img = out_img.astype(dtype=np.float32) / 255.0
try:
out_img = cv2.seamlessClone( (out_img*255).astype(np.uint8), (img_bgr*255).astype(np.uint8), (img_face_seamless_mask_aaa*255).astype(np.uint8), (maskx,masky) , cv2.NORMAL_CLONE )
out_img = out_img.astype(dtype=np.float32) / 255.0
except:
#seamlessClone may fail in some cases
pass
if debug:
debugs += [out_img.copy()]
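The hunk above carries both converter fixes from the commit message: seamlessClone now receives the dedicated binary seamless mask and the center point in (x, y) order, and the call is wrapped in try/except because OpenCV can raise here (e.g. for degenerate mask regions). A standalone sketch of the guarded call, assuming float32 images in [0, 1] as in the surrounding code:

import cv2
import numpy as np

def safe_seamless_clone(out_img, img_bgr, seamless_mask_aaa, center_xy):
    # cv2.seamlessClone expects uint8 images and an (x, y) center point
    try:
        blended = cv2.seamlessClone((out_img * 255).astype(np.uint8),
                                    (img_bgr * 255).astype(np.uint8),
                                    (seamless_mask_aaa * 255).astype(np.uint8),
                                    center_xy, cv2.NORMAL_CLONE)
        return blended.astype(np.float32) / 255.0
    except Exception:
        # seamlessClone may fail in some cases; fall back to the unblended image
        return out_img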

converters/__init__.py Normal file

@@ -0,0 +1,3 @@
from .Converter import Converter
from .ConverterMasked import ConverterMasked
from .ConverterImage import ConverterImage


@@ -3,10 +3,6 @@ import os
import cv2
from pathlib import Path
from utils import std_utils
def transform(point, center, scale, resolution):
pt = np.array ( [point[0], point[1], 1.0] )
h = 200.0 * scale
@@ -123,8 +119,7 @@ class LandmarksExtractor(object):
image = crop(input_image, center, scale).transpose ( (2,0,1) ).astype(np.float32) / 255.0
image = np.expand_dims(image, 0)
with std_utils.suppress_stdout_stderr():
predicted = self.keras_model.predict (image)
predicted = self.keras_model.predict (image)
pts_img = get_pts_from_predict ( predicted[-1][0], center, scale)
pts_img = [ ( int(pt[0]), int(pt[1]) ) for pt in pts_img ]


@@ -134,7 +134,7 @@ def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
elif face_type == FaceType.HEAD:
padding = (output_size / 64) * 24
else:
raise ValueError ('wrong face_type')
raise ValueError ('wrong face_type: ', face_type)
mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2]
mat = mat * (output_size - 2 * padding)

interact/__init__.py Normal file

@@ -0,0 +1 @@
from .interact import interact

interact/interact.py Normal file

@@ -0,0 +1,236 @@
import os
import sys
import time
import types
import multiprocessing
import cv2
from tqdm import tqdm
class Interact(object):
EVENT_LBUTTONDOWN = 1
EVENT_MOUSEWHEEL = 2
def __init__(self):
self.named_windows = {}
self.capture_mouse_windows = {}
self.capture_keys_windows = {}
self.mouse_events = {}
self.key_events = {}
self.pg_bar = None
def log_info(self, msg, end='\n'):
print (msg, end=end)
def log_err(self, msg, end='\n'):
print (msg, end=end)
def named_window(self, wnd_name):
if wnd_name not in self.named_windows:
#we will show window only on first show_image
self.named_windows[wnd_name] = 0
else: print("named_window: ", wnd_name, " already created.")
def destroy_all_windows(self):
if len( self.named_windows ) != 0:
cv2.destroyAllWindows()
self.named_windows = {}
self.capture_mouse_windows = {}
self.capture_keys_windows = {}
self.mouse_events = {}
self.key_events = {}
def show_image(self, wnd_name, img):
if wnd_name in self.named_windows:
if self.named_windows[wnd_name] == 0:
self.named_windows[wnd_name] = 1
cv2.namedWindow(wnd_name)
if wnd_name in self.capture_mouse_windows:
self.capture_mouse(wnd_name)
cv2.imshow (wnd_name, img)
else: print("show_image: named_window ", wnd_name, " not found.")
def capture_mouse(self, wnd_name):
def onMouse(event, x, y, flags, param):
(inst, wnd_name) = param
if event == cv2.EVENT_LBUTTONDOWN: ev = Interact.EVENT_LBUTTONDOWN
elif event == cv2.EVENT_MOUSEWHEEL: ev = Interact.EVENT_MOUSEWHEEL
else: ev = 0
inst.add_mouse_event (wnd_name, x, y, ev, flags)
if wnd_name in self.named_windows:
self.capture_mouse_windows[wnd_name] = True
if self.named_windows[wnd_name] == 1:
cv2.setMouseCallback(wnd_name, onMouse, (self,wnd_name) )
else: print("capture_mouse: named_window ", wnd_name, " not found.")
def capture_keys(self, wnd_name):
if wnd_name in self.named_windows:
if wnd_name not in self.capture_keys_windows:
self.capture_keys_windows[wnd_name] = True
else: print("capture_keys: already set for window ", wnd_name)
else: print("capture_keys: named_window ", wnd_name, " not found.")
def progress_bar(self, desc, total, leave=True):
if self.pg_bar is None:
self.pg_bar = tqdm( total=total, desc=desc, leave=leave, ascii=True )
else: print("progress_bar: already set.")
def progress_bar_inc(self, c):
if self.pg_bar is not None:
self.pg_bar.n += c
self.pg_bar.refresh()
else: print("progress_bar not set.")
def progress_bar_close(self):
if self.pg_bar is not None:
self.pg_bar.close()
self.pg_bar = None
else: print("progress_bar not set.")
def progress_bar_generator(self, data, desc, leave=True):
for x in tqdm( data, desc=desc, leave=leave, ascii=True ):
yield x
def process_messages(self, sleep_time=0):
has_windows = False
has_capture_keys = False
if len(self.named_windows) != 0:
has_windows = True
if len(self.capture_keys_windows) != 0:
has_capture_keys = True
if has_windows or has_capture_keys:
wait_key_time = int(sleep_time / 1000) if sleep_time != 0 else 1
key = cv2.waitKey(1) & 0xFF
else:
if sleep_time != 0:
time.sleep(sleep_time)
if has_capture_keys and key != 255:
for wnd_name in self.capture_keys_windows:
self.add_key_event (wnd_name, key)
def wait_any_key(self):
cv2.waitKey(0)
def add_mouse_event(self, wnd_name, x, y, ev, flags):
if wnd_name not in self.mouse_events:
self.mouse_events[wnd_name] = []
self.mouse_events[wnd_name] += [ (x, y, ev, flags) ]
def add_key_event(self, wnd_name, key):
if wnd_name not in self.key_events:
self.key_events[wnd_name] = []
self.key_events[wnd_name] += [ (key,) ]
def get_mouse_events(self, wnd_name):
ar = self.mouse_events.get(wnd_name, [])
self.mouse_events[wnd_name] = []
return ar
def get_key_events(self, wnd_name):
ar = self.key_events.get(wnd_name, [])
self.key_events[wnd_name] = []
return ar
def input_number(self, s, default_value, valid_list=None, help_message=None):
while True:
try:
inp = input(s)
if len(inp) == 0:
raise ValueError("")
if help_message is not None and inp == '?':
print (help_message)
continue
i = float(inp)
if (valid_list is not None) and (i not in valid_list):
return default_value
return i
except:
print (default_value)
return default_value
def input_int(self,s, default_value, valid_list=None, help_message=None):
while True:
try:
inp = input(s)
if len(inp) == 0:
raise ValueError("")
if help_message is not None and inp == '?':
print (help_message)
continue
i = int(inp)
if (valid_list is not None) and (i not in valid_list):
return default_value
return i
except:
print (default_value)
return default_value
def input_bool(self, s, default_value, help_message=None):
while True:
try:
inp = input(s)
if len(inp) == 0:
raise ValueError("")
if help_message is not None and inp == '?':
print (help_message)
continue
return bool ( {"y":True,"n":False,"1":True,"0":False}.get(inp.lower(), default_value) )
except:
print ( "y" if default_value else "n" )
return default_value
def input_str(self, s, default_value, valid_list=None, help_message=None):
while True:
try:
inp = input(s)
if len(inp) == 0:
raise ValueError("")
if help_message is not None and inp == '?':
print (help_message)
continue
if (valid_list is not None) and (inp.lower() not in valid_list):
return default_value
return inp
except:
print (default_value)
return default_value
def input_process(self, stdin_fd, sq, str):
sys.stdin = os.fdopen(stdin_fd)
try:
inp = input (str)
sq.put (True)
except:
sq.put (False)
def input_in_time (self, str, max_time_sec):
sq = multiprocessing.Queue()
p = multiprocessing.Process(target=self.input_process, args=( sys.stdin.fileno(), sq, str))
p.start()
t = time.time()
inp = False
while True:
if not sq.empty():
inp = sq.get()
break
if time.time() - t > max_time_sec:
break
p.terminate()
sys.stdin = os.fdopen( sys.stdin.fileno() )
return inp
interact = Interact()
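The module ends by exporting a process-wide singleton, which the rest of the codebase imports as io (see the call-site changes throughout this commit). Typical usage, mirroring those call sites:

from interact import interact as io

mode = io.input_int("Choose mode [1..6] (skip:1) : ", 1)   # prompt with default
ok = io.input_bool("Use predicted mask? (y/n skip:y) : ", True)
io.log_info("starting")
for item in io.progress_bar_generator(range(100), "Processing"):  # tqdm-backed
    pass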


@@ -0,0 +1,42 @@
import time
import multiprocessing
class SubprocessFunctionCaller(object):
class CliFunction(object):
def __init__(self, s2c, c2s, lock):
self.s2c = s2c
self.c2s = c2s
self.lock = lock
def __call__(self, value):
self.lock.acquire()
self.c2s.put (value)
while True:
if not self.s2c.empty():
obj = self.s2c.get()
self.lock.release()
return obj
time.sleep(0.005)
class HostProcessor(object):
def __init__(self, s2c, c2s, func):
self.s2c = s2c
self.c2s = c2s
self.func = func
def process_messages(self):
while not self.c2s.empty():
obj = self.c2s.get()
result = self.func (obj)
self.s2c.put (result)
@staticmethod
def make_pair( func ):
s2c = multiprocessing.Queue()
c2s = multiprocessing.Queue()
lock = multiprocessing.Lock()
host_processor = SubprocessFunctionCaller.HostProcessor (s2c, c2s, func)
cli_func = SubprocessFunctionCaller.CliFunction (s2c, c2s, lock)
return host_processor, cli_func
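SubprocessFunctionCaller exists so a worker process can call a function that lives only in the host process (here: the model's predictor, which holds the GPU). make_pair() returns the two halves; the wiring below is a sketch of how ConvertSubprocessor uses it further down in this commit (predictor is an illustrative stand-in):

def predictor(x):
    return x  # host-side function, e.g. the model's predictor

host_processor, cli_func = SubprocessFunctionCaller.make_pair(predictor)
# cli_func is handed to the worker; each cli_func(value) blocks until the host replies.
# The host must pump pending calls regularly, e.g. once per main-loop tick:
host_processor.process_messages()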

joblib/SubprocessorBase.py Normal file

@@ -0,0 +1,255 @@
import traceback
import multiprocessing
import time
import sys
from interact import interact as io
class Subprocessor(object):
class Cli(object):
def __init__ ( self, client_dict ):
self.s2c = multiprocessing.Queue()
self.c2s = multiprocessing.Queue()
self.p = multiprocessing.Process(target=self._subprocess_run, args=(client_dict,) )
self.p.daemon = True
self.p.start()
self.state = None
self.sent_time = None
self.sent_data = None
self.name = None
self.host_dict = None
def kill(self):
self.p.terminate()
self.p.join()
#overridable optional
def on_initialize(self, client_dict):
#initialize your subprocess here using client_dict
pass
#overridable optional
def on_finalize(self):
#finalize your subprocess here
pass
#overridable
def process_data(self, data):
#process 'data' given from host and return result
raise NotImplementedError
#overridable optional
def get_data_name (self, data):
#return a string identifier for your 'data'
return "undefined"
def log_info(self, msg): self.c2s.put ( {'op': 'log_info', 'msg':msg } )
def log_err(self, msg): self.c2s.put ( {'op': 'log_err' , 'msg':msg } )
def progress_bar_inc(self, c): self.c2s.put ( {'op': 'progress_bar_inc' , 'c':c } )
def _subprocess_run(self, client_dict):
data = None
s2c, c2s = self.s2c, self.c2s
try:
self.on_initialize(client_dict)
c2s.put ( {'op': 'init_ok'} )
while True:
msg = s2c.get()
op = msg.get('op','')
if op == 'data':
data = msg['data']
result = self.process_data (data)
c2s.put ( {'op': 'success', 'data' : data, 'result' : result} )
data = None
elif op == 'close':
break
time.sleep(0.001)
self.on_finalize()
c2s.put ( {'op': 'finalized'} )
except Exception as e:
if data is not None:
print ('Exception while processing data [%s]: %s' % (self.get_data_name(data), traceback.format_exc()) )
else:
print ('Exception: %s' % (traceback.format_exc()) )
c2s.put ( {'op': 'error', 'data' : data} )
#overridable
def __init__(self, name, SubprocessorCli_class, no_response_time_sec = 60):
if not issubclass(SubprocessorCli_class, Subprocessor.Cli):
raise ValueError("SubprocessorCli_class must be subclass of Subprocessor.Cli")
self.name = name
self.SubprocessorCli_class = SubprocessorCli_class
self.no_response_time_sec = no_response_time_sec
#overridable
def process_info_generator(self):
#yield per process (name, host_dict, client_dict)
raise NotImplementedError
#overridable optional
def on_clients_initialized(self):
#logic when all subprocesses initialized and ready
pass
#overridable optional
def on_clients_finalized(self):
#logic when all subprocess finalized
pass
#overridable
def get_data(self, host_dict):
#return data for processing here
raise NotImplementedError
#overridable
def on_data_return (self, host_dict, data):
#you have to place returned 'data' back to your queue
raise NotImplementedError
#overridable
def on_result (self, host_dict, data, result):
#your logic what to do with 'result' of 'data'
raise NotImplementedError
#overridable
def get_result(self):
#return result that will be returned in func run()
raise NotImplementedError
#overridable
def on_tick(self):
#tick in main loop
pass
def run(self):
self.clis = []
#getting info about name of subprocesses, host and client dicts, and spawning them
for name, host_dict, client_dict in self.process_info_generator():
try:
cli = self.SubprocessorCli_class(client_dict)
cli.state = 1
cli.sent_time = time.time()
cli.sent_data = None
cli.name = name
cli.host_dict = host_dict
self.clis.append (cli)
except:
raise Exception ("Unable to start subprocess %s" % (name))
if len(self.clis) == 0:
raise Exception ("Unable to start Subprocessor '%s' " % (self.name))
#wait for subprocesses to report success (or failure) of initialization
while True:
for cli in self.clis[:]:
while not cli.c2s.empty():
obj = cli.c2s.get()
op = obj.get('op','')
if op == 'init_ok':
cli.state = 0
elif op == 'error':
cli.kill()
self.clis.remove(cli)
break
if all ([cli.state == 0 for cli in self.clis]):
break
io.process_messages(0.005)
if len(self.clis) == 0:
raise Exception ( "Unable to start subprocesses." )
#ok some processes survived, initialize host logic
self.on_clients_initialized()
#main loop of data processing
while True:
for cli in self.clis[:]:
while not cli.c2s.empty():
obj = cli.c2s.get()
op = obj.get('op','')
if op == 'success':
#data processed successfully; pass data and result to on_result
self.on_result (cli.host_dict, obj['data'], obj['result'])
self.sent_data = None
cli.state = 0
elif op == 'error':
#an error occurred while processing data; return the chunk via on_data_return
if 'data' in obj.keys():
self.on_data_return (cli.host_dict, obj['data'] )
#and killing process
cli.kill()
self.clis.remove(cli)
elif op == 'log_info':
io.log_info(obj['msg'])
elif op == 'log_err':
io.log_err(obj['msg'])
elif op == 'progress_bar_inc':
io.progress_bar_inc(obj['c'])
for cli in self.clis[:]:
if cli.state == 0:
#free state of subprocess, get some data from get_data
data = self.get_data(cli.host_dict)
if data is not None:
#and send it to subprocess
cli.s2c.put ( {'op': 'data', 'data' : data} )
cli.sent_time = time.time()
cli.sent_data = data
cli.state = 1
elif cli.state == 1:
if self.no_response_time_sec != 0 and (time.time() - cli.sent_time) > self.no_response_time_sec:
#subprocess busy too long
print ( '%s does not respond, terminating it.' % (cli.name) )
self.on_data_return (cli.host_dict, cli.sent_data )
cli.kill()
self.clis.remove(cli)
if all ([cli.state == 0 for cli in self.clis]):
#all subprocesses free and no more data available to process, ending loop
break
io.process_messages(0.005)
self.on_tick()
#gracefully terminating subprocesses
for cli in self.clis[:]:
cli.s2c.put ( {'op': 'close'} )
cli.sent_time = time.time()
while True:
for cli in self.clis[:]:
terminate_it = False
while not cli.c2s.empty():
obj = cli.c2s.get()
obj_op = obj['op']
if obj_op == 'finalized':
terminate_it = True
break
if self.no_response_time_sec != 0 and (time.time() - cli.sent_time) > self.no_response_time_sec:
terminate_it = True
if terminate_it:
cli.state = 2
cli.kill()
if all ([cli.state == 2 for cli in self.clis]):
break
#finalizing host logic and return result
self.on_clients_finalized()
return self.get_result()
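A minimal host/worker pair built on the new base, to illustrate the required overrides (all names are illustrative): run() spawns the workers, feeds them via get_data(), and returns get_result().

class DoublerSubprocessor(Subprocessor):
    class Cli(Subprocessor.Cli):
        def process_data(self, data):
            return data * 2                   # executed in the worker process

    def __init__(self, items):
        self.items = list(items)
        self.results = []
        super().__init__('Doubler', DoublerSubprocessor.Cli)

    def process_info_generator(self):
        yield 'CPU0', {}, {}                  # one worker: (name, host_dict, client_dict)

    def get_data(self, host_dict):
        return self.items.pop(0) if self.items else None

    def on_data_return(self, host_dict, data):
        self.items.insert(0, data)            # failed chunk goes back into the queue

    def on_result(self, host_dict, data, result):
        self.results.append(result)

    def get_result(self):
        return self.results

results = DoublerSubprocessor([1, 2, 3]).run()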

joblib/__init__.py Normal file

@@ -0,0 +1,2 @@
from .SubprocessorBase import Subprocessor
from .SubprocessFunctionCaller import SubprocessFunctionCaller

main.py

@@ -1,5 +1,6 @@
import os
import sys
import time
import argparse
from utils import Path_utils
from utils import os_utils
@@ -12,35 +13,26 @@ class fixPathAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, os.path.abspath(os.path.expanduser(values)))
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == "__main__":
os_utils.set_process_lowest_prio()
parser = argparse.ArgumentParser()
parser.add_argument('--tf-suppress-std', action="store_true", dest="tf_suppress_std", default=False, help="Suppress tensorflow initialization info. May not works on some python builds such as anaconda python 3.6.4. If you can fix it, you are welcome.")
subparsers = parser.add_subparsers()
def process_extract(arguments):
from mainscripts import Extractor
Extractor.main (
input_dir=arguments.input_dir,
output_dir=arguments.output_dir,
debug=arguments.debug,
face_type=arguments.face_type,
detector=arguments.detector,
multi_gpu=arguments.multi_gpu,
cpu_only=arguments.cpu_only,
manual_fix=arguments.manual_fix,
manual_output_debug_fix=arguments.manual_output_debug_fix,
manual_window_size=arguments.manual_window_size
)
from mainscripts import Extractor
Extractor.main( arguments.input_dir,
arguments.output_dir,
arguments.debug,
arguments.detector,
arguments.manual_fix,
arguments.manual_output_debug_fix,
arguments.manual_window_size,
face_type=arguments.face_type,
device_args={'cpu_only' : arguments.cpu_only,
'multi_gpu' : arguments.multi_gpu,
}
)
extract_parser = subparsers.add_parser( "extract", help="Extract faces from pictures.")
extract_parser.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory. A directory containing the files you wish to process.")
@@ -79,24 +71,26 @@ if __name__ == "__main__":
util_parser.add_argument('--add-landmarks-debug-images', action="store_true", dest="add_landmarks_debug_images", default=False, help="Add landmarks debug image for aligned faces.")
util_parser.set_defaults (func=process_util)
def process_train(arguments):
from mainscripts import Trainer
Trainer.main (
training_data_src_dir=arguments.training_data_src_dir,
training_data_dst_dir=arguments.training_data_dst_dir,
model_path=arguments.model_dir,
model_name=arguments.model_name,
debug = arguments.debug,
#**options
force_gpu_idx = arguments.force_gpu_idx,
cpu_only = arguments.cpu_only
)
def process_train(arguments):
args = {'training_data_src_dir' : arguments.training_data_src_dir,
'training_data_dst_dir' : arguments.training_data_dst_dir,
'model_path' : arguments.model_dir,
'model_name' : arguments.model_name,
'no_preview' : arguments.no_preview,
'debug' : arguments.debug,
}
device_args = {'cpu_only' : arguments.cpu_only,
'force_gpu_idx' : arguments.force_gpu_idx,
}
from mainscripts import Trainer
Trainer.main(args, device_args)
train_parser = subparsers.add_parser( "train", help="Trainer")
train_parser.add_argument('--training-data-src-dir', required=True, action=fixPathAction, dest="training_data_src_dir", help="Dir of src-set.")
train_parser.add_argument('--training-data-dst-dir', required=True, action=fixPathAction, dest="training_data_dst_dir", help="Dir of dst-set.")
train_parser.add_argument('--model-dir', required=True, action=fixPathAction, dest="model_dir", help="Model dir.")
train_parser.add_argument('--model', required=True, dest="model_name", choices=Path_utils.get_all_dir_names_startswith ( Path(__file__).parent / 'models' , 'Model_'), help="Type of model")
train_parser.add_argument('--no-preview', action="store_true", dest="no_preview", default=False, help="Disable preview window.")
train_parser.add_argument('--debug', action="store_true", dest="debug", default=False, help="Debug samples.")
train_parser.add_argument('--cpu-only', action="store_true", dest="cpu_only", default=False, help="Train on CPU.")
train_parser.add_argument('--force-gpu-idx', type=int, dest="force_gpu_idx", default=-1, help="Force to choose this GPU idx.")
@@ -104,17 +98,18 @@ if __name__ == "__main__":
train_parser.set_defaults (func=process_train)
def process_convert(arguments):
args = {'input_dir' : arguments.input_dir,
'output_dir' : arguments.output_dir,
'aligned_dir' : arguments.aligned_dir,
'model_dir' : arguments.model_dir,
'model_name' : arguments.model_name,
'debug' : arguments.debug,
}
device_args = {'cpu_only' : arguments.cpu_only,
'force_gpu_idx' : arguments.force_gpu_idx,
}
from mainscripts import Converter
Converter.main (
input_dir=arguments.input_dir,
output_dir=arguments.output_dir,
aligned_dir=arguments.aligned_dir,
model_dir=arguments.model_dir,
model_name=arguments.model_name,
debug = arguments.debug,
force_gpu_idx = arguments.force_gpu_idx,
cpu_only = arguments.cpu_only
)
Converter.main (args, device_args)
convert_parser = subparsers.add_parser( "convert", help="Converter")
convert_parser.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory. A directory containing the files you wish to process.")
@@ -134,14 +129,27 @@ if __name__ == "__main__":
parser.set_defaults(func=bad_args)
arguments = parser.parse_args()
if arguments.tf_suppress_std:
os.environ['TF_SUPPRESS_STD'] = '1'
#os.environ['force_plaidML'] = '1'
arguments.func(arguments)
print ("Done.")
"""
Suppressing error with keras 2.2.4+ on python exit:
Exception ignored in: <bound method BaseSession._Callable.__del__ of <tensorflow.python.client.session.BaseSession._Callable object at 0x000000001BDEA9B0>>
Traceback (most recent call last):
File "D:\DeepFaceLab\_internal\bin\lib\site-packages\tensorflow\python\client\session.py", line 1413, in __del__
AttributeError: 'NoneType' object has no attribute 'raise_exception_on_not_ok_status'
reproduce: https://github.com/keras-team/keras/issues/11751 ( still no solution )
"""
outnull_file = open(os.devnull, 'w')
os.dup2 ( outnull_file.fileno(), sys.stderr.fileno() )
sys.stderr = outnull_file
'''
import code
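With the entry points reorganized, option parsing is unchanged on the command line apart from the new --no-preview flag; main.py now just packs the parsed values into args/device_args dicts before dispatching. An example invocation (paths and model name illustrative):

python main.py train --training-data-src-dir workspace/data_src/aligned \
                     --training-data-dst-dir workspace/data_dst/aligned \
                     --model-dir workspace/model --model SAE --no-preview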


@@ -4,7 +4,6 @@ import traceback
from pathlib import Path
from utils import Path_utils
import cv2
from tqdm import tqdm
from utils.DFLPNG import DFLPNG
from utils.DFLJPG import DFLJPG
from utils.cv2_utils import *
@@ -13,219 +12,196 @@ import shutil
import numpy as np
import time
import multiprocessing
from models import ConverterBase
from converters import Converter
from joblib import Subprocessor, SubprocessFunctionCaller
from interact import interact as io
class model_process_predictor(object):
def __init__(self, sq, cq, lock):
self.sq = sq
self.cq = cq
self.lock = lock
class ConvertSubprocessor(Subprocessor):
class Cli(Subprocessor.Cli):
def __call__(self, face):
self.lock.acquire()
self.sq.put ( {'op': 'predict', 'face' : face} )
while True:
if not self.cq.empty():
obj = self.cq.get()
obj_op = obj['op']
if obj_op == 'predict_result':
self.lock.release()
return obj['result']
time.sleep(0.005)
def model_process(stdin_fd, model_name, model_dir, in_options, sq, cq):
sys.stdin = os.fdopen(stdin_fd)
try:
model_path = Path(model_dir)
import models
model = models.import_model(model_name)(model_path, **in_options)
converter = model.get_converter(**in_options)
converter.dummy_predict()
cq.put ( {'op':'init', 'converter' : converter.copy_and_set_predictor( None ) } )
#override
def on_initialize(self, client_dict):
io.log_info ('Running on %s.' % (client_dict['device_name']) )
self.device_idx = client_dict['device_idx']
self.device_name = client_dict['device_name']
self.converter = client_dict['converter']
self.output_path = Path(client_dict['output_dir']) if 'output_dir' in client_dict.keys() else None
self.alignments = client_dict['alignments']
self.debug = client_dict['debug']
while True:
while not sq.empty():
obj = sq.get()
obj_op = obj['op']
if obj_op == 'predict':
result = converter.predictor ( obj['face'] )
cq.put ( {'op':'predict_result', 'result':result} )
time.sleep(0.005)
except Exception as e:
print ( 'Error: %s' % (str(e)))
traceback.print_exc()
from utils.SubprocessorBase import SubprocessorBase
class ConvertSubprocessor(SubprocessorBase):
from nnlib import nnlib
#model process ate all GPU mem,
#so we cannot use GPU for any TF operations in converter processes
#therefore forcing active_DeviceConfig to CPU only
nnlib.active_DeviceConfig = nnlib.DeviceConfig (cpu_only=True)
return None
#override
def process_data(self, data):
filename_path = Path(data)
files_processed = 1
faces_processed = 0
output_filename_path = self.output_path / (filename_path.stem + '.png')
if self.converter.type == Converter.TYPE_FACE and filename_path.stem not in self.alignments.keys():
if not self.debug:
self.log_info ( 'no faces found for %s, copying without faces' % (filename_path.name) )
shutil.copy ( str(filename_path), str(output_filename_path) )
else:
image = (cv2_imread(str(filename_path)) / 255.0).astype(np.float32)
if self.converter.type == Converter.TYPE_IMAGE:
image = self.converter.convert_image(image, None, self.debug)
if self.debug:
raise NotImplementedError
#for img in image:
# io.show_image ('Debug convert', img )
# cv2.waitKey(0)
faces_processed = 1
elif self.converter.type == Converter.TYPE_IMAGE_WITH_LANDMARKS:
if filename_path.suffix == '.png':
dflimg = DFLPNG.load( str(filename_path) )
elif filename_path.suffix == '.jpg':
dflimg = DFLJPG.load ( str(filename_path) )
else:
dflimg = None
if dflimg is not None:
image_landmarks = dflimg.get_landmarks()
image = self.converter.convert_image(image, image_landmarks, self.debug)
if self.debug:
raise NotImplementedError
#for img in image:
# io.show_image ('Debug convert', img )
# cv2.waitKey(0)
faces_processed = 1
else:
self.log_err ("%s is not a dfl image file" % (filename_path.name) )
elif self.converter.type == Converter.TYPE_FACE:
faces = self.alignments[filename_path.stem]
if self.debug:
debug_images = []
for face_num, image_landmarks in enumerate(faces):
try:
if self.debug:
self.log_info ( '\nConverting face_num [%d] in file [%s]' % (face_num, filename_path) )
if self.debug:
debug_images += self.converter.convert_face(image, image_landmarks, self.debug)
else:
image = self.converter.convert_face(image, image_landmarks, self.debug)
except Exception as e:
self.log_info ( 'Error while converting face_num [%d] in file [%s]: %s' % (face_num, filename_path, str(e)) )
traceback.print_exc()
if self.debug:
return (1, debug_images)
faces_processed = len(faces)
if not self.debug:
cv2_imwrite (str(output_filename_path), (image*255).astype(np.uint8) )
return (0, files_processed, faces_processed)
#overridable
def get_data_name (self, data):
#return a string identifier for your data
return data
#override
def __init__(self, converter, input_path_image_paths, output_path, alignments, debug = False, **in_options):
super().__init__('Converter', 86400 if debug == True else 60)
self.converter = converter
self.input_path_image_paths = input_path_image_paths
def __init__(self, converter, input_path_image_paths, output_path, alignments, debug = False):
super().__init__('Converter', ConvertSubprocessor.Cli, 86400 if debug == True else 60)
self.converter = converter
self.host_processor, self.cli_func = SubprocessFunctionCaller.make_pair ( self.converter.predictor_func )
self.process_converter = self.converter.copy_and_set_predictor(self.cli_func)
self.input_data = self.input_path_image_paths = input_path_image_paths
self.output_path = output_path
self.alignments = alignments
self.debug = debug
self.in_options = in_options
self.input_data = self.input_path_image_paths
self.files_processed = 0
self.faces_processed = 0
#override
def process_info_generator(self):
r = [0] if self.debug else range( min(multiprocessing.cpu_count(), 6) )
for i in r:
yield 'CPU%d' % (i), {}, {'device_idx': i,
'device_name': 'CPU%d' % (i),
'converter' : self.converter,
'converter' : self.process_converter,
'output_dir' : str(self.output_path),
'alignments' : self.alignments,
'debug': self.debug,
'in_options': self.in_options
'debug': self.debug
}
#override
def get_no_process_started_message(self):
return 'Unable to start CPU processes.'
#overridable optional
def on_clients_initialized(self):
if self.debug:
io.named_window ("Debug convert")
io.progress_bar ("Converting", len (self.input_data) )
#overridable optional
def on_clients_finalized(self):
io.progress_bar_close()
if self.debug:
io.destroy_all_windows()
#override
def onHostGetProgressBarDesc(self):
return "Converting"
#override
def onHostGetProgressBarLen(self):
return len (self.input_data)
#override
def onHostGetData(self, host_dict):
def get_data(self, host_dict):
if len (self.input_data) > 0:
return self.input_data.pop(0)
return None
#override
def onHostDataReturn (self, host_dict, data):
def on_data_return (self, host_dict, data):
self.input_data.insert(0, data)
#overridable
def onClientGetDataName (self, data):
#return a string identifier for your data
return data
#override
def onClientInitialize(self, client_dict):
print ('Running on %s.' % (client_dict['device_name']) )
self.device_idx = client_dict['device_idx']
self.device_name = client_dict['device_name']
self.converter = client_dict['converter']
self.output_path = Path(client_dict['output_dir']) if 'output_dir' in client_dict.keys() else None
self.alignments = client_dict['alignments']
self.debug = client_dict['debug']
from nnlib import nnlib
#model process ate all GPU mem,
#so we cannot use GPU for any TF operations in converter processes (for example image_utils.TFLabConverter)
#therefore forcing active_DeviceConfig to CPU only
nnlib.active_DeviceConfig = nnlib.DeviceConfig (cpu_only=True)
return None
#override
def onClientFinalize(self):
pass
def on_result (self, host_dict, data, result):
if result[0] == 0:
self.files_processed += result[0]
self.faces_processed += result[1]
elif result[0] == 1:
for img in result[1]:
io.show_image ('Debug convert', (img*255).astype(np.uint8) )
io.wait_any_key()
io.progress_bar_inc(1)
#override
def on_tick(self):
self.host_processor.process_messages()
#override
def onClientProcessData(self, data):
filename_path = Path(data)
files_processed = 1
faces_processed = 0
output_filename_path = self.output_path / (filename_path.stem + '.png')
if self.converter.get_mode() == ConverterBase.MODE_FACE and filename_path.stem not in self.alignments.keys():
if not self.debug:
print ( 'no faces found for %s, copying without faces' % (filename_path.name) )
shutil.copy ( str(filename_path), str(output_filename_path) )
else:
image = (cv2_imread(str(filename_path)) / 255.0).astype(np.float32)
if self.converter.get_mode() == ConverterBase.MODE_IMAGE:
image = self.converter.convert_image(image, None, self.debug)
if self.debug:
for img in image:
cv2.imshow ('Debug convert', img )
cv2.waitKey(0)
faces_processed = 1
elif self.converter.get_mode() == ConverterBase.MODE_IMAGE_WITH_LANDMARKS:
if filename_path.suffix == '.png':
dflimg = DFLPNG.load( str(filename_path), throw_on_no_embedded_data=True )
elif filename_path.suffix == '.jpg':
dflimg = DFLJPG.load ( str(filename_path), throw_on_no_embedded_data=True )
else:
raise Exception ("%s is not a dfl image file" % (filename_path.name) )
image_landmarks = dflimg.get_landmarks()
image = self.converter.convert_image(image, image_landmarks, self.debug)
if self.debug:
for img in image:
cv2.imshow ('Debug convert', img )
cv2.waitKey(0)
faces_processed = 1
elif self.converter.get_mode() == ConverterBase.MODE_FACE:
faces = self.alignments[filename_path.stem]
for face_num, image_landmarks in enumerate(faces):
try:
if self.debug:
print ( '\nConverting face_num [%d] in file [%s]' % (face_num, filename_path) )
image = self.converter.convert_face(image, image_landmarks, self.debug)
if self.debug:
for img in image:
cv2.imshow ('Debug convert', (img*255).astype(np.uint8) )
cv2.waitKey(0)
except Exception as e:
print ( 'Error while converting face_num [%d] in file [%s]: %s' % (face_num, filename_path, str(e)) )
traceback.print_exc()
faces_processed = len(faces)
if not self.debug:
cv2_imwrite (str(output_filename_path), (image*255).astype(np.uint8) )
return (files_processed, faces_processed)
#override
def onHostResult (self, host_dict, data, result):
self.files_processed += result[0]
self.faces_processed += result[1]
return 1
#override
def onFinalizeAndGetResult(self):
def get_result(self):
return self.files_processed, self.faces_processed
def main (input_dir, output_dir, model_dir, model_name, aligned_dir=None, **in_options):
print ("Running converter.\r\n")
def main (args, device_args):
io.log_info ("Running converter.\r\n")
debug = in_options['debug']
aligned_dir = args.get('aligned_dir', None)
try:
input_path = Path(input_dir)
output_path = Path(output_dir)
model_path = Path(model_dir)
input_path = Path(args['input_dir'])
output_path = Path(args['output_dir'])
model_path = Path(args['model_dir'])
if not input_path.exists():
print('Input directory not found. Please ensure it exists.')
io.log_err('Input directory not found. Please ensure it exists.')
return
if output_path.exists():
@@ -233,126 +209,116 @@ def main (input_dir, output_dir, model_dir, model_name, aligned_dir=None, **in_o
Path(filename).unlink()
else:
output_path.mkdir(parents=True, exist_ok=True)
if not model_path.exists():
print('Model directory not found. Please ensure it exists.')
io.log_err('Model directory not found. Please ensure it exists.')
return
model_sq = multiprocessing.Queue()
model_cq = multiprocessing.Queue()
model_lock = multiprocessing.Lock()
model_p = multiprocessing.Process(target=model_process, args=( sys.stdin.fileno(), model_name, model_dir, in_options, model_sq, model_cq))
model_p.start()
while True:
if not model_cq.empty():
obj = model_cq.get()
obj_op = obj['op']
if obj_op == 'init':
converter = obj['converter']
break
import models
model = models.import_model( args['model_name'] )(model_path, device_args=device_args)
converter = model.get_converter()
converter.dummy_predict()
alignments = None
if converter.get_mode() == ConverterBase.MODE_FACE:
if converter.type == Converter.TYPE_FACE:
if aligned_dir is None:
print('Aligned directory not found. Please ensure it exists.')
io.log_err('Aligned directory not found. Please ensure it exists.')
return
aligned_path = Path(aligned_dir)
if not aligned_path.exists():
print('Aligned directory not found. Please ensure it exists.')
io.log_err('Aligned directory not found. Please ensure it exists.')
return
alignments = {}
aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)
for filepath in tqdm(aligned_path_image_paths, desc="Collecting alignments", ascii=True ):
for filepath in io.progress_bar_generator(aligned_path_image_paths, "Collecting alignments"):
filepath = Path(filepath)
if filepath.suffix == '.png':
dflimg = DFLPNG.load( str(filepath), print_on_no_embedded_data=True )
dflimg = DFLPNG.load( str(filepath) )
elif filepath.suffix == '.jpg':
dflimg = DFLJPG.load ( str(filepath), print_on_no_embedded_data=True )
dflimg = DFLJPG.load ( str(filepath) )
else:
print ("%s is not a dfl image file" % (filepath.name) )
dflimg = None
if dflimg is None:
io.log_err ("%s is not a dfl image file" % (filepath.name) )
continue
source_filename_stem = Path( dflimg.get_source_filename() ).stem
if source_filename_stem not in alignments.keys():
alignments[ source_filename_stem ] = []
alignments[ source_filename_stem ].append (dflimg.get_source_landmarks())
#interpolate landmarks
#from facelib import LandmarksProcessor
#from facelib import FaceType
#a = sorted(alignments.keys())
#a_len = len(a)
#
#box_pts = 3
#box = np.ones(box_pts)/box_pts
#for i in range( a_len ):
# if i >= box_pts and i <= a_len-box_pts-1:
# af0 = alignments[ a[i] ][0] ##first face
# m0 = LandmarksProcessor.get_transform_mat (af0, 256, face_type=FaceType.FULL)
#
# points = []
#
# for j in range(-box_pts, box_pts+1):
# af = alignments[ a[i+j] ][0] ##first face
# m = LandmarksProcessor.get_transform_mat (af, 256, face_type=FaceType.FULL)
# p = LandmarksProcessor.transform_points (af, m)
# points.append (p)
#
# points = np.array(points)
# points_len = len(points)
# t_points = np.transpose(points, [1,0,2])
#
# p1 = np.array ( [ int(np.convolve(x[:,0], box, mode='same')[points_len//2]) for x in t_points ] )
# p2 = np.array ( [ int(np.convolve(x[:,1], box, mode='same')[points_len//2]) for x in t_points ] )
#
# new_points = np.concatenate( [np.expand_dims(p1,-1),np.expand_dims(p2,-1)], -1 )
#
# alignments[ a[i] ][0] = LandmarksProcessor.transform_points (new_points, m0, True).astype(np.int32)
files_processed, faces_processed = ConvertSubprocessor (
converter = converter.copy_and_set_predictor( model_process_predictor(model_sq,model_cq,model_lock) ),
converter = converter,
input_path_image_paths = Path_utils.get_image_paths(input_path),
output_path = output_path,
alignments = alignments,
**in_options ).process()
alignments = alignments,
debug = args.get('debug',False)
).run()
model_p.terminate()
'''
if model_name == 'AVATAR':
output_path_image_paths = Path_utils.get_image_paths(output_path)
last_ok_frame = -1
for filename in output_path_image_paths:
filename_path = Path(filename)
stem = Path(filename).stem
try:
frame = int(stem)
except:
raise Exception ('Aligned avatars must be created from indexed sequence files.')
if frame-last_ok_frame > 1:
start = last_ok_frame + 1
end = frame - 1
print ("Filling gaps: [%d...%d]" % (start, end) )
for i in range (start, end+1):
shutil.copy ( str(filename), str( output_path / ('%.5d%s' % (i, filename_path.suffix )) ) )
last_ok_frame = frame
'''
model.finalize()
except Exception as e:
print ( 'Error: %s' % (str(e)))
traceback.print_exc()
'''
if model_name == 'AVATAR':
output_path_image_paths = Path_utils.get_image_paths(output_path)
last_ok_frame = -1
for filename in output_path_image_paths:
filename_path = Path(filename)
stem = Path(filename).stem
try:
frame = int(stem)
except:
raise Exception ('Aligned avatars must be created from indexed sequence files.')
if frame-last_ok_frame > 1:
start = last_ok_frame + 1
end = frame - 1
print ("Filling gaps: [%d...%d]" % (start, end) )
for i in range (start, end+1):
shutil.copy ( str(filename), str( output_path / ('%.5d%s' % (i, filename_path.suffix )) ) )
last_ok_frame = frame
'''
#interpolate landmarks
#from facelib import LandmarksProcessor
#from facelib import FaceType
#a = sorted(alignments.keys())
#a_len = len(a)
#
#box_pts = 3
#box = np.ones(box_pts)/box_pts
#for i in range( a_len ):
# if i >= box_pts and i <= a_len-box_pts-1:
# af0 = alignments[ a[i] ][0] ##first face
# m0 = LandmarksProcessor.get_transform_mat (af0, 256, face_type=FaceType.FULL)
#
# points = []
#
# for j in range(-box_pts, box_pts+1):
# af = alignments[ a[i+j] ][0] ##first face
# m = LandmarksProcessor.get_transform_mat (af, 256, face_type=FaceType.FULL)
# p = LandmarksProcessor.transform_points (af, m)
# points.append (p)
#
# points = np.array(points)
# points_len = len(points)
# t_points = np.transpose(points, [1,0,2])
#
# p1 = np.array ( [ int(np.convolve(x[:,0], box, mode='same')[points_len//2]) for x in t_points ] )
# p2 = np.array ( [ int(np.convolve(x[:,1], box, mode='same')[points_len//2]) for x in t_points ] )
#
# new_points = np.concatenate( [np.expand_dims(p1,-1),np.expand_dims(p2,-1)], -1 )
#
# alignments[ a[i] ][0] = LandmarksProcessor.transform_points (new_points, m0, True).astype(np.int32)


@@ -15,10 +15,137 @@ import facelib
from facelib import FaceType
from facelib import LandmarksProcessor
from nnlib import nnlib
from joblib import Subprocessor
from interact import interact as io
class ExtractSubprocessor(Subprocessor):
class Cli(Subprocessor.Cli):
from utils.SubprocessorBase import SubprocessorBase
class ExtractSubprocessor(SubprocessorBase):
#override
def on_initialize(self, client_dict):
self.log_info ('Running on %s.' % (client_dict['device_name']) )
self.type = client_dict['type']
self.image_size = client_dict['image_size']
self.face_type = client_dict['face_type']
self.device_idx = client_dict['device_idx']
self.cpu_only = client_dict['device_type'] == 'CPU'
self.output_path = Path(client_dict['output_dir']) if 'output_dir' in client_dict.keys() else None
self.debug = client_dict['debug']
self.detector = client_dict['detector']
self.cached_image = (None, None)
self.e = None
device_config = nnlib.DeviceConfig ( cpu_only=self.cpu_only, force_gpu_idx=self.device_idx, allow_growth=True)
if self.type == 'rects':
if self.detector is not None:
if self.detector == 'mt':
nnlib.import_all (device_config)
self.e = facelib.MTCExtractor()
elif self.detector == 'dlib':
nnlib.import_dlib (device_config)
self.e = facelib.DLIBExtractor(nnlib.dlib)
else:
raise ValueError ("Wrong detector type.")
if self.e is not None:
self.e.__enter__()
elif self.type == 'landmarks':
nnlib.import_all (device_config)
self.e = facelib.LandmarksExtractor(nnlib.keras)
self.e.__enter__()
elif self.type == 'final':
pass
#override
def on_finalize(self):
if self.e is not None:
self.e.__exit__()
#override
def process_data(self, data):
filename_path = Path( data[0] )
filename_path_str = str(filename_path)
if self.cached_image[0] == filename_path_str:
image = self.cached_image[1]
else:
image = cv2_imread( filename_path_str )
self.cached_image = ( filename_path_str, image )
if image is None:
self.log_err ( 'Failed to extract %s, reason: cv2_imread() fail.' % ( str(filename_path) ) )
else:
if self.type == 'rects':
rects = self.e.extract_from_bgr (image)
return [str(filename_path), rects]
elif self.type == 'landmarks':
rects = data[1]
landmarks = self.e.extract_from_bgr (image, rects)
return [str(filename_path), landmarks]
elif self.type == 'final':
src_dflimg = None
(h,w,c) = image.shape
if h == w:
#extracting from already extracted jpg image?
if filename_path.suffix == '.jpg':
src_dflimg = DFLJPG.load ( str(filename_path) )
result = []
faces = data[1]
if self.debug:
debug_output_file = '{}{}'.format( str(Path(str(self.output_path) + '_debug') / filename_path.stem), '.jpg')
debug_image = image.copy()
for (face_idx, face) in enumerate(faces):
output_file = '{}_{}{}'.format(str(self.output_path / filename_path.stem), str(face_idx), '.jpg')
rect = face[0]
image_landmarks = np.array(face[1])
if self.debug:
LandmarksProcessor.draw_rect_landmarks (debug_image, rect, image_landmarks, self.image_size, self.face_type)
if self.face_type == FaceType.MARK_ONLY:
face_image = image
face_image_landmarks = image_landmarks
else:
image_to_face_mat = LandmarksProcessor.get_transform_mat (image_landmarks, self.image_size, self.face_type)
face_image = cv2.warpAffine(image, image_to_face_mat, (self.image_size, self.image_size), cv2.INTER_LANCZOS4)
face_image_landmarks = LandmarksProcessor.transform_points (image_landmarks, image_to_face_mat)
if src_dflimg is not None:
#if extracting from dflimg just copy it in order not to lose quality
shutil.copy ( str(filename_path), str(output_file) )
else:
cv2_imwrite(output_file, face_image, [int(cv2.IMWRITE_JPEG_QUALITY), 85] )
DFLJPG.embed_data(output_file, face_type = FaceType.toString(self.face_type),
landmarks = face_image_landmarks.tolist(),
source_filename = filename_path.name,
source_rect= rect,
source_landmarks = image_landmarks.tolist()
)
result.append (output_file)
if self.debug:
cv2_imwrite(debug_output_file, debug_image, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )
return result
return None
#overridable
def get_data_name (self, data):
#return a string identifier for your data
return data[0]
#override
def __init__(self, input_data, type, image_size, face_type, debug, multi_gpu=False, cpu_only=False, manual=False, manual_window_size=0, detector=None, output_path=None ):
self.input_data = input_data
@@ -35,34 +162,36 @@ class ExtractSubprocessor(SubprocessorBase):
self.result = []
no_response_time_sec = 60 if not self.manual else 999999
super().__init__('Extractor', no_response_time_sec)
super().__init__('Extractor', ExtractSubprocessor.Cli, no_response_time_sec)
#override
def onHostClientsInitialized(self):
def on_clients_initialized(self):
if self.manual == True:
self.wnd_name = 'Manual pass'
cv2.namedWindow(self.wnd_name)
self.landmarks = None
self.param_x = -1
self.param_y = -1
self.param_rect_size = -1
self.param = {'x': 0, 'y': 0, 'rect_size' : 100, 'rect_locked' : False, 'redraw_needed' : False }
io.named_window(self.wnd_name)
io.capture_mouse(self.wnd_name)
io.capture_keys(self.wnd_name)
def onMouse(event, x, y, flags, param):
if event == cv2.EVENT_MOUSEWHEEL:
mod = 1 if flags > 0 else -1
diff = 1 if param['rect_size'] <= 40 else np.clip(param['rect_size'] / 10, 1, 10)
param['rect_size'] = max (5, param['rect_size'] + diff*mod)
elif event == cv2.EVENT_LBUTTONDOWN:
param['rect_locked'] = not param['rect_locked']
param['redraw_needed'] = True
elif not param['rect_locked']:
param['x'] = x
param['y'] = y
cv2.setMouseCallback(self.wnd_name, onMouse, self.param)
self.cache_original_image = (None, None)
self.cache_image = (None, None)
self.cache_text_lines_img = (None, None)
self.landmarks = None
self.x = 0
self.y = 0
self.rect_size = 100
self.rect_locked = False
self.redraw_needed = True
io.progress_bar (None, len (self.input_data))
#override
def on_clients_finalized(self):
if self.manual == True:
io.destroy_all_windows()
io.progress_bar_close()
def get_devices_for_type (self, type, multi_gpu, cpu_only):
if not cpu_only and (type == 'rects' or type == 'landmarks'):
if type == 'rects' and self.detector == 'mt' and nnlib.device.backend == "plaidML":
@@ -86,8 +215,11 @@ class ExtractSubprocessor(SubprocessorBase):
yield (idx, 'GPU', dev_name, dev_vram)
if cpu_only and (type == 'rects' or type == 'landmarks'):
for i in range( min(8, multiprocessing.cpu_count() // 2) ):
yield (i, 'CPU', 'CPU%d' % (i), 0 )
if self.manual:
yield (0, 'CPU', 'CPU', 0 )
else:
for i in range( min(8, multiprocessing.cpu_count() // 2) ):
yield (i, 'CPU', 'CPU%d' % (i), 0 )
if type == 'final':
for i in range( min(8, multiprocessing.cpu_count()) ):
@ -108,25 +240,9 @@ class ExtractSubprocessor(SubprocessorBase):
client_dict['device_name'] = device_name
client_dict['device_type'] = device_type
yield client_dict['device_name'], {}, client_dict
#override
def get_no_process_started_message(self):
if (self.type == 'rects' or self.type == 'landmarks'):
print ( 'You have no capable GPUs. Try to close programs which can consume VRAM, and run again.')
elif self.type == 'final':
print ( 'Unable to start CPU processes.')
#override
def onHostGetProgressBarDesc(self):
return None
#override
def onHostGetProgressBarLen(self):
return len (self.input_data)
#override
def onHostGetData(self, host_dict):
def get_data(self, host_dict):
if not self.manual:
if len (self.input_data) > 0:
return self.input_data.pop(0)
@ -146,33 +262,68 @@ class ExtractSubprocessor(SubprocessorBase):
if len(faces) > 0:
self.rect, self.landmarks = faces.pop()
self.param['rect_locked'] = True
self.rect_locked = True
self.redraw_needed = True
faces.clear()
self.param['rect_size'] = ( self.rect[2] - self.rect[0] ) / 2
self.param['x'] = ( ( self.rect[0] + self.rect[2] ) / 2 ) * self.view_scale
self.param['y'] = ( ( self.rect[1] + self.rect[3] ) / 2 ) * self.view_scale
self.rect_size = ( self.rect[2] - self.rect[0] ) / 2
self.x = ( self.rect[0] + self.rect[2] ) / 2
self.y = ( self.rect[1] + self.rect[3] ) / 2
if len(faces) == 0:
self.original_image = cv2_imread(filename)
if self.cache_original_image[0] == filename:
self.original_image = self.cache_original_image[1]
else:
self.original_image = cv2_imread( filename )
self.cache_original_image = (filename, self.original_image )
(h,w,c) = self.original_image.shape
self.view_scale = 1.0 if self.manual_window_size == 0 else self.manual_window_size / ( h * (16.0/9.0) )
self.original_image = cv2.resize (self.original_image, ( int(w*self.view_scale), int(h*self.view_scale) ), interpolation=cv2.INTER_LINEAR)
(h,w,c) = self.original_image.shape
self.view_scale = 1.0 if self.manual_window_size == 0 else self.manual_window_size / ( h * (16.0/9.0) )
self.text_lines_img = (image_utils.get_draw_text_lines ( self.original_image, (0,0, self.original_image.shape[1], min(100, self.original_image.shape[0]) ),
[ 'Match landmarks with face exactly. Click to confirm/unconfirm selection',
'[Enter] - confirm face landmarks and continue',
'[Space] - confirm as unmarked frame and continue',
'[Mouse wheel] - change rect',
'[,] [.]- prev frame, next frame',
'[Q] - skip remaining frames'
], (1, 1, 1) )*255).astype(np.uint8)
if self.cache_image[0] == (h,w,c) + (self.view_scale,filename):
self.image = self.cache_image[1]
else:
self.image = cv2.resize (self.original_image, ( int(w*self.view_scale), int(h*self.view_scale) ), interpolation=cv2.INTER_LINEAR)
self.cache_image = ( (h,w,c) + (self.view_scale,filename), self.image )
(h,w,c) = self.image.shape
sh = (0,0, w, min(100, h) )
if self.cache_text_lines_img[0] == sh:
self.text_lines_img = self.cache_text_lines_img[1]
else:
self.text_lines_img = (image_utils.get_draw_text_lines ( self.image, sh,
[ 'Match landmarks with face exactly. Click to confirm/unconfirm selection',
'[Enter] - confirm face landmarks and continue',
'[Space] - confirm as unmarked frame and continue',
'[Mouse wheel] - change rect',
'[,] [.]- prev frame, next frame',
'[Q] - skip remaining frames'
], (1, 1, 1) )*255).astype(np.uint8)
self.cache_text_lines_img = (sh, self.text_lines_img)
while True:
key = cv2.waitKey(1) & 0xFF
new_x = self.x
new_y = self.y
new_rect_size = self.rect_size
mouse_events = io.get_mouse_events(self.wnd_name)
for ev in mouse_events:
(x, y, ev, flags) = ev
if ev == io.EVENT_MOUSEWHEEL and not self.rect_locked:
mod = 1 if flags > 0 else -1
diff = 1 if new_rect_size <= 40 else np.clip(new_rect_size / 10, 1, 10)
new_rect_size = max (5, new_rect_size + diff*mod)
elif ev == io.EVENT_LBUTTONDOWN:
self.rect_locked = not self.rect_locked
self.redraw_needed = True
elif not self.rect_locked:
new_x = np.clip (x, 0, w-1) / self.view_scale
new_y = np.clip (y, 0, h-1) / self.view_scale
key_events = io.get_key_events(self.wnd_name)
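#assumed: each key event is a 1-tuple (key,); take the most recent event, or 0 if none arrived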
key, = key_events[-1] if len(key_events) > 0 else (0,)
if key == ord('\r') or key == ord('\n'):
faces.append ( [(self.rect), self.landmarks] )
is_frame_done = True
@ -183,200 +334,81 @@ class ExtractSubprocessor(SubprocessorBase):
elif key == ord('.'):
allow_remark_faces = True
# Only save the face if the rect is still locked
if self.param['rect_locked']:
if self.rect_locked:
faces.append ( [(self.rect), self.landmarks] )
is_frame_done = True
break
elif key == ord(',') and len(self.result) > 0:
# Only save the face if the rect is still locked
if self.param['rect_locked']:
if self.rect_locked:
faces.append ( [(self.rect), self.landmarks] )
go_to_prev_frame = True
break
elif key == ord('q'):
skip_remaining = True
break
new_param_x = np.clip (self.param['x'], 0, w-1) / self.view_scale
new_param_y = np.clip (self.param['y'], 0, h-1) / self.view_scale
new_param_rect_size = self.param['rect_size']
if self.param_x != new_param_x or \
self.param_y != new_param_y or \
self.param_rect_size != new_param_rect_size or \
self.param['redraw_needed']:
self.param_x = new_param_x
self.param_y = new_param_y
self.param_rect_size = new_param_rect_size
if self.x != new_x or \
self.y != new_y or \
self.rect_size != new_rect_size or \
self.redraw_needed:
self.x = new_x
self.y = new_y
self.rect_size = new_rect_size
self.rect = ( int(self.param_x-self.param_rect_size),
int(self.param_y-self.param_rect_size),
int(self.param_x+self.param_rect_size),
int(self.param_y+self.param_rect_size) )
self.rect = ( int(self.x-self.rect_size),
int(self.y-self.rect_size),
int(self.x+self.rect_size),
int(self.y+self.rect_size) )
return [filename, [self.rect]]
io.process_messages(0.0001)
else:
is_frame_done = True
if is_frame_done:
self.result.append ( data )
self.input_data.pop(0)
self.inc_progress_bar(1)
self.param['redraw_needed'] = True
self.param['rect_locked'] = False
io.progress_bar_inc(1)
self.redraw_needed = True
self.rect_locked = False
elif go_to_prev_frame:
self.input_data.insert(0, self.result.pop() )
self.inc_progress_bar(-1)
io.progress_bar_inc(-1)
allow_remark_faces = True
self.param['redraw_needed'] = True
self.param['rect_locked'] = False
self.redraw_needed = True
self.rect_locked = False
elif skip_remaining:
if self.param['rect_locked']:
if self.rect_locked:
faces.append ( [(self.rect), self.landmarks] )
while len(self.input_data) > 0:
self.result.append( self.input_data.pop(0) )
self.inc_progress_bar(1)
io.progress_bar_inc(1)
return None
#override
def onHostDataReturn (self, host_dict, data):
def on_data_return (self, host_dict, data):
if not self.manual:
self.input_data.insert(0, data)
#override
def onClientInitialize(self, client_dict):
self.safe_print ('Running on %s.' % (client_dict['device_name']) )
self.type = client_dict['type']
self.image_size = client_dict['image_size']
self.face_type = client_dict['face_type']
self.device_idx = client_dict['device_idx']
self.cpu_only = client_dict['device_type'] == 'CPU'
self.output_path = Path(client_dict['output_dir']) if 'output_dir' in client_dict.keys() else None
self.debug = client_dict['debug']
self.detector = client_dict['detector']
self.e = None
device_config = nnlib.DeviceConfig ( cpu_only=self.cpu_only, force_gpu_idx=self.device_idx, allow_growth=True)
if self.type == 'rects':
if self.detector is not None:
if self.detector == 'mt':
nnlib.import_all (device_config)
self.e = facelib.MTCExtractor()
elif self.detector == 'dlib':
nnlib.import_dlib (device_config)
self.e = facelib.DLIBExtractor(nnlib.dlib)
self.e.__enter__()
elif self.type == 'landmarks':
nnlib.import_all (device_config)
self.e = facelib.LandmarksExtractor(nnlib.keras)
self.e.__enter__()
elif self.type == 'final':
pass
return None
#override
def onClientFinalize(self):
if self.e is not None:
self.e.__exit__()
#override
def onClientProcessData(self, data):
filename_path = Path( data[0] )
image = cv2_imread( str(filename_path) )
if image is None:
print ( 'Failed to extract %s, reason: cv2_imread() fail.' % ( str(filename_path) ) )
else:
if self.type == 'rects':
rects = self.e.extract_from_bgr (image)
return [str(filename_path), rects]
elif self.type == 'landmarks':
rects = data[1]
landmarks = self.e.extract_from_bgr (image, rects)
return [str(filename_path), landmarks]
elif self.type == 'final':
src_dflimg = None
(h,w,c) = image.shape
if h == w:
#extracting from already extracted jpg image?
if filename_path.suffix == '.jpg':
src_dflimg = DFLJPG.load ( str(filename_path) )
result = []
faces = data[1]
if self.debug:
debug_output_file = '{}{}'.format( str(Path(str(self.output_path) + '_debug') / filename_path.stem), '.jpg')
debug_image = image.copy()
for (face_idx, face) in enumerate(faces):
output_file = '{}_{}{}'.format(str(self.output_path / filename_path.stem), str(face_idx), '.jpg')
rect = face[0]
image_landmarks = np.array(face[1])
if self.debug:
LandmarksProcessor.draw_rect_landmarks (debug_image, rect, image_landmarks, self.image_size, self.face_type)
if self.face_type == FaceType.MARK_ONLY:
face_image = image
face_image_landmarks = image_landmarks
else:
image_to_face_mat = LandmarksProcessor.get_transform_mat (image_landmarks, self.image_size, self.face_type)
face_image = cv2.warpAffine(image, image_to_face_mat, (self.image_size, self.image_size), cv2.INTER_LANCZOS4)
face_image_landmarks = LandmarksProcessor.transform_points (image_landmarks, image_to_face_mat)
if src_dflimg is not None:
#if extracting from dflimg just copy it in order not to lose quality
shutil.copy ( str(filename_path), str(output_file) )
else:
cv2_imwrite(output_file, face_image, [int(cv2.IMWRITE_JPEG_QUALITY), 85] )
DFLJPG.embed_data(output_file, face_type = FaceType.toString(self.face_type),
landmarks = face_image_landmarks.tolist(),
source_filename = filename_path.name,
source_rect= rect,
source_landmarks = image_landmarks.tolist()
)
result.append (output_file)
if self.debug:
cv2_imwrite(debug_output_file, debug_image, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )
return result
return None
#overridable
def onClientGetDataName (self, data):
#return string identificator of your data
return data[0]
#override
def onHostResult (self, host_dict, data, result):
def on_result (self, host_dict, data, result):
if self.manual == True:
self.landmarks = result[1][0][1]
(h,w,c) = self.original_image.shape
image = cv2.addWeighted (self.original_image,1.0,self.text_lines_img,1.0,0)
(h,w,c) = self.image.shape
image = cv2.addWeighted (self.image,1.0,self.text_lines_img,1.0,0)
view_rect = (np.array(self.rect) * self.view_scale).astype(np.int).tolist()
view_landmarks = (np.array(self.landmarks) * self.view_scale).astype(np.int).tolist()
if self.param_rect_size <= 40:
if self.rect_size <= 40:
scaled_rect_size = h // 3 if w > h else w // 3
p1 = (self.param_x - self.param_rect_size, self.param_y - self.param_rect_size)
p2 = (self.param_x + self.param_rect_size, self.param_y - self.param_rect_size)
p3 = (self.param_x - self.param_rect_size, self.param_y + self.param_rect_size)
p1 = (self.x - self.rect_size, self.y - self.rect_size)
p2 = (self.x + self.rect_size, self.y - self.rect_size)
p3 = (self.x - self.rect_size, self.y + self.rect_size)
wh = h if h < w else w
np1 = (w / 2 - wh / 4, h / 2 - wh / 4)
@ -389,12 +421,11 @@ class ExtractSubprocessor(SubprocessorBase):
LandmarksProcessor.draw_rect_landmarks (image, view_rect, view_landmarks, self.image_size, self.face_type)
if self.param['rect_locked']:
if self.rect_locked:
LandmarksProcessor.draw_landmarks(image, view_landmarks, (255,255,0) )
self.param['redraw_needed'] = False
self.redraw_needed = False
cv2.imshow (self.wnd_name, image)
return 0
io.show_image (self.wnd_name, image)
else:
if self.type == 'rects':
self.result.append ( result )
@ -403,98 +434,91 @@ class ExtractSubprocessor(SubprocessorBase):
elif self.type == 'final':
self.result += result
return 1
io.progress_bar_inc(1)
#override
def onFinalizeAndGetResult(self):
if self.manual == True:
cv2.destroyAllWindows()
def get_result(self):
return self.result
class DeletedFilesSearcherSubprocessor(SubprocessorBase):
class DeletedFilesSearcherSubprocessor(Subprocessor):
class Cli(Subprocessor.Cli):
#override
def on_initialize(self, client_dict):
self.debug_paths_stems = client_dict['debug_paths_stems']
return None
#override
def process_data(self, data):
input_path_stem = Path(data[0]).stem
return any ( [ input_path_stem == d_stem for d_stem in self.debug_paths_stems] )
#override
def get_data_name (self, data):
#return a string identifier for your data
return data[0]
#override
def __init__(self, input_paths, debug_paths ):
self.input_paths = input_paths
self.debug_paths_stems = [ Path(d).stem for d in debug_paths]
self.result = []
super().__init__('DeletedFilesSearcherSubprocessor', 60)
super().__init__('DeletedFilesSearcherSubprocessor', DeletedFilesSearcherSubprocessor.Cli, 60)
#override
def process_info_generator(self):
for i in range(0, min(multiprocessing.cpu_count(), 8) ):
yield 'CPU%d' % (i), {}, {'device_idx': i,
'device_name': 'CPU%d' % (i),
'debug_paths_stems' : self.debug_paths_stems
}
for i in range(min(multiprocessing.cpu_count(), 8)):
yield 'CPU%d' % (i), {}, {'debug_paths_stems' : self.debug_paths_stems}
#override
def get_no_process_started_message(self):
print ( 'Unable to start CPU processes.')
def on_clients_initialized(self):
io.progress_bar ("Searching deleted files", len (self.input_paths))
#override
def onHostGetProgressBarDesc(self):
return "Searching deleted files"
def on_clients_finalized(self):
io.progress_bar_close()
#override
def onHostGetProgressBarLen(self):
return len (self.input_paths)
#override
def onHostGetData(self, host_dict):
def get_data(self, host_dict):
if len (self.input_paths) > 0:
return [self.input_paths.pop(0)]
return None
#override
def onHostDataReturn (self, host_dict, data):
def on_data_return (self, host_dict, data):
self.input_paths.insert(0, data[0])
#override
def onClientInitialize(self, client_dict):
self.debug_paths_stems = client_dict['debug_paths_stems']
return None
#override
def onClientProcessData(self, data):
input_path_stem = Path(data[0]).stem
return any ( [ input_path_stem == d_stem for d_stem in self.debug_paths_stems] )
#override
def onClientGetDataName (self, data):
#return string identificator of your data
return data[0]
#override
def onHostResult (self, host_dict, data, result):
def on_result (self, host_dict, data, result):
if result == False:
self.result.append( data[0] )
return 1
io.progress_bar_inc(1)
#override
def onFinalizeAndGetResult(self):
def get_result(self):
return self.result
'''
detector
'dlib'
'mt'
'manual'
face_type
'full_face'
'avatar'
'''
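A hedged usage sketch of the options above (the Extractor import name and the workspace paths are assumptions for illustration, not part of this diff):

#hypothetical invocation of main() with the options documented above
Extractor.main ('workspace/data_src', 'workspace/data_src/aligned',
                detector='mt',
                face_type='full_face',
                device_args={'multi_gpu': True, 'cpu_only': False})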
def main (input_dir, output_dir, debug, detector='mt', multi_gpu=True, cpu_only=False, manual_fix=False, manual_output_debug_fix=False, manual_window_size=1368, image_size=256, face_type='full_face'):
print ("Running extractor.\r\n")
def main(input_dir,
output_dir,
debug=False,
detector='mt',
manual_fix=False,
manual_output_debug_fix=False,
manual_window_size=1368,
image_size=256,
face_type='full_face',
device_args={}):
input_path = Path(input_dir)
output_path = Path(output_dir)
face_type = FaceType.fromString(face_type)
multi_gpu = device_args.get('multi_gpu', False)
cpu_only = device_args.get('cpu_only', False)
if not input_path.exists():
print('Input directory not found. Please ensure it exists.')
return
raise ValueError('Input directory not found. Please ensure it exists.')
if output_path.exists():
if not manual_output_debug_fix:
for filename in Path_utils.get_image_paths(output_path):
@ -505,19 +529,17 @@ def main (input_dir, output_dir, debug, detector='mt', multi_gpu=True, cpu_only=
if manual_output_debug_fix:
debug = True
detector = 'manual'
print('Performing re-extract frames which were deleted from _debug directory.')
io.log_info('Re-extracting frames which were deleted from the _debug directory.')
input_path_image_paths = Path_utils.get_image_unique_filestem_paths(input_path, verbose=True)
input_path_image_paths = Path_utils.get_image_unique_filestem_paths(input_path, verbose_print_func=io.log_info)
if debug:
debug_output_path = Path(str(output_path) + '_debug')
if manual_output_debug_fix:
if not debug_output_path.exists():
print ("%s not found " % ( str(debug_output_path) ))
return
input_path_image_paths = DeletedFilesSearcherSubprocessor ( input_path_image_paths, Path_utils.get_image_paths(debug_output_path) ).process()
raise ValueError("%s not found " % ( str(debug_output_path) ))
input_path_image_paths = DeletedFilesSearcherSubprocessor (input_path_image_paths, Path_utils.get_image_paths(debug_output_path) ).run()
input_path_image_paths = sorted (input_path_image_paths)
else:
if debug_output_path.exists():
@ -530,29 +552,29 @@ def main (input_dir, output_dir, debug, detector='mt', multi_gpu=True, cpu_only=
faces_detected = 0
if images_found != 0:
if detector == 'manual':
print ('Performing manual extract...')
extracted_faces = ExtractSubprocessor ([ (filename,[]) for filename in input_path_image_paths ], 'landmarks', image_size, face_type, debug, cpu_only=cpu_only, manual=True, manual_window_size=manual_window_size).process()
io.log_info ('Performing manual extract...')
extracted_faces = ExtractSubprocessor ([ (filename,[]) for filename in input_path_image_paths ], 'landmarks', image_size, face_type, debug, cpu_only=cpu_only, manual=True, manual_window_size=manual_window_size).run()
else:
print ('Performing 1st pass...')
extracted_rects = ExtractSubprocessor ([ (x,) for x in input_path_image_paths ], 'rects', image_size, face_type, debug, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, detector=detector).process()
io.log_info ('Performing 1st pass...')
extracted_rects = ExtractSubprocessor ([ (x,) for x in input_path_image_paths ], 'rects', image_size, face_type, debug, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, detector=detector).run()
print ('Performing 2nd pass...')
extracted_faces = ExtractSubprocessor (extracted_rects, 'landmarks', image_size, face_type, debug, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False).process()
io.log_info ('Performing 2nd pass...')
extracted_faces = ExtractSubprocessor (extracted_rects, 'landmarks', image_size, face_type, debug, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False).run()
if manual_fix:
print ('Performing manual fix...')
io.log_info ('Performing manual fix...')
if all ( len(data[1]) > 0 for data in extracted_faces ):
print ('All faces are detected, manual fix not needed.')
io.log_info ('All faces are detected, manual fix not needed.')
else:
extracted_faces = ExtractSubprocessor (extracted_faces, 'landmarks', image_size, face_type, debug, manual=True, manual_window_size=manual_window_size).process()
extracted_faces = ExtractSubprocessor (extracted_faces, 'landmarks', image_size, face_type, debug, manual=True, manual_window_size=manual_window_size).run()
if len(extracted_faces) > 0:
print ('Performing 3rd pass...')
final_imgs_paths = ExtractSubprocessor (extracted_faces, 'final', image_size, face_type, debug, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, output_path=output_path).process()
io.log_info ('Performing 3rd pass...')
final_imgs_paths = ExtractSubprocessor (extracted_faces, 'final', image_size, face_type, debug, multi_gpu=multi_gpu, cpu_only=cpu_only, manual=False, output_path=output_path).run()
faces_detected = len(final_imgs_paths)
print('-------------------------')
print('Images found: %d' % (images_found) )
print('Faces detected: %d' % (faces_detected) )
print('-------------------------')
io.log_info ('-------------------------')
io.log_info ('Images found: %d' % (images_found) )
io.log_info ('Faces detected: %d' % (faces_detected) )
io.log_info ('-------------------------')

File diff suppressed because it is too large

@ -1,32 +1,34 @@
import sys
import traceback
import queue
import colorsys
import threading
import time
import numpy as np
import itertools
from pathlib import Path
from utils import Path_utils
from utils import image_utils
import cv2
import models
from interact import interact as io
def trainerThread (input_queue, output_queue, training_data_src_dir, training_data_dst_dir, model_path, model_name, save_interval_min=15, debug=False, **in_options):
def trainerThread (s2c, c2s, args, device_args):
while True:
try:
training_data_src_path = Path(training_data_src_dir)
training_data_dst_path = Path(training_data_dst_dir)
model_path = Path(model_path)
training_data_src_path = Path( args.get('training_data_src_dir', '') )
training_data_dst_path = Path( args.get('training_data_dst_dir', '') )
model_path = Path( args.get('model_path', '') )
model_name = args.get('model_name', '')
save_interval_min = 15
debug = args.get('debug', False)
if not training_data_src_path.exists():
print( 'Training data src directory does not exist.')
return
io.log_err('Training data src directory does not exist.')
break
if not training_data_dst_path.exists():
print( 'Training data dst directory does not exist.')
return
io.log_err('Training data dst directory does not exist.')
break
if not model_path.exists():
model_path.mkdir(exist_ok=True)
@ -36,7 +38,7 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
training_data_src_path=training_data_src_path,
training_data_dst_path=training_data_dst_path,
debug=debug,
**in_options)
device_args=device_args)
is_reached_goal = model.is_reached_epoch_goal()
is_upd_save_time_after_train = False
@ -48,10 +50,10 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
def send_preview():
if not debug:
previews = model.get_previews()
output_queue.put ( {'op':'show', 'previews': previews, 'epoch':model.get_epoch(), 'loss_history': model.get_loss_history().copy() } )
c2s.put ( {'op':'show', 'previews': previews, 'epoch':model.get_epoch(), 'loss_history': model.get_loss_history().copy() } )
else:
previews = [( 'debug, press update for new', model.debug_one_epoch())]
output_queue.put ( {'op':'show', 'previews': previews} )
c2s.put ( {'op':'show', 'previews': previews} )
if model.is_first_run():
@ -59,11 +61,11 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
if model.get_target_epoch() != 0:
if is_reached_goal:
print ('Model already trained to target epoch. You can use preview.')
io.log_info('Model already trained to target epoch. You can use preview.')
else:
print('Starting. Target epoch: %d. Press "Enter" to stop training and save model.' % ( model.get_target_epoch() ) )
io.log_info('Starting. Target epoch: %d. Press "Enter" to stop training and save model.' % ( model.get_target_epoch() ) )
else:
print('Starting. Press "Enter" to stop training and save model.')
io.log_info('Starting. Press "Enter" to stop training and save model.')
last_save_time = time.time()
@ -75,12 +77,12 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
#save resets plaidML programs, so update last_save_time only after plaidML rebuilds them
last_save_time = time.time()
print (loss_string, end='\r')
io.log_info (loss_string, end='\r')
if model.get_target_epoch() != 0 and model.is_reached_epoch_goal():
print ('Reached target epoch.')
io.log_info ('Reached target epoch.')
model_save()
is_reached_goal = True
print ('You can use preview now.')
io.log_info ('You can use preview now.')
if not is_reached_goal and (time.time() - last_save_time) >= save_interval_min*60:
last_save_time = time.time()
@ -95,8 +97,8 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
if debug:
time.sleep(0.005)
while not input_queue.empty():
input = input_queue.get()
while not s2c.empty():
input = s2c.get()
op = input['op']
if op == 'save':
model_save()
@ -120,131 +122,142 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
print ('Error: %s' % (str(e)))
traceback.print_exc()
break
output_queue.put ( {'op':'close'} )
c2s.put ( {'op':'close'} )
def previewThread (input_queue, output_queue):
previews = None
loss_history = None
selected_preview = 0
update_preview = False
is_showing = False
is_waiting_preview = False
show_last_history_epochs_count = 0
epoch = 0
while True:
if not input_queue.empty():
input = input_queue.get()
op = input['op']
if op == 'show':
is_waiting_preview = False
loss_history = input['loss_history'] if 'loss_history' in input.keys() else None
previews = input['previews'] if 'previews' in input.keys() else None
epoch = input['epoch'] if 'epoch' in input.keys() else 0
if previews is not None:
max_w = 0
max_h = 0
for (preview_name, preview_rgb) in previews:
(h, w, c) = preview_rgb.shape
max_h = max (max_h, h)
max_w = max (max_w, w)
max_size = 800
if max_h > max_size:
max_w = int( max_w / (max_h / max_size) )
max_h = max_size
#make all previews size equal
for preview in previews[:]:
(preview_name, preview_rgb) = preview
(h, w, c) = preview_rgb.shape
if h != max_h or w != max_w:
previews.remove(preview)
previews.append ( (preview_name, cv2.resize(preview_rgb, (max_w, max_h))) )
selected_preview = selected_preview % len(previews)
update_preview = True
elif op == 'close':
break
if update_preview:
update_preview = False
selected_preview_name = previews[selected_preview][0]
selected_preview_rgb = previews[selected_preview][1]
(h,w,c) = selected_preview_rgb.shape
# HEAD
head_lines = [
'[s]:save [enter]:exit',
'[p]:update [space]:next preview [l]:change history range',
'Preview: "%s" [%d/%d]' % (selected_preview_name,selected_preview+1, len(previews) )
]
head_line_height = 15
head_height = len(head_lines) * head_line_height
head = np.ones ( (head_height,w,c) ) * 0.1
for i in range(0, len(head_lines)):
t = i*head_line_height
b = (i+1)*head_line_height
head[t:b, 0:w] += image_utils.get_text_image ( (w,head_line_height,c) , head_lines[i], color=[0.8]*c )
final = head
if loss_history is not None:
if show_last_history_epochs_count == 0:
loss_history_to_show = loss_history
else:
loss_history_to_show = loss_history[-show_last_history_epochs_count:]
lh_img = models.ModelBase.get_loss_history_preview(loss_history_to_show, epoch, w, c)
final = np.concatenate ( [final, lh_img], axis=0 )
final = np.concatenate ( [final, selected_preview_rgb], axis=0 )
final = np.clip(final, 0, 1)
cv2.imshow ( 'Training preview', (final*255).astype(np.uint8) )
is_showing = True
if is_showing:
key = cv2.waitKey(100)
else:
time.sleep(0.1)
key = 0
if key == ord('\n') or key == ord('\r'):
output_queue.put ( {'op': 'close'} )
elif key == ord('s'):
output_queue.put ( {'op': 'save'} )
elif key == ord('p'):
if not is_waiting_preview:
is_waiting_preview = True
output_queue.put ( {'op': 'preview'} )
elif key == ord('l'):
if show_last_history_epochs_count == 0:
show_last_history_epochs_count = 5000
elif show_last_history_epochs_count == 5000:
show_last_history_epochs_count = 10000
elif show_last_history_epochs_count == 10000:
show_last_history_epochs_count = 50000
elif show_last_history_epochs_count == 50000:
show_last_history_epochs_count = 100000
elif show_last_history_epochs_count == 100000:
show_last_history_epochs_count = 0
update_preview = True
elif key == ord(' '):
selected_preview = (selected_preview + 1) % len(previews)
update_preview = True
cv2.destroyAllWindows()
def main(args, device_args):
io.log_info ("Running trainer.\r\n")
def main (training_data_src_dir, training_data_dst_dir, model_path, model_name, **in_options):
print ("Running trainer.\r\n")
no_preview = args.get('no_preview', False)
output_queue = queue.Queue()
input_queue = queue.Queue()
import threading
thread = threading.Thread(target=trainerThread, args=(output_queue, input_queue, training_data_src_dir, training_data_dst_dir, model_path, model_name), kwargs=in_options )
s2c = queue.Queue()
c2s = queue.Queue()
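#s2c carries commands from this UI loop to the trainer thread; c2s carries trainer results back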
thread = threading.Thread(target=trainerThread, args=(s2c, c2s, args, device_args) )
thread.start()
previewThread (input_queue, output_queue)
if no_preview:
while True:
if not c2s.empty():
input = c2s.get()
op = input.get('op','')
if op == 'close':
break
io.process_messages(0.1)
else:
wnd_name = "Training preview"
io.named_window(wnd_name)
io.capture_keys(wnd_name)
previews = None
loss_history = None
selected_preview = 0
update_preview = False
is_showing = False
is_waiting_preview = False
show_last_history_epochs_count = 0
epoch = 0
while True:
if not c2s.empty():
input = c2s.get()
op = input['op']
if op == 'show':
is_waiting_preview = False
loss_history = input['loss_history'] if 'loss_history' in input.keys() else None
previews = input['previews'] if 'previews' in input.keys() else None
epoch = input['epoch'] if 'epoch' in input.keys() else 0
if previews is not None:
max_w = 0
max_h = 0
for (preview_name, preview_rgb) in previews:
(h, w, c) = preview_rgb.shape
max_h = max (max_h, h)
max_w = max (max_w, w)
max_size = 800
if max_h > max_size:
max_w = int( max_w / (max_h / max_size) )
max_h = max_size
#make all previews size equal
for preview in previews[:]:
(preview_name, preview_rgb) = preview
(h, w, c) = preview_rgb.shape
if h != max_h or w != max_w:
previews.remove(preview)
previews.append ( (preview_name, cv2.resize(preview_rgb, (max_w, max_h))) )
selected_preview = selected_preview % len(previews)
update_preview = True
elif op == 'close':
break
if update_preview:
update_preview = False
selected_preview_name = previews[selected_preview][0]
selected_preview_rgb = previews[selected_preview][1]
(h,w,c) = selected_preview_rgb.shape
# HEAD
head_lines = [
'[s]:save [enter]:exit',
'[p]:update [space]:next preview [l]:change history range',
'Preview: "%s" [%d/%d]' % (selected_preview_name,selected_preview+1, len(previews) )
]
head_line_height = 15
head_height = len(head_lines) * head_line_height
head = np.ones ( (head_height,w,c) ) * 0.1
for i in range(0, len(head_lines)):
t = i*head_line_height
b = (i+1)*head_line_height
head[t:b, 0:w] += image_utils.get_text_image ( (w,head_line_height,c) , head_lines[i], color=[0.8]*c )
final = head
if loss_history is not None:
if show_last_history_epochs_count == 0:
loss_history_to_show = loss_history
else:
loss_history_to_show = loss_history[-show_last_history_epochs_count:]
lh_img = models.ModelBase.get_loss_history_preview(loss_history_to_show, epoch, w, c)
final = np.concatenate ( [final, lh_img], axis=0 )
final = np.concatenate ( [final, selected_preview_rgb], axis=0 )
final = np.clip(final, 0, 1)
io.show_image( wnd_name, (final*255).astype(np.uint8) )
is_showing = True
key_events = io.get_key_events(wnd_name)
key, = key_events[-1] if len(key_events) > 0 else (0,)
if key == ord('\n') or key == ord('\r'):
s2c.put ( {'op': 'close'} )
elif key == ord('s'):
s2c.put ( {'op': 'save'} )
elif key == ord('p'):
if not is_waiting_preview:
is_waiting_preview = True
s2c.put ( {'op': 'preview'} )
elif key == ord('l'):
if show_last_history_epochs_count == 0:
show_last_history_epochs_count = 5000
elif show_last_history_epochs_count == 5000:
show_last_history_epochs_count = 10000
elif show_last_history_epochs_count == 10000:
show_last_history_epochs_count = 50000
elif show_last_history_epochs_count == 50000:
show_last_history_epochs_count = 100000
elif show_last_history_epochs_count == 100000:
show_last_history_epochs_count = 0
update_preview = True
elif key == ord(' '):
selected_preview = (selected_preview + 1) % len(previews)
update_preview = True
io.process_messages(0.1)
io.destroy_all_windows()


@ -1,28 +1,21 @@
import os
import sys
import operator
import numpy as np
import cv2
from tqdm import tqdm
from shutil import copyfile
import cv2
from pathlib import Path
from utils import Path_utils
from utils import image_utils
from utils.DFLPNG import DFLPNG
from utils.DFLJPG import DFLJPG
from utils.cv2_utils import *
from facelib import LandmarksProcessor
from utils.SubprocessorBase import SubprocessorBase
import multiprocessing
from interact import interact as io
def convert_png_to_jpg_file (filepath):
filepath = Path(filepath)
if filepath.suffix != '.png':
return
dflpng = DFLPNG.load (str(filepath), print_on_no_embedded_data=True)
if dflpng is None:
return
dflpng = DFLPNG.load (str(filepath) )
if dflpng is None:
print ("%s is not a dfl image file" % (filepath.name) )
return
dfl_dict = dflpng.getDFLDictData()
@ -45,27 +38,30 @@ def convert_png_to_jpg_folder (input_path):
print ("Converting PNG to JPG...\r\n")
for filepath in tqdm( Path_utils.get_image_paths(input_path), desc="Converting", ascii=True):
for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Converting"):
filepath = Path(filepath)
convert_png_to_jpg_file(filepath)
def add_landmarks_debug_images(input_path):
print ("Adding landmarks debug images...")
for filepath in tqdm( Path_utils.get_image_paths(input_path), desc="Processing", ascii=True):
for filepath in io.progress_bar_generator( Path_utils.get_image_paths(input_path), "Processing"):
filepath = Path(filepath)
img = cv2_imread(str(filepath))
if filepath.suffix == '.png':
dflimg = DFLPNG.load( str(filepath), print_on_no_embedded_data=True )
dflimg = DFLPNG.load( str(filepath) )
elif filepath.suffix == '.jpg':
dflimg = DFLJPG.load ( str(filepath), print_on_no_embedded_data=True )
dflimg = DFLJPG.load ( str(filepath) )
else:
dflimg = None
if dflimg is None:
print ("%s is not a dfl image file" % (filepath.name) )
continue
if not (dflimg is None or img is None):
if img is not None:
face_landmarks = dflimg.get_landmarks()
LandmarksProcessor.draw_landmarks(img, face_landmarks)


@ -7,31 +7,34 @@ from pathlib import Path
from utils import Path_utils
from utils import std_utils
from utils import image_utils
from utils.console_utils import *
from utils.cv2_utils import *
import numpy as np
import cv2
from samples import SampleGeneratorBase
from nnlib import nnlib
from interact import interact as io
'''
You can implement your own model. Check examples.
'''
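For orientation, a minimal sketch of such a model, assuming only the overridables visible in this diff (network-building code omitted; the class name follows the Model_* convention used below):

class Model(ModelBase):
    #override
    def onInitialize(self):
        #build/load keras networks here
        self.set_vram_batch_requirements( {2.5:4} )

    #override
    def onSave(self):
        pass #persist network weights here

    #override
    def onTrainOneEpoch(self, sample, generators_list):
        #must return an iterable of (loss_name, loss_value) pairs,
        #as consumed by self.loss_history in train_one_epoch below
        return ( ('loss', 0.0), )

    #override
    def onGetPreview(self, sample):
        #must return a list of (preview_name, bgr_image) pairs
        return [ ('Model', np.zeros ( (128,128,3), dtype=np.float32 )) ]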
class ModelBase(object):
#DONT OVERRIDE
def __init__(self, model_path, training_data_src_path=None, training_data_dst_path=None, debug = False, force_gpu_idx=-1, **in_options):
if force_gpu_idx == -1:
def __init__(self, model_path, training_data_src_path=None, training_data_dst_path=None, debug = False, device_args = None):
device_args = device_args or {} #guard: device_args may be None before it is indexed below
device_args['force_gpu_idx'] = device_args.get('force_gpu_idx',-1)
if device_args['force_gpu_idx'] == -1:
idxs_names_list = nnlib.device.getValidDevicesIdxsWithNamesList()
if len(idxs_names_list) > 1:
print ("You have multi GPUs in a system: ")
io.log_info ("You have multi GPUs in a system: ")
for idx, name in idxs_names_list:
print ("[%d] : %s" % (idx, name) )
io.log_info ("[%d] : %s" % (idx, name) )
force_gpu_idx = input_int("Which GPU idx to choose? ( skip: best GPU ) : ", -1, [ x[0] for x in idxs_names_list] )
self.force_gpu_idx = force_gpu_idx
device_args['force_gpu_idx'] = io.input_int("Which GPU idx to choose? ( skip: best GPU ) : ", -1, [ x[0] for x in idxs_names_list] )
self.device_args = device_args
print ("Loading model...")
io.log_info ("Loading model...")
self.model_path = model_path
self.model_data_path = Path( self.get_strpath_storage_for_file('data.dat') )
@ -46,9 +49,7 @@ class ModelBase(object):
self.dst_data_generator = None
self.debug = debug
self.is_training_mode = (training_data_src_path is not None and training_data_dst_path is not None)
self.supress_std_once = os.environ.get('TF_SUPPRESS_STD', '0') == '1'
self.epoch = 0
self.options = {}
self.loss_history = []
@ -61,40 +62,40 @@ class ModelBase(object):
self.loss_history = model_data['loss_history'] if 'loss_history' in model_data.keys() else []
self.sample_for_preview = model_data['sample_for_preview'] if 'sample_for_preview' in model_data.keys() else None
ask_override = self.is_training_mode and self.epoch != 0 and input_in_time ("Press enter in 2 seconds to override model settings.", 2)
ask_override = self.is_training_mode and self.epoch != 0 and io.input_in_time ("Press enter in 2 seconds to override model settings.", 2)
if self.epoch == 0:
print ("\nModel first run. Enter model options as default for each run.")
io.log_info ("\nModel first run. Enter model options as default for each run.")
if self.epoch == 0 or ask_override:
default_write_preview_history = False if self.epoch == 0 else self.options.get('write_preview_history',False)
self.options['write_preview_history'] = input_bool("Write preview history? (y/n ?:help skip:n/default) : ", default_write_preview_history, help_message="Preview history will be writed to <ModelName>_history folder.")
self.options['write_preview_history'] = io.input_bool("Write preview history? (y/n ?:help skip:n/default) : ", default_write_preview_history, help_message="Preview history will be written to the <ModelName>_history folder.")
else:
self.options['write_preview_history'] = self.options.get('write_preview_history', False)
if self.epoch == 0 or ask_override:
self.options['target_epoch'] = max(0, input_int("Target epoch (skip:unlimited/default) : ", 0))
self.options['target_epoch'] = max(0, io.input_int("Target epoch (skip:unlimited/default) : ", 0))
else:
self.options['target_epoch'] = self.options.get('target_epoch', 0)
if self.epoch == 0 or ask_override:
default_batch_size = 0 if self.epoch == 0 else self.options.get('batch_size',0)
self.options['batch_size'] = max(0, input_int("Batch_size (?:help skip:0/default) : ", default_batch_size, help_message="Larger batch size is always better for NN's generalization, but it can cause Out of Memory error. Tune this value for your videocard manually."))
self.options['batch_size'] = max(0, io.input_int("Batch_size (?:help skip:0/default) : ", default_batch_size, help_message="A larger batch size is always better for NN generalization, but it can cause an Out of Memory error. Tune this value for your videocard manually."))
else:
self.options['batch_size'] = self.options.get('batch_size', 0)
if self.epoch == 0:
self.options['sort_by_yaw'] = input_bool("Feed faces to network sorted by yaw? (y/n ?:help skip:n) : ", False, help_message="NN will not learn src face directions that don't match dst face directions." )
self.options['sort_by_yaw'] = io.input_bool("Feed faces to network sorted by yaw? (y/n ?:help skip:n) : ", False, help_message="NN will not learn src face directions that don't match dst face directions." )
else:
self.options['sort_by_yaw'] = self.options.get('sort_by_yaw', False)
if self.epoch == 0:
self.options['random_flip'] = input_bool("Flip faces randomly? (y/n ?:help skip:y) : ", True, help_message="Predicted face will look more naturally without this option, but src faceset should cover all face directions as dst faceset.")
self.options['random_flip'] = io.input_bool("Flip faces randomly? (y/n ?:help skip:y) : ", True, help_message="The predicted face will look more natural without this option, but the src faceset should then cover all the face directions the dst faceset has.")
else:
self.options['random_flip'] = self.options.get('random_flip', True)
if self.epoch == 0:
self.options['src_scale_mod'] = np.clip( input_int("Src face scale modifier % ( -30...30, ?:help skip:0) : ", 0, help_message="If src face shape is wider than dst, try to decrease this value to get a better result."), -30, 30)
self.options['src_scale_mod'] = np.clip( io.input_int("Src face scale modifier % ( -30...30, ?:help skip:0) : ", 0, help_message="If src face shape is wider than dst, try to decrease this value to get a better result."), -30, 30)
else:
self.options['src_scale_mod'] = self.options.get('src_scale_mod', 0)
@ -116,10 +117,10 @@ class ModelBase(object):
self.onInitializeOptions(self.epoch == 0, ask_override)
nnlib.import_all ( nnlib.DeviceConfig(allow_growth=False, force_gpu_idx=self.force_gpu_idx, **in_options) )
nnlib.import_all ( nnlib.DeviceConfig(allow_growth=False, **self.device_args) )
self.device_config = nnlib.active_DeviceConfig
self.onInitialize(**in_options)
self.onInitialize()
self.options['batch_size'] = self.batch_size
@ -128,10 +129,10 @@ class ModelBase(object):
if self.is_training_mode:
if self.write_preview_history:
if self.force_gpu_idx == -1:
if self.device_args['force_gpu_idx'] == -1:
self.preview_history_path = self.model_path / ( '%s_history' % (self.get_model_name()) )
else:
self.preview_history_path = self.model_path / ( '%d_%s_history' % (self.force_gpu_idx, self.get_model_name()) )
self.preview_history_path = self.model_path / ( '%d_%s_history' % (self.device_args['force_gpu_idx'], self.get_model_name()) )
if not self.preview_history_path.exists():
self.preview_history_path.mkdir(exist_ok=True)
@ -141,11 +142,11 @@ class ModelBase(object):
Path(filename).unlink()
if self.generator_list is None:
raise Exception( 'You didnt set_training_data_generators()')
raise ValueError( "You didn't call set_training_data_generators()")
else:
for i, generator in enumerate(self.generator_list):
if not isinstance(generator, SampleGeneratorBase):
raise Exception('training data generator is not subclass of SampleGeneratorBase')
raise ValueError('training data generator is not a subclass of SampleGeneratorBase')
if (self.sample_for_preview is None) or (self.epoch == 0):
self.sample_for_preview = self.generate_next_sample()
@ -181,14 +182,14 @@ class ModelBase(object):
model_summary_text += ["========================="]
model_summary_text = "\r\n".join (model_summary_text)
self.model_summary_text = model_summary_text
print(model_summary_text)
io.log_info(model_summary_text)
#overridable
def onInitializeOptions(self, is_first_run, ask_override):
pass
#overridable
def onInitialize(self, **in_options):
def onInitialize(self):
'''
initialize your keras models
@ -221,10 +222,9 @@ class ModelBase(object):
return Path(inspect.getmodule(self).__file__).parent.name.rsplit("_", 1)[1]
#overridable
def get_converter(self, **in_options):
#return existing or your own converter which derived from base
from .ConverterBase import ConverterBase
return ConverterBase(self, **in_options)
def get_converter(self):
raise NotImplementedError()
#return an existing converter, or your own derived from the base Converter class
def get_target_epoch(self):
return self.target_epoch
@ -258,17 +258,10 @@ class ModelBase(object):
return self.onGetPreview (self.sample_for_preview)[0][1] #first preview, and bgr
def save(self):
print ("Saving...")
io.log_info ("Saving...")
if self.supress_std_once:
supressor = std_utils.suppress_stdout_stderr()
supressor.__enter__()
Path( self.get_strpath_storage_for_file('summary.txt') ).write_text(self.model_summary_text)
self.onSave()
if self.supress_std_once:
supressor.__exit__()
model_data = {
'epoch': self.epoch,
@ -310,11 +303,7 @@ class ModelBase(object):
def generate_next_sample(self):
return [next(generator) for generator in self.generator_list]
def train_one_epoch(self):
if self.supress_std_once:
supressor = std_utils.suppress_stdout_stderr()
supressor.__enter__()
def train_one_epoch(self):
sample = self.generate_next_sample()
epoch_time = time.time()
losses = self.onTrainOneEpoch(sample, self.generator_list)
@ -322,11 +311,7 @@ class ModelBase(object):
self.last_sample = sample
self.loss_history.append ( [float(loss[1]) for loss in losses] )
if self.supress_std_once:
supressor.__exit__()
self.supress_std_once = False
if self.write_preview_history:
if self.epoch % 10 == 0:
preview = self.get_static_preview()
@ -377,10 +362,10 @@ class ModelBase(object):
return self.generator_list
def get_strpath_storage_for_file(self, filename):
if self.force_gpu_idx == -1:
if self.device_args['force_gpu_idx'] == -1:
return str( self.model_path / ( self.get_model_name() + '_' + filename) )
else:
return str( self.model_path / ( str(self.force_gpu_idx) + '_' + self.get_model_name() + '_' + filename) )
return str( self.model_path / ( str(self.device_args['force_gpu_idx']) + '_' + self.get_model_name() + '_' + filename) )
def set_vram_batch_requirements (self, d):
#example d = {2:2,3:4,4:8,5:16,6:32,7:32,8:32,9:48}
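#assumed meaning: each key is an available-VRAM level in GB, each value the batch size to use at that level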


@ -4,7 +4,7 @@ from nnlib import nnlib
from models import ModelBase
from facelib import FaceType
from samples import *
from utils.console_utils import *
from interact import interact as io
class Model(ModelBase):
@ -12,12 +12,12 @@ class Model(ModelBase):
def onInitializeOptions(self, is_first_run, ask_override):
if is_first_run or ask_override:
def_pixel_loss = self.options.get('pixel_loss', False)
self.options['pixel_loss'] = input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. Use pixel loss after 20k epochs to enhance fine details and decrease face jitter.")
self.options['pixel_loss'] = io.input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss is good for learning the initial structure of faces. Use pixel loss after 20k epochs to enhance fine details and decrease face jitter.")
else:
self.options['pixel_loss'] = self.options.get('pixel_loss', False)
#override
def onInitialize(self, **in_options):
def onInitialize(self):
exec(nnlib.import_all(), locals(), globals())
self.set_vram_batch_requirements( {4.5:4} )
@ -113,15 +113,14 @@ class Model(ModelBase):
return np.concatenate ( (x,mx), -1 )
#override
def get_converter(self, **in_options):
from models import ConverterMasked
def get_converter(self):
from converters import ConverterMasked
return ConverterMasked(self.predictor_func,
predictor_input_size=128,
output_size=128,
face_type=FaceType.FULL,
base_erode_mask_modifier=30,
base_blur_mask_modifier=0,
**in_options)
base_blur_mask_modifier=0)
def Build(self, input_layer):
exec(nnlib.code_import_all, locals(), globals())


@ -4,14 +4,14 @@ from nnlib import nnlib
from models import ModelBase
from facelib import FaceType
from samples import *
from utils.console_utils import *
from interact import interact as io
class Model(ModelBase):
#override
def onInitializeOptions(self, is_first_run, ask_override):
if is_first_run:
self.options['lighter_ae'] = input_bool ("Use lightweight autoencoder? (y/n, ?:help skip:n) : ", False, help_message="Lightweight autoencoder is faster, requires less VRAM, sacrificing overall quality. If your GPU VRAM <= 4, you should to choose this option.")
self.options['lighter_ae'] = io.input_bool ("Use lightweight autoencoder? (y/n, ?:help skip:n) : ", False, help_message="Lightweight autoencoder is faster and requires less VRAM, at the cost of overall quality. If your GPU has <= 4GB VRAM, you should choose this option.")
else:
default_lighter_ae = self.options.get('created_vram_gb', 99) <= 4 #temporarily support old models; deprecate in the future
if 'created_vram_gb' in self.options.keys():
@ -20,12 +20,12 @@ class Model(ModelBase):
if is_first_run or ask_override:
def_pixel_loss = self.options.get('pixel_loss', False)
self.options['pixel_loss'] = input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. Use pixel loss after 20k epochs to enhance fine details and decrease face jitter.")
self.options['pixel_loss'] = io.input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss is good for learning the initial structure of faces. Use pixel loss after 20k epochs to enhance fine details and decrease face jitter.")
else:
self.options['pixel_loss'] = self.options.get('pixel_loss', False)
#override
def onInitialize(self, **in_options):
def onInitialize(self):
exec(nnlib.import_all(), locals(), globals())
self.set_vram_batch_requirements( {2.5:4} )
@ -125,15 +125,14 @@ class Model(ModelBase):
return np.concatenate ( (x,mx), -1 )
#override
def get_converter(self, **in_options):
from models import ConverterMasked
def get_converter(self):
from converters import ConverterMasked
return ConverterMasked(self.predictor_func,
predictor_input_size=128,
output_size=128,
face_type=FaceType.HALF,
base_erode_mask_modifier=100,
base_blur_mask_modifier=100,
**in_options)
base_blur_mask_modifier=100)
def Build(self, lighter_ae):
exec(nnlib.code_import_all, locals(), globals())


@ -4,14 +4,14 @@ from nnlib import nnlib
from models import ModelBase
from facelib import FaceType
from samples import *
from utils.console_utils import *
from interact import interact as io
class Model(ModelBase):
#override
def onInitializeOptions(self, is_first_run, ask_override):
if is_first_run:
self.options['lighter_ae'] = input_bool ("Use lightweight autoencoder? (y/n, ?:help skip:n) : ", False, help_message="Lightweight autoencoder is faster, requires less VRAM, sacrificing overall quality. If your GPU VRAM <= 4, you should to choose this option.")
self.options['lighter_ae'] = io.input_bool ("Use lightweight autoencoder? (y/n, ?:help skip:n) : ", False, help_message="Lightweight autoencoder is faster and requires less VRAM, at the cost of overall quality. If your GPU has <= 4GB VRAM, you should choose this option.")
else:
default_lighter_ae = self.options.get('created_vram_gb', 99) <= 4 #temporarily support old models; deprecate in the future
if 'created_vram_gb' in self.options.keys():
@ -20,12 +20,12 @@ class Model(ModelBase):
if is_first_run or ask_override:
def_pixel_loss = self.options.get('pixel_loss', False)
self.options['pixel_loss'] = input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. Use pixel loss after 20k epochs to enhance fine details and decrease face jitter.")
self.options['pixel_loss'] = io.input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss is good for learning the initial structure of faces. Use pixel loss after 20k epochs to enhance fine details and decrease face jitter.")
else:
self.options['pixel_loss'] = self.options.get('pixel_loss', False)
#override
def onInitialize(self, **in_options):
def onInitialize(self):
exec(nnlib.import_all(), locals(), globals())
self.set_vram_batch_requirements( {1.5:4} )
@ -127,15 +127,14 @@ class Model(ModelBase):
return np.concatenate ( (x,mx), -1 )
#override
def get_converter(self, **in_options):
from models import ConverterMasked
def get_converter(self):
from converters import ConverterMasked
return ConverterMasked(self.predictor_func,
predictor_input_size=64,
output_size=64,
face_type=FaceType.HALF,
base_erode_mask_modifier=100,
base_blur_mask_modifier=100,
**in_options)
base_blur_mask_modifier=100)
def Build(self, lighter_ae):
exec(nnlib.code_import_all, locals(), globals())


@ -4,7 +4,7 @@ from nnlib import nnlib
from models import ModelBase
from facelib import FaceType
from samples import *
from utils.console_utils import *
from interact import interact as io
class Model(ModelBase):
@ -12,12 +12,12 @@ class Model(ModelBase):
def onInitializeOptions(self, is_first_run, ask_override):
if is_first_run or ask_override:
def_pixel_loss = self.options.get('pixel_loss', False)
self.options['pixel_loss'] = input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. Use pixel loss after 20k epochs to enhance fine details and decrease face jitter.")
self.options['pixel_loss'] = io.input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss is good for learning the initial structure of faces. Use pixel loss after 20k epochs to enhance fine details and decrease face jitter.")
else:
self.options['pixel_loss'] = self.options.get('pixel_loss', False)
#override
def onInitialize(self, **in_options):
def onInitialize(self):
exec(nnlib.import_all(), locals(), globals())
self.set_vram_batch_requirements( {4.5:4} )
@ -121,15 +121,14 @@ class Model(ModelBase):
return np.concatenate ( (x,mx), -1 )
#override
def get_converter(self, **in_options):
from models import ConverterMasked
def get_converter(self):
from converters import ConverterMasked
return ConverterMasked(self.predictor_func,
predictor_input_size=128,
output_size=128,
face_type=FaceType.FULL,
base_erode_mask_modifier=30,
base_blur_mask_modifier=0,
**in_options)
base_blur_mask_modifier=0)
def Build(self, input_layer):
exec(nnlib.code_import_all, locals(), globals())


@ -4,7 +4,7 @@ from nnlib import nnlib
from models import ModelBase
from facelib import FaceType
from samples import *
from utils.console_utils import *
from interact import interact as io
#SAE - Styled AutoEncoder
class SAEModel(ModelBase):
@ -27,15 +27,15 @@ class SAEModel(ModelBase):
default_face_type = 'f'
if is_first_run:
resolution = input_int("Resolution ( 64-256 ?:help skip:128) : ", default_resolution, help_message="More resolution requires more VRAM and time to train. Value will be adjusted to multiple of 16.")
resolution = io.input_int("Resolution ( 64-256 ?:help skip:128) : ", default_resolution, help_message="Higher resolution requires more VRAM and time to train. The value will be adjusted to a multiple of 16.")
resolution = np.clip (resolution, 64, 256)
while np.modf(resolution / 16)[0] != 0.0:
resolution -= 1
self.options['resolution'] = resolution
self.options['face_type'] = input_str ("Half or Full face? (h/f, ?:help skip:f) : ", default_face_type, ['h','f'], help_message="Half face has better resolution, but covers less area of cheeks.").lower()
self.options['learn_mask'] = input_bool ("Learn mask? (y/n, ?:help skip:y) : ", True, help_message="Learning mask can help model to recognize face directions. Learn without mask can reduce model size, in this case converter forced to use 'not predicted mask' that is not smooth as predicted. Model with style values can be learned without mask and produce same quality result.")
self.options['archi'] = input_str ("AE architecture (df, liae, vg ?:help skip:%s) : " % (default_archi) , default_archi, ['df','liae','vg'], help_message="'df' keeps faces more natural. 'liae' can fix overly different face shapes. 'vg' - currently testing.").lower()
self.options['face_type'] = io.input_str ("Half or Full face? (h/f, ?:help skip:f) : ", default_face_type, ['h','f'], help_message="Half face has better resolution, but covers less area of cheeks.").lower()
self.options['learn_mask'] = io.input_bool ("Learn mask? (y/n, ?:help skip:y) : ", True, help_message="Learning the mask helps the model to recognize face directions. Learning without the mask reduces model size, but then the converter is forced to use a 'not predicted mask' that is not as smooth as a predicted one. A model with style values can be trained without the mask and produce the same quality result.")
self.options['archi'] = io.input_str ("AE architecture (df, liae, vg ?:help skip:%s) : " % (default_archi) , default_archi, ['df','liae','vg'], help_message="'df' keeps faces more natural. 'liae' can fix overly different face shapes. 'vg' - currently testing.").lower()
else:
self.options['resolution'] = self.options.get('resolution', default_resolution)
self.options['face_type'] = self.options.get('face_type', default_face_type)
@ -45,17 +45,17 @@ class SAEModel(ModelBase):
default_ae_dims = 256 if self.options['archi'] == 'liae' else 512
default_ed_ch_dims = 42
if is_first_run:
self.options['ae_dims'] = np.clip ( input_int("AutoEncoder dims (32-1024 ?:help skip:%d) : " % (default_ae_dims) , default_ae_dims, help_message="More dims are better, but requires more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )
self.options['ed_ch_dims'] = np.clip ( input_int("Encoder/Decoder dims per channel (21-85 ?:help skip:%d) : " % (default_ed_ch_dims) , default_ed_ch_dims, help_message="More dims are better, but requires more VRAM. You can fine-tune model size to fit your GPU." ), 21, 85 )
self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dims (32-1024 ?:help skip:%d) : " % (default_ae_dims) , default_ae_dims, help_message="More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )
self.options['ed_ch_dims'] = np.clip ( io.input_int("Encoder/Decoder dims per channel (21-85 ?:help skip:%d) : " % (default_ed_ch_dims) , default_ed_ch_dims, help_message="More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 21, 85 )
else:
self.options['ae_dims'] = self.options.get('ae_dims', default_ae_dims)
self.options['ed_ch_dims'] = self.options.get('ed_ch_dims', default_ed_ch_dims)
if is_first_run:
self.options['lighter_encoder'] = input_bool ("Use lightweight encoder? (y/n, ?:help skip:n) : ", False, help_message="Lightweight encoder is 35% faster, requires less VRAM, but sacrificing overall quality.")
self.options['lighter_encoder'] = io.input_bool ("Use lightweight encoder? (y/n, ?:help skip:n) : ", False, help_message="Lightweight encoder is 35% faster and requires less VRAM, at the cost of overall quality.")
if self.options['archi'] != 'vg':
self.options['multiscale_decoder'] = input_bool ("Use multiscale decoder? (y/n, ?:help skip:y) : ", True, help_message="Multiscale decoder helps to get better details.")
self.options['multiscale_decoder'] = io.input_bool ("Use multiscale decoder? (y/n, ?:help skip:y) : ", True, help_message="Multiscale decoder helps to get better details.")
else:
self.options['lighter_encoder'] = self.options.get('lighter_encoder', False)
@@ -66,14 +66,14 @@ class SAEModel(ModelBase):
default_bg_style_power = 0.0
if is_first_run or ask_override:
def_pixel_loss = self.options.get('pixel_loss', False)
self.options['pixel_loss'] = input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="Default DSSIM loss good for initial understanding structure of faces. Use pixel loss after 15-25k epochs to enhance fine details and decrease face jitter.")
self.options['pixel_loss'] = io.input_bool ("Use pixel loss? (y/n, ?:help skip: n/default ) : ", def_pixel_loss, help_message="The default DSSIM loss is good for initially learning the structure of faces. Switch to pixel loss after 15-25k epochs to enhance fine details and decrease face jitter.")
default_face_style_power = default_face_style_power if is_first_run else self.options.get('face_style_power', default_face_style_power)
self.options['face_style_power'] = np.clip ( input_number("Face style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " % (default_face_style_power), default_face_style_power,
self.options['face_style_power'] = np.clip ( io.input_number("Face style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " % (default_face_style_power), default_face_style_power,
help_message="Learn to transfer face style details such as light and color conditions. Warning: Enable it only after 10k epochs, when predicted face is clear enough to start learn style. Start from 0.1 value and check history changes."), 0.0, 100.0 )
default_bg_style_power = default_bg_style_power if is_first_run else self.options.get('bg_style_power', default_bg_style_power)
self.options['bg_style_power'] = np.clip ( input_number("Background style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " % (default_bg_style_power), default_bg_style_power,
self.options['bg_style_power'] = np.clip ( io.input_number("Background style power ( 0.0 .. 100.0 ?:help skip:%.2f) : " % (default_bg_style_power), default_bg_style_power,
help_message="Learn to transfer image around face. This can make face more like dst."), 0.0, 100.0 )
else:
self.options['pixel_loss'] = self.options.get('pixel_loss', False)
@@ -81,7 +81,7 @@ class SAEModel(ModelBase):
self.options['bg_style_power'] = self.options.get('bg_style_power', default_bg_style_power)
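For reference, the migrated prompts all share one pattern: the interact singleton replaces the module-level console helpers, and numeric answers are clamped with np.clip. A minimal sketch of that pattern, assuming only the signatures visible in the hunks above (option names and defaults are illustrative):

import numpy as np
from interact import interact as io

options = {}
# string choice restricted to a valid list, with inline '?' help
options['archi'] = io.input_str("AE architecture (df, liae, vg ?:help skip:df) : ",
                                'df', ['df', 'liae', 'vg'],
                                help_message="Pick the autoencoder architecture.").lower()
# integer answer clamped into a safe range
options['ae_dims'] = np.clip(io.input_int("AutoEncoder dims (32-1024 ?:help skip:512) : ", 512), 32, 1024)
# boolean with a y/n default
options['learn_mask'] = io.input_bool("Learn mask? (y/n, ?:help skip:y) : ", True)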
#override
def onInitialize(self, **in_options):
def onInitialize(self):
exec(nnlib.import_all(), locals(), globals())
SAEModel.initialize_nn_functions()
@@ -427,9 +427,7 @@ class SAEModel(ModelBase):
return np.concatenate ( [prd[0], prd[1]], -1 )
#override
def get_converter(self, **in_options):
from models import ConverterMasked
def get_converter(self):
base_erode_mask_modifier = 30 if self.options['face_type'] == 'f' else 100
base_blur_mask_modifier = 0 if self.options['face_type'] == 'f' else 100
@@ -439,6 +437,7 @@ class SAEModel(ModelBase):
face_type = FaceType.FULL if self.options['face_type'] == 'f' else FaceType.HALF
from converters import ConverterMasked
return ConverterMasked(self.predictor_func,
predictor_input_size=self.options['resolution'],
output_size=self.options['resolution'],
@@ -448,8 +447,7 @@ class SAEModel(ModelBase):
base_blur_mask_modifier=base_blur_mask_modifier,
default_erode_mask_modifier=default_erode_mask_modifier,
default_blur_mask_modifier=default_blur_mask_modifier,
clip_hborder_mask_per=0.0625 if self.options['face_type'] == 'f' else 0,
**in_options)
clip_hborder_mask_per=0.0625 if self.options['face_type'] == 'f' else 0)
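Dropping **in_options in favor of explicit keyword arguments is more than cosmetic: a misspelled option now fails at call time instead of being silently swallowed. A toy illustration of the difference (both function names are hypothetical):

def make_converter_explicit(predictor_func, output_size=0):
    return (predictor_func, output_size)

def make_converter_catchall(predictor_func, **in_options):
    return (predictor_func, in_options.get('output_size', 0))

print(make_converter_catchall(abs, output_sise=128))  # typo silently ignored, output_size stays 0
try:
    make_converter_explicit(abs, output_sise=128)
except TypeError as e:
    print(e)  # unexpected keyword argument 'output_sise'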
@staticmethod
def initialize_nn_functions():

View file

@@ -1,7 +1,4 @@
from .ModelBase import ModelBase
from .ConverterBase import ConverterBase
from .ConverterMasked import ConverterMasked
from .ConverterImage import ConverterImage
def import_model(name):
module = __import__('Model_'+name, globals(), locals(), [], 1)

View file

@@ -2,7 +2,6 @@ import traceback
from enum import IntEnum
import cv2
import numpy as np
from tqdm import tqdm
from pathlib import Path
from utils import Path_utils
@@ -14,6 +13,7 @@ from .Sample import SampleType
from facelib import FaceType
from facelib import LandmarksProcessor
from interact import interact as io
class SampleLoader:
cache = dict()
@@ -29,7 +29,7 @@ class SampleLoader:
if sample_type == SampleType.IMAGE:
if datas[sample_type] is None:
datas[sample_type] = [ Sample(filename=filename) for filename in tqdm( Path_utils.get_image_paths(samples_path), desc="Loading", ascii=True ) ]
datas[sample_type] = [ Sample(filename=filename) for filename in io.progress_bar_generator( Path_utils.get_image_paths(samples_path), "Loading") ]
elif sample_type == SampleType.FACE:
if datas[sample_type] is None:
@@ -55,16 +55,17 @@ class SampleLoader:
def upgradeToFaceSamples ( samples ):
sample_list = []
for s in tqdm( samples, desc="Loading", ascii=True ):
for s in io.progress_bar_generator(samples, "Loading"):
s_filename_path = Path(s.filename)
try:
if s_filename_path.suffix == '.png':
dflimg = DFLPNG.load ( str(s_filename_path), print_on_no_embedded_data=True )
if dflimg is None: continue
dflimg = DFLPNG.load ( str(s_filename_path) )
elif s_filename_path.suffix == '.jpg':
dflimg = DFLJPG.load ( str(s_filename_path), print_on_no_embedded_data=True )
if dflimg is None: continue
dflimg = DFLJPG.load ( str(s_filename_path) )
else:
dflimg = None
if dflimg is None:
print ("%s is not a dfl image file required for training" % (s_filename_path.name) )
continue
@@ -87,7 +88,7 @@ class SampleLoader:
yaw_samples_len = len(yaw_samples)
sample_list = []
for i in tqdm( range(yaw_samples_len), desc="Sorting", ascii=True ):
for i in io.progress_bar_generator( range(yaw_samples_len), "Sorting"):
if yaw_samples[i] is not None:
for s in yaw_samples[i]:
s_t = []
@@ -123,7 +124,7 @@ class SampleLoader:
yaws_sample_list = [None]*gradations
for i in tqdm( range(0, gradations), desc="Sorting", ascii=True ):
for i in io.progress_bar_generator(range(gradations), "Sorting"):
yaw = lowest_yaw + i*diff_rot_per_grad
next_yaw = lowest_yaw + (i+1)*diff_rot_per_grad
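Each tqdm loop in this file reduces to the same one-liner. A minimal sketch of the new idiom, assuming io.progress_bar_generator simply wraps an iterable with a description string, exactly as the hunks above use it:

from interact import interact as io

paths = ['a.png', 'b.png', 'c.png']  # placeholder data
for path in io.progress_bar_generator(paths, "Loading"):
    pass  # load / process each sample here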

View file

@@ -113,7 +113,7 @@ class DFLJPG(object):
return None
@staticmethod
def load(filename, print_on_no_embedded_data=False, throw_on_no_embedded_data=False):
def load(filename):
inst = DFLJPG.load_raw (filename)
inst.dfl_dict = None
@@ -141,10 +141,6 @@
inst.dfl_dict['face_type'] = FaceType.toString (FaceType.FULL)
if inst.dfl_dict == None:
if print_on_no_embedded_data:
print ( "No DFL data found in %s" % (filename) )
if throw_on_no_embedded_data:
raise ValueError("No DFL data found in %s" % (filename) )
return None
return inst

View file

@@ -248,7 +248,7 @@ class DFLPNG(object):
return inst
@staticmethod
def load(filename, print_on_no_embedded_data=False, throw_on_no_embedded_data=False):
def load(filename):
inst = DFLPNG.load_raw (filename)
inst.fcwp_dict = inst.getDFLDictData()
@@ -256,10 +256,6 @@
inst.fcwp_dict['face_type'] = FaceType.toString (FaceType.FULL)
if inst.fcwp_dict == None:
if print_on_no_embedded_data:
print ( "No DFL data found in %s" % (filename) )
if throw_on_no_embedded_data:
raise ValueError("No DFL data found in %s" % (filename) )
return None
return inst
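With the print_on_no_embedded_data and throw_on_no_embedded_data flags removed, load() simply returns None and callers report the failure themselves, as upgradeToFaceSamples now does. A sketch of the caller-side pattern (the import paths are assumptions based on this repository's layout):

from pathlib import Path
from utils.DFLJPG import DFLJPG  # assumed module paths
from utils.DFLPNG import DFLPNG

def load_dfl_image(path):
    p = Path(path)
    if p.suffix == '.png':
        dflimg = DFLPNG.load(str(p))
    elif p.suffix == '.jpg':
        dflimg = DFLJPG.load(str(p))
    else:
        dflimg = None
    if dflimg is None:
        print("%s is not a dfl image file" % p.name)
    return dflimg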

View file

@@ -13,7 +13,7 @@ def get_image_paths(dir_path):
result.append(x.path)
return result
def get_image_unique_filestem_paths(dir_path, verbose=False):
def get_image_unique_filestem_paths(dir_path, verbose_print_func=None):
result = get_image_paths(dir_path)
result_dup = set()
@@ -21,8 +21,8 @@ def get_image_unique_filestem_paths(dir_path, verbose=False):
f_stem = Path(f).stem
if f_stem in result_dup:
result.remove(f)
if verbose:
print ("Duplicate filenames are not allowed, skipping: %s" % Path(f).name )
if verbose_print_func is not None:
verbose_print_func ("Duplicate filenames are not allowed, skipping: %s" % Path(f).name )
continue
result_dup.add(f_stem)
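Replacing the verbose boolean with an injectable print function lets callers route the duplicate-filename warning through any logger they like. A sketch of both call styles (the directory path is hypothetical):

from utils import Path_utils

src_dir = 'workspace/data_src/aligned'  # hypothetical path

# default: duplicates are skipped silently
paths = Path_utils.get_image_unique_filestem_paths(src_dir)

# opt in by passing any callable, e.g. plain print
paths = Path_utils.get_image_unique_filestem_paths(src_dir, verbose_print_func=print)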

View file

@@ -1,257 +0,0 @@
import traceback
from tqdm import tqdm
import multiprocessing
import time
import sys
class SubprocessorBase(object):
#overridable
def __init__(self, name, no_response_time_sec = 60):
self.name = name
self.no_response_time_sec = no_response_time_sec
self.is_host = True
#overridable
def process_info_generator(self):
#yield name, host_dict, client_dict - per process
yield 'first process', {}, {}
#overridable
def get_no_process_started_message(self):
return "No process started."
#overridable
def onHostGetProgressBarDesc(self):
return "Processing"
#overridable
def onHostGetProgressBarLen(self):
return 0
#overridable
def onHostGetData(self, host_dict):
#return data here
return None
#overridable
def onHostDataReturn (self, host_dict, data):
#input_data.insert(0, obj['data'])
pass
#overridable
def onClientInitialize(self, client_dict):
#return fail message or None if ok
return None
#overridable
def onClientFinalize(self):
pass
#overridable
def onClientProcessData(self, data):
#return result object
return None
#overridable
def onClientGetDataName (self, data):
#return a string identifier for your data
return "undefined"
#overridable
def onHostClientsInitialized(self):
pass
#overridable
def onHostResult (self, host_dict, data, result):
#return count of progress bar update
return 1
#overridable
def onFinalizeAndGetResult(self):
return None
def inc_progress_bar(self, c):
if self.is_host:
self.progress_bar.n += c
self.progress_bar.refresh()
else:
self.cq.put ( {'op': 'inc_bar', 'c':c} )
def safe_print(self, msg):
self.print_lock.acquire()
print (msg)
self.print_lock.release()
def process(self):
#returns start_return
self.processes = []
self.print_lock = multiprocessing.Lock()
for name, host_dict, client_dict in self.process_info_generator():
sq = multiprocessing.Queue()
cq = multiprocessing.Queue()
client_dict.update ( {'print_lock' : self.print_lock} )
try:
p = multiprocessing.Process(target=self.subprocess, args=(sq,cq,client_dict))
p.daemon = True
p.start()
self.processes.append ( { 'process' : p,
'sq' : sq,
'cq' : cq,
'state' : 'busy',
'sent_time': time.time(),
'name': name,
'host_dict' : host_dict
} )
except:
print ("Unable to start subprocess %s" % (name))
if len(self.processes) == 0:
raise Exception ("Unable to start Subprocessor '%s' " % (self.name))
while True:
for p in self.processes[:]:
while not p['cq'].empty():
obj = p['cq'].get()
obj_op = obj['op']
if obj_op == 'init_ok':
p['state'] = 'free'
elif obj_op == 'error':
if obj['close'] == True:
p['process'].terminate()
p['process'].join()
self.processes.remove(p)
break
if all ([ p['state'] == 'free' for p in self.processes ] ):
break
if len(self.processes) == 0:
print ( self.get_no_process_started_message() )
return None
self.progress_bar = tqdm( total=self.onHostGetProgressBarLen(), desc=self.onHostGetProgressBarDesc(), ascii=True )
self.onHostClientsInitialized()
try:
while True:
for p in self.processes[:]:
while not p['cq'].empty():
obj = p['cq'].get()
obj_op = obj['op']
if obj_op == 'success':
data = obj['data']
result = obj['result']
c = self.onHostResult (p['host_dict'], data, result)
if c > 0:
self.progress_bar.update(c)
p['state'] = 'free'
elif obj_op == 'inc_bar':
self.inc_progress_bar(obj['c'])
elif obj_op == 'error':
if 'data' in obj.keys():
self.onHostDataReturn (p['host_dict'], obj['data'] )
if obj['close'] == True:
p['sq'].put ( {'op': 'close'} )
p['process'].join()
self.processes.remove(p)
break
p['state'] = 'free'
for p in self.processes[:]:
if p['state'] == 'free':
data = self.onHostGetData(p['host_dict'])
if data is not None:
p['sq'].put ( {'op': 'data', 'data' : data} )
p['sent_time'] = time.time()
p['sent_data'] = data
p['state'] = 'busy'
elif p['state'] == 'busy':
if self.no_response_time_sec != 0 and (time.time() - p['sent_time']) > self.no_response_time_sec:
print ( "%s doesn't respond, terminating it." % (p['name']) )
self.onHostDataReturn (p['host_dict'], p['sent_data'] )
p['process'].terminate()
self.processes.remove(p)
if all ([p['state'] == 'free' for p in self.processes]):
break
time.sleep(0.005)
except:
print ("Exception occured in Subprocessor.start(): %s" % (traceback.format_exc()) )
self.progress_bar.close()
for p in self.processes[:]:
p['sq'].put ( {'op': 'close'} )
p['sent_time'] = time.time()
while True:
for p in self.processes[:]:
terminate_it = False
while not p['cq'].empty():
obj = p['cq'].get()
obj_op = obj['op']
if obj_op == 'finalized':
terminate_it = True
break
if self.no_response_time_sec != 0 and (time.time() - p['sent_time']) > self.no_response_time_sec:
terminate_it = True
if terminate_it:
p['state'] = 'finalized'
p['process'].terminate()
if all ([p['state'] == 'finalized' for p in self.processes]):
break
return self.onFinalizeAndGetResult()
def subprocess(self, sq, cq, client_dict):
self.is_host = False
self.print_lock = client_dict['print_lock']
self.cq = cq
try:
fail_message = self.onClientInitialize(client_dict)
except:
fail_message = 'Exception during initialization: %s' % (traceback.format_exc())
if fail_message is None:
cq.put ( {'op': 'init_ok'} )
else:
print (fail_message)
cq.put ( {'op': 'error', 'close': True} )
return
while True:
obj = sq.get()
obj_op = obj['op']
if obj_op == 'data':
data = obj['data']
try:
result = self.onClientProcessData (data)
cq.put ( {'op': 'success', 'data' : data, 'result' : result} )
except:
print ( 'Exception while processing data [%s]: %s' % (self.onClientGetDataName(data), traceback.format_exc()) )
cq.put ( {'op': 'error', 'close': True, 'data' : data } )
elif obj_op == 'close':
break
time.sleep(0.005)
self.onClientFinalize()
cq.put ( {'op': 'finalized'} )
while True:
time.sleep(0.1)
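For the record, the deleted class coordinated a host process and its worker subprocesses through pairs of multiprocessing queues carrying small op dicts ('init_ok', 'data', 'success', 'error', 'inc_bar', 'close', 'finalized'). A self-contained sketch of that handshake reduced to its core (op names taken from the removed code; everything else is illustrative):

import multiprocessing

def worker(sq, cq):
    # announce successful init, then serve 'data' ops until 'close'
    cq.put({'op': 'init_ok'})
    while True:
        obj = sq.get()
        if obj['op'] == 'close':
            break
        if obj['op'] == 'data':
            cq.put({'op': 'success', 'data': obj['data'], 'result': obj['data'] * 2})
    cq.put({'op': 'finalized'})

if __name__ == '__main__':
    sq, cq = multiprocessing.Queue(), multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(sq, cq), daemon=True)
    p.start()
    assert cq.get()['op'] == 'init_ok'
    sq.put({'op': 'data', 'data': 21})
    print(cq.get()['result'])  # -> 42
    sq.put({'op': 'close'})
    assert cq.get()['op'] == 'finalized'
    p.join()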

View file

@@ -1,100 +0,0 @@
import os
import sys
import time
import multiprocessing
def input_number(s, default_value, valid_list=None, help_message=None):
while True:
try:
inp = input(s)
if len(inp) == 0:
raise ValueError("")
if help_message is not None and inp == '?':
print (help_message)
continue
i = float(inp)
if (valid_list is not None) and (i not in valid_list):
return default_value
return i
except:
print (default_value)
return default_value
def input_int(s, default_value, valid_list=None, help_message=None):
while True:
try:
inp = input(s)
if len(inp) == 0:
raise ValueError("")
if help_message is not None and inp == '?':
print (help_message)
continue
i = int(inp)
if (valid_list is not None) and (i not in valid_list):
return default_value
return i
except:
print (default_value)
return default_value
def input_bool(s, default_value, help_message=None):
while True:
try:
inp = input(s)
if len(inp) == 0:
raise ValueError("")
if help_message is not None and inp == '?':
print (help_message)
continue
return bool ( {"y":True,"n":False,"1":True,"0":False}.get(inp.lower(), default_value) )
except:
print ( "y" if default_value else "n" )
return default_value
def input_str(s, default_value, valid_list=None, help_message=None):
while True:
try:
inp = input(s)
if len(inp) == 0:
raise ValueError("")
if help_message is not None and inp == '?':
print (help_message)
continue
if (valid_list is not None) and (inp.lower() not in valid_list):
return default_value
return inp
except:
print (default_value)
return default_value
def input_process(stdin_fd, sq, str):
sys.stdin = os.fdopen(stdin_fd)
try:
inp = input (str)
sq.put (True)
except:
sq.put (False)
def input_in_time (str, max_time_sec):
sq = multiprocessing.Queue()
p = multiprocessing.Process(target=input_process, args=( sys.stdin.fileno(), sq, str))
p.start()
t = time.time()
inp = False
while True:
if not sq.empty():
inp = sq.get()
break
if time.time() - t > max_time_sec:
break
p.terminate()
sys.stdin = os.fdopen( sys.stdin.fileno() )
return inp
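The least obvious helper removed here is input_in_time, which reads stdin in a child process so a prompt can be abandoned after a deadline. A self-contained sketch of the same technique, renamed to mark it as illustrative:

import multiprocessing
import os
import sys
import time

def _input_child(stdin_fd, q, prompt):
    sys.stdin = os.fdopen(stdin_fd)  # reattach the parent's stdin in the child
    try:
        input(prompt)
        q.put(True)
    except Exception:
        q.put(False)

def input_with_timeout(prompt, max_time_sec):
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=_input_child, args=(sys.stdin.fileno(), q, prompt))
    p.start()
    deadline = time.time() + max_time_sec
    answered = False
    while time.time() < deadline:
        if not q.empty():
            answered = q.get()
            break
        time.sleep(0.05)
    p.terminate()
    sys.stdin = os.fdopen(sys.stdin.fileno())  # reopen stdin for the parent
    return answered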