Added new model: U-net Face Morpher.

removed AVATAR - useless model was just for demo
removed MIAEF128 - use UFM instead
removed LIAEF128YAW - use the "sort by yaw" model option on start for any model
All models now ask for some options on start.
Session options (such as target epoch, batch_size, write_preview_history, etc.) can be overridden by a special command-line argument.
Converter now always asks for options and no longer supports defining options via the command line.
Fixed a bug where ConverterMasked always used the non-predicted mask.
SampleGenerator now always generates samples with a replicated border, excluding mask samples.
refactorings
This commit is contained in:
iperov 2019-01-02 17:26:12 +04:00
commit 7b70e7eec1
29 changed files with 673 additions and 1013 deletions

View file

@ -1,4 +1,6 @@
import traceback
import sys
import os
import traceback
from pathlib import Path
from utils import Path_utils
import cv2
@ -30,7 +32,9 @@ class model_process_predictor(object):
return obj['result']
time.sleep(0.005)
def model_process(model_name, model_dir, in_options, sq, cq):
def model_process(stdin_fd, model_name, model_dir, in_options, sq, cq):
sys.stdin = os.fdopen(stdin_fd)
try:
model_path = Path(model_dir)
@ -152,7 +156,7 @@ class ConvertSubprocessor(SubprocessorBase):
image = (cv2.imread(str(filename_path)) / 255.0).astype(np.float32)
if self.converter.get_mode() == ConverterBase.MODE_IMAGE:
image = self.converter.convert_image(image, self.debug)
image = self.converter.convert_image(image, None, self.debug)
if self.debug:
for img in image:
cv2.imshow ('Debug convert', img )
@ -229,7 +233,7 @@ def main (input_dir, output_dir, model_dir, model_name, aligned_dir=None, **in_o
model_sq = multiprocessing.Queue()
model_cq = multiprocessing.Queue()
model_lock = multiprocessing.Lock()
model_p = multiprocessing.Process(target=model_process, args=(model_name, model_dir, in_options, model_sq, model_cq))
model_p = multiprocessing.Process(target=model_process, args=( sys.stdin.fileno(), model_name, model_dir, in_options, model_sq, model_cq))
model_p.start()
while True:
@ -266,7 +270,39 @@ def main (input_dir, output_dir, model_dir, model_name, aligned_dir=None, **in_o
alignments[ source_filename_stem ].append (dflpng.get_source_landmarks())
#interpolate landmarks
#from facelib import LandmarksProcessor
#from facelib import FaceType
#a = sorted(alignments.keys())
#a_len = len(a)
#
#box_pts = 3
#box = np.ones(box_pts)/box_pts
#for i in range( a_len ):
# if i >= box_pts and i <= a_len-box_pts-1:
# af0 = alignments[ a[i] ][0] ##first face
# m0 = LandmarksProcessor.get_transform_mat (af0, 256, face_type=FaceType.FULL)
#
# points = []
#
# for j in range(-box_pts, box_pts+1):
# af = alignments[ a[i+j] ][0] ##first face
# m = LandmarksProcessor.get_transform_mat (af, 256, face_type=FaceType.FULL)
# p = LandmarksProcessor.transform_points (af, m)
# points.append (p)
#
# points = np.array(points)
# points_len = len(points)
# t_points = np.transpose(points, [1,0,2])
#
# p1 = np.array ( [ int(np.convolve(x[:,0], box, mode='same')[points_len//2]) for x in t_points ] )
# p2 = np.array ( [ int(np.convolve(x[:,1], box, mode='same')[points_len//2]) for x in t_points ] )
#
# new_points = np.concatenate( [np.expand_dims(p1,-1),np.expand_dims(p2,-1)], -1 )
#
# alignments[ a[i] ][0] = LandmarksProcessor.transform_points (new_points, m0, True).astype(np.int32)
files_processed, faces_processed = ConvertSubprocessor (
converter = converter.copy_and_set_predictor( model_process_predictor(model_sq,model_cq,model_lock) ),
input_path_image_paths = Path_utils.get_image_paths(input_path),

View file

@ -11,7 +11,7 @@ from utils import Path_utils
from utils import image_utils
import cv2
def trainerThread (input_queue, output_queue, training_data_src_dir, training_data_dst_dir, model_path, model_name, save_interval_min=10, debug=False, target_epoch=0, **in_options):
def trainerThread (input_queue, output_queue, training_data_src_dir, training_data_dst_dir, model_path, model_name, save_interval_min=10, debug=False, **in_options):
while True:
try:
@ -29,8 +29,6 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
if not model_path.exists():
model_path.mkdir(exist_ok=True)
import models
model = models.import_model(model_name)(
@ -40,7 +38,7 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
debug=debug,
**in_options)
is_reached_goal = (target_epoch > 0 and model.get_epoch() >= target_epoch)
is_reached_goal = model.is_reached_epoch_goal()
def model_save():
if not debug and not is_reached_goal:
@ -58,11 +56,11 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
if model.is_first_run():
model_save()
if target_epoch != 0:
if model.get_target_epoch() != 0:
if is_reached_goal:
print ('Model already trained to target epoch. You can use preview.')
else:
print('Starting. Target epoch: %d. Press "Enter" to stop training and save model.' % (target_epoch) )
print('Starting. Target epoch: %d. Press "Enter" to stop training and save model.' % ( model.get_target_epoch() ) )
else:
print('Starting. Press "Enter" to stop training and save model.')
@ -73,7 +71,7 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
loss_string = model.train_one_epoch()
print (loss_string, end='\r')
if target_epoch != 0 and model.get_epoch() >= target_epoch:
if model.get_target_epoch() != 0 and model.is_reached_epoch_goal():
print ('Reached target epoch.')
model_save()
is_reached_goal = True