support preview or not

plucky 2018-12-28 00:28:40 +08:00
commit 0b562a151e
3 changed files with 147 additions and 146 deletions
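In short, what the visible hunks change: main.py's `train` subcommand gains a `--preview` flag, `Trainer.main` gains a matching `preview` parameter, and the OpenCV preview thread is only started when that value is set, which matters for headless runs such as the Docker setup touched in the first file. The README hunk also replaces a hardcoded host path with `$PWD` in the example `docker run` command.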

View file

@@ -16,7 +16,7 @@ $ docker build -t deepfacelab-cpu -f Dockerfile.cpu .
```
$ docker run -p 8888:8888 --hostname deepfacelab-cpu --name deepfacelab-cpu -v **your source path**:/srv deepfacelab-cpu
# for example
-$ docker run -p 8888:8888 --hostname deepfacelab-cpu --name deepfacelab-cpu -v /Users/plucky/own/DeepFaceLab:/srv deepfacelab-cpu
+$ docker run -p 8888:8888 --hostname deepfacelab-cpu --name deepfacelab-cpu -v $PWD:/srv deepfacelab-cpu
```
then you will see the log:
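The only change here is the mount source: `$PWD` expands to the directory `docker run` is invoked from, so the example no longer hardcodes `/Users/plucky/own/DeepFaceLab`; run the command from your DeepFaceLab checkout so that `/srv` inside the container points at the source tree.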

main.py
View file

@@ -20,19 +20,19 @@ def str2bool(v):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')

if __name__ == "__main__":
    os_utils.set_process_lowest_prio()
    parser = argparse.ArgumentParser()
    parser.add_argument('--tf-suppress-std', action="store_true", dest="tf_suppress_std", default=False, help="Suppress tensorflow initialization info. May not works on some python builds such as anaconda python 3.6.4. If you can fix it, you are welcome.")

    subparsers = parser.add_subparsers()

    def process_extract(arguments):
        from mainscripts import Extractor
        Extractor.main (
            input_dir=arguments.input_dir,
            output_dir=arguments.output_dir,
            debug=arguments.debug,
            face_type=arguments.face_type,
            detector=arguments.detector,
@@ -41,43 +41,43 @@ if __name__ == "__main__":
            manual_fix=arguments.manual_fix,
            manual_window_size=arguments.manual_window_size
            )

    extract_parser = subparsers.add_parser( "extract", help="Extract the faces from a pictures.")
    extract_parser.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory. A directory containing the files you wish to process.")
    extract_parser.add_argument('--output-dir', required=True, action=fixPathAction, dest="output_dir", help="Output directory. This is where the extracted files will be stored.")
    extract_parser.add_argument('--debug', action="store_true", dest="debug", default=False, help="Writes debug images to [output_dir]_debug\ directory.")
    extract_parser.add_argument('--face-type', dest="face_type", choices=['half_face', 'full_face', 'head', 'avatar', 'mark_only'], default='full_face', help="Default 'full_face'. Don't change this option, currently all models uses 'full_face'")
    extract_parser.add_argument('--detector', dest="detector", choices=['dlib','mt','manual'], default='dlib', help="Type of detector. Default 'dlib'. 'mt' (MTCNNv1) - faster, better, almost no jitter, perfect for gathering thousands faces for src-set. It is also good for dst-set, but can generate false faces in frames where main face not recognized! In this case for dst-set use either 'dlib' with '--manual-fix' or '--detector manual'. Manual detector suitable only for dst-set.")
    extract_parser.add_argument('--multi-gpu', action="store_true", dest="multi_gpu", default=False, help="Enables multi GPU.")
    extract_parser.add_argument('--manual-fix', action="store_true", dest="manual_fix", default=False, help="Enables manual extract only frames where faces were not recognized.")
    extract_parser.add_argument('--manual-window-size', type=int, dest="manual_window_size", default=0, help="Manual fix window size. Example: 1368. Default: frame size.")
    extract_parser.add_argument('--cpu-only', action="store_true", dest="cpu_only", default=False, help="Extract on CPU. Forces to use MT extractor.")
    extract_parser.set_defaults (func=process_extract)

    def process_sort(arguments):
        from mainscripts import Sorter
        Sorter.main (input_path=arguments.input_dir, sort_by_method=arguments.sort_by_method)

    sort_parser = subparsers.add_parser( "sort", help="Sort faces in a directory.")
    sort_parser.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory. A directory containing the files you wish to process.")
    sort_parser.add_argument('--by', required=True, dest="sort_by_method", choices=("blur", "face", "face-dissim", "face-yaw", "hist", "hist-dissim", "brightness", "hue", "black", "origname"), help="Method of sorting. 'origname' sort by original filename to recover original sequence." )
    sort_parser.set_defaults (func=process_sort)

    def process_train(arguments):
        if 'DFL_TARGET_EPOCH' in os.environ.keys():
            arguments.target_epoch = int ( os.environ['DFL_TARGET_EPOCH'] )

        if 'DFL_BATCH_SIZE' in os.environ.keys():
            arguments.batch_size = int ( os.environ['DFL_BATCH_SIZE'] )

        from mainscripts import Trainer
        Trainer.main (
            training_data_src_dir=arguments.training_data_src_dir,
            training_data_dst_dir=arguments.training_data_dst_dir,
            model_path=arguments.model_dir,
            model_name=arguments.model_name,
            debug = arguments.debug,
            #**options
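(Only context above: `process_train` reads `DFL_TARGET_EPOCH` / `DFL_BATCH_SIZE` environment overrides, while the corresponding help strings below mention `ODFS_TARGET_EPOCH` / `ODFS_BATCH_SIZE`; that mismatch is present on both sides of the diff and is not touched by this commit.)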
@@ -91,32 +91,33 @@ if __name__ == "__main__":
            force_gpu_idxs = arguments.force_gpu_idxs,
            cpu_only = arguments.cpu_only
            )

    train_parser = subparsers.add_parser( "train", help="Trainer")
    train_parser.add_argument('--training-data-src-dir', required=True, action=fixPathAction, dest="training_data_src_dir", help="Dir of src-set.")
    train_parser.add_argument('--training-data-dst-dir', required=True, action=fixPathAction, dest="training_data_dst_dir", help="Dir of dst-set.")
    train_parser.add_argument('--model-dir', required=True, action=fixPathAction, dest="model_dir", help="Model dir.")
    train_parser.add_argument('--model', required=True, dest="model_name", choices=Path_utils.get_all_dir_names_startswith ( Path(__file__).parent / 'models' , 'Model_'), help="Type of model")
    train_parser.add_argument('--write-preview-history', action="store_true", dest="write_preview_history", default=False, help="Enable write preview history.")
    train_parser.add_argument('--debug', action="store_true", dest="debug", default=False, help="Debug training.")
+    train_parser.add_argument('--preview', action="store_true", dest="preview", default=True, help="Show preview.")
    train_parser.add_argument('--batch-size', type=int, dest="batch_size", default=0, help="Model batch size. Default - auto. Environment variable: ODFS_BATCH_SIZE.")
    train_parser.add_argument('--target-epoch', type=int, dest="target_epoch", default=0, help="Train until target epoch. Default - unlimited. Environment variable: ODFS_TARGET_EPOCH.")
    train_parser.add_argument('--save-interval-min', type=int, dest="save_interval_min", default=10, help="Save interval in minutes. Default 10.")
    train_parser.add_argument('--choose-worst-gpu', action="store_true", dest="choose_worst_gpu", default=False, help="Choose worst GPU instead of best.")
    train_parser.add_argument('--force-best-gpu-idx', type=int, dest="force_best_gpu_idx", default=-1, help="Force to choose this GPU idx as best(worst).")
    train_parser.add_argument('--multi-gpu', action="store_true", dest="multi_gpu", default=False, help="MultiGPU option. It will select only same best(worst) GPU models.")
    train_parser.add_argument('--force-gpu-idxs', type=str, dest="force_gpu_idxs", default=None, help="Override final GPU idxs. Example: 0,1,2.")
    train_parser.add_argument('--cpu-only', action="store_true", dest="cpu_only", default=False, help="Train on CPU.")
    train_parser.set_defaults (func=process_train)

    def process_convert(arguments):
        if arguments.ask_for_params:
            try:
                mode = int ( input ("Choose mode: (1) hist match, (2) hist match bw, (3) seamless (default), (4) seamless hist match : ") )
            except:
                mode = 3

            if mode == 1:
                arguments.mode = 'hist-match'
            elif mode == 2:
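One thing worth flagging about the new flag (an editorial observation, not part of the diff): `--preview` is declared with `action="store_true"` but `default=True`, so parsing the command line can never turn the preview off — the value is True whether or not the flag is passed. A minimal sketch of the usual paired-flag pattern, using a hypothetical `--no-preview` name that is not in this commit:

```python
import argparse

# Hypothetical sketch only: a store_true/store_false pair keeps the preview
# on by default while still letting a headless run disable it.
parser = argparse.ArgumentParser()
parser.add_argument('--preview',    dest='preview', action='store_true',
                    help="Show the training preview window (default).")
parser.add_argument('--no-preview', dest='preview', action='store_false',
                    help="Run without the preview window, e.g. inside Docker.")
parser.set_defaults(preview=True)

print(parser.parse_args([]).preview)                # True
print(parser.parse_args(['--no-preview']).preview)  # False
```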
@@ -125,73 +126,73 @@ if __name__ == "__main__":
                arguments.mode = 'seamless'
            elif mode == 4:
                arguments.mode = 'seamless-hist-match'

            if arguments.mode == 'hist-match' or arguments.mode == 'hist-match-bw':
                try:
                    arguments.masked_hist_match = bool ( {"1":True,"0":False}[input("Masked hist match? [0 or 1] (default 1) : ").lower()] )
                except:
                    arguments.masked_hist_match = True

            if arguments.mode == 'hist-match' or arguments.mode == 'hist-match-bw' or arguments.mode == 'seamless-hist-match':
                try:
                    hist_match_threshold = int ( input ("Hist match threshold. [0..255] (default - 255) : ") )
                    arguments.hist_match_threshold = hist_match_threshold
                except:
                    arguments.hist_match_threshold = 255

            try:
                arguments.use_predicted_mask = bool ( {"1":True,"0":False}[input("Use predicted mask? [0 or 1] (default 1) : ").lower()] )
            except:
                arguments.use_predicted_mask = False

            try:
                arguments.erode_mask_modifier = int ( input ("Choose erode mask modifier [-200..200] (default 0) : ") )
            except:
                arguments.erode_mask_modifier = 0

            try:
                arguments.blur_mask_modifier = int ( input ("Choose blur mask modifier [-200..200] (default 0) : ") )
            except:
                arguments.blur_mask_modifier = 0

            if arguments.mode == 'seamless' or arguments.mode == 'seamless-hist-match':
                try:
                    arguments.seamless_erode_mask_modifier = int ( input ("Choose seamless erode mask modifier [-100..100] (default 0) : ") )
                except:
                    arguments.seamless_erode_mask_modifier = 0

            try:
                arguments.output_face_scale_modifier = int ( input ("Choose output face scale modifier [-50..50] (default 0) : ") )
            except:
                arguments.output_face_scale_modifier = 0

            try:
                arguments.transfercolor = bool ( {"1":True,"0":False}[input("Transfer color from dst face to converted final face? [0 or 1] (default 0) : ").lower()] )
            except:
                arguments.transfercolor = False

            try:
                arguments.final_image_color_degrade_power = int ( input ("Degrade color power of final image [0..100] (default 0) : ") )
            except:
                arguments.final_image_color_degrade_power = 0

            try:
                arguments.alpha = bool ( {"1":True,"0":False}[input("Export png with alpha channel? [0..1] (default 0) : ").lower()] )
            except:
                arguments.alpha = False

        arguments.erode_mask_modifier = np.clip ( int(arguments.erode_mask_modifier), -200, 200)
        arguments.blur_mask_modifier = np.clip ( int(arguments.blur_mask_modifier), -200, 200)
        arguments.seamless_erode_mask_modifier = np.clip ( int(arguments.seamless_erode_mask_modifier), -100, 100)
        arguments.output_face_scale_modifier = np.clip ( int(arguments.output_face_scale_modifier), -50, 50)

        from mainscripts import Converter
        Converter.main (
            input_dir=arguments.input_dir,
            output_dir=arguments.output_dir,
            aligned_dir=arguments.aligned_dir,
            model_dir=arguments.model_dir,
            model_name=arguments.model_name,
            debug = arguments.debug,
            mode = arguments.mode,
            masked_hist_match = arguments.masked_hist_match,
@@ -207,36 +208,36 @@ if __name__ == "__main__":
            force_best_gpu_idx = arguments.force_best_gpu_idx,
            cpu_only = arguments.cpu_only
            )

    convert_parser = subparsers.add_parser( "convert", help="Converter")
    convert_parser.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory. A directory containing the files you wish to process.")
    convert_parser.add_argument('--output-dir', required=True, action=fixPathAction, dest="output_dir", help="Output directory. This is where the converted files will be stored.")
    convert_parser.add_argument('--aligned-dir', action=fixPathAction, dest="aligned_dir", help="Aligned directory. This is where the extracted of dst faces stored. Not used in AVATAR model.")
    convert_parser.add_argument('--model-dir', required=True, action=fixPathAction, dest="model_dir", help="Model dir.")
    convert_parser.add_argument('--model', required=True, dest="model_name", choices=Path_utils.get_all_dir_names_startswith ( Path(__file__).parent / 'models' , 'Model_'), help="Type of model")
    convert_parser.add_argument('--ask-for-params', action="store_true", dest="ask_for_params", default=False, help="Ask for params.")
    convert_parser.add_argument('--mode', dest="mode", choices=['seamless','hist-match', 'hist-match-bw','seamless-hist-match'], default='seamless', help="Face overlaying mode. Seriously affects result.")
    convert_parser.add_argument('--masked-hist-match', type=str2bool, nargs='?', const=True, default=True, help="True or False. Excludes background for hist match. Default - True.")
    convert_parser.add_argument('--hist-match-threshold', type=int, dest="hist_match_threshold", default=255, help="Hist match threshold. Decrease to hide artifacts of hist match. Valid range [0..255]. Default 255")
    convert_parser.add_argument('--use-predicted-mask', action="store_true", dest="use_predicted_mask", default=True, help="Use predicted mask by model. Default - True.")
    convert_parser.add_argument('--erode-mask-modifier', type=int, dest="erode_mask_modifier", default=0, help="Automatic erode mask modifier. Valid range [-200..200].")
    convert_parser.add_argument('--blur-mask-modifier', type=int, dest="blur_mask_modifier", default=0, help="Automatic blur mask modifier. Valid range [-200..200].")
    convert_parser.add_argument('--seamless-erode-mask-modifier', type=int, dest="seamless_erode_mask_modifier", default=0, help="Automatic seamless erode mask modifier. Valid range [-200..200].")
    convert_parser.add_argument('--output-face-scale-modifier', type=int, dest="output_face_scale_modifier", default=0, help="Output face scale modifier. Valid range [-50..50].")
    convert_parser.add_argument('--final-image-color-degrade-power', type=int, dest="final_image_color_degrade_power", default=0, help="Degrades colors of final image to hide face problems. Valid range [0..100].")
    convert_parser.add_argument('--transfercolor', action="store_true", dest="transfercolor", default=False, help="Transfer color from dst face to converted final face.")
    convert_parser.add_argument('--alpha', action="store_true", dest="alpha", default=False, help="Embeds alpha channel of face mask to final PNG. Used in manual composing video by editors such as Sony Vegas or After Effects.")
    convert_parser.add_argument('--debug', action="store_true", dest="debug", default=False, help="Debug converter.")
    convert_parser.add_argument('--force-best-gpu-idx', type=int, dest="force_best_gpu_idx", default=-1, help="Force to choose this GPU idx as best.")
    convert_parser.add_argument('--cpu-only', action="store_true", dest="cpu_only", default=False, help="Convert on CPU.")
    convert_parser.set_defaults(func=process_convert)

    def bad_args(arguments):
        parser.print_help()
        exit(0)
    parser.set_defaults(func=bad_args)

    arguments = parser.parse_args()
    if arguments.tf_suppress_std:
        os.environ['TF_SUPPRESS_STD'] = '1'

View file

@@ -1,76 +1,76 @@
import sys
import traceback
import queue
import colorsys
import time
import numpy as np
import itertools

from pathlib import Path
from utils import Path_utils
from utils import image_utils
import cv2

def trainerThread (input_queue, output_queue, training_data_src_dir, training_data_dst_dir, model_path, model_name, save_interval_min=10, debug=False, target_epoch=0, **in_options):
    while True:
        try:
            training_data_src_path = Path(training_data_src_dir)
            training_data_dst_path = Path(training_data_dst_dir)
            model_path = Path(model_path)

            if not training_data_src_path.exists():
                print( 'Training data src directory is not exists.')
                return

            if not training_data_dst_path.exists():
                print( 'Training data dst directory is not exists.')
                return

            if not model_path.exists():
                model_path.mkdir(exist_ok=True)

            import models
            model = models.import_model(model_name)(
                model_path,
                training_data_src_path=training_data_src_path,
                training_data_dst_path=training_data_dst_path,
                debug=debug,
                **in_options)

            is_reached_goal = (target_epoch > 0 and model.get_epoch() >= target_epoch)

            def model_save():
                if not debug and not is_reached_goal:
                    model.save()

            def send_preview():
                if not debug:
                    previews = model.get_previews()
                    output_queue.put ( {'op':'show', 'previews': previews, 'epoch':model.get_epoch(), 'loss_history': model.get_loss_history().copy() } )
                else:
                    previews = [( 'debug, press update for new', model.debug_one_epoch())]
                    output_queue.put ( {'op':'show', 'previews': previews} )

            if model.is_first_run():
                model_save()

            if target_epoch != 0:
                if is_reached_goal:
                    print ('Model already trained to target epoch. You can use preview.')
                else:
                    print('Starting. Target epoch: %d. Press "Enter" to stop training and save model.' % (target_epoch) )
            else:
                print('Starting. Press "Enter" to stop training and save model.')

            last_save_time = time.time()
            for i in itertools.count(0,1):
                if not debug:
                    if not is_reached_goal:
                        loss_string = model.train_one_epoch()
                        print (loss_string, end='\r')

                        if target_epoch != 0 and model.get_epoch() >= target_epoch:
@@ -80,39 +80,39 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
                            print ('You can use preview now.')

                    if not is_reached_goal and (time.time() - last_save_time) >= save_interval_min*60:
                        last_save_time = time.time()
                        model_save()
                        send_preview()

                if i==0:
                    if is_reached_goal:
                        model.pass_one_epoch()
                    send_preview()

                if debug:
                    time.sleep(0.005)

                while not input_queue.empty():
                    input = input_queue.get()
                    op = input['op']
                    if op == 'save':
                        model_save()
                    elif op == 'preview':
                        if is_reached_goal:
                            model.pass_one_epoch()
                        send_preview()
                    elif op == 'close':
                        model_save()
                        i = -1
                        break

                if i == -1:
                    break

            model.finalize()

        except Exception as e:
            print ('Error: %s' % (str(e)))
            traceback.print_exc()
@@ -120,8 +120,8 @@ def trainerThread (input_queue, output_queue, training_data_src_dir, training_da
    output_queue.put ( {'op':'close'} )

def previewThread (input_queue, output_queue):
    previews = None
    loss_history = None
    selected_preview = 0
@@ -129,7 +129,7 @@ def previewThread (input_queue, output_queue):
    is_showing = False
    is_waiting_preview = False
    epoch = 0

    while True:
        if not input_queue.empty():
            input = input_queue.get()
            op = input['op']
@@ -145,7 +145,7 @@ def previewThread (input_queue, output_queue):
                        (h, w, c) = preview_rgb.shape
                        max_h = max (max_h, h)
                        max_w = max (max_w, w)

                    max_size = 800
                    if max_h > max_size:
                        max_w = int( max_w / (max_h / max_size) )
@@ -162,101 +162,101 @@ def previewThread (input_queue, output_queue):
                    update_preview = True
            elif op == 'close':
                break

        if update_preview:
            update_preview = False

            (h,w,c) = previews[0][1].shape

            selected_preview_name = previews[selected_preview][0]
            selected_preview_rgb = previews[selected_preview][1]

            # HEAD
            head_text_color = [0.8]*c
            head_lines = [
                '[s]:save [enter]:exit',
                '[p]:update [space]:next preview',
                'Preview: "%s" [%d/%d]' % (selected_preview_name,selected_preview+1, len(previews) )
                ]
            head_line_height = 15
            head_height = len(head_lines) * head_line_height
            head = np.ones ( (head_height,w,c) ) * 0.1

            for i in range(0, len(head_lines)):
                t = i*head_line_height
                b = (i+1)*head_line_height
                head[t:b, 0:w] += image_utils.get_text_image ( (w,head_line_height,c) , head_lines[i], color=head_text_color )

            final = head

            if loss_history is not None:
                # LOSS HISTORY
                loss_history = np.array (loss_history)

                lh_height = 100
                lh_img = np.ones ( (lh_height,w,c) ) * 0.1
                loss_count = len(loss_history[0])
                lh_len = len(loss_history)

                l_per_col = lh_len / w
                plist_max = [ [ max (0.0, 0.0, *[ loss_history[i_ab][p]
                                                  for i_ab in range( int(col*l_per_col), int((col+1)*l_per_col) )
                                                ]
                                    )
                                for p in range(0,loss_count)
                              ]
                              for col in range(0, w)
                            ]

                plist_min = [ [ min (plist_max[col][p],
                                     plist_max[col][p],
                                     *[ loss_history[i_ab][p]
                                        for i_ab in range( int(col*l_per_col), int((col+1)*l_per_col) )
                                      ]
                                    )
                                for p in range(0,loss_count)
                              ]
                              for col in range(0, w)
                            ]

                plist_abs_max = np.mean(loss_history[ len(loss_history) // 5 : ]) * 2

                if l_per_col >= 1.0:
                    for col in range(0, w):
                        for p in range(0,loss_count):
                            point_color = [1.0]*c
                            point_color[0:3] = colorsys.hsv_to_rgb ( p * (1.0/loss_count), 1.0, 1.0 )

                            ph_max = int ( (plist_max[col][p] / plist_abs_max) * (lh_height-1) )
                            ph_max = np.clip( ph_max, 0, lh_height-1 )

                            ph_min = int ( (plist_min[col][p] / plist_abs_max) * (lh_height-1) )
                            ph_min = np.clip( ph_min, 0, lh_height-1 )

                            for ph in range(ph_min, ph_max+1):
                                lh_img[ (lh_height-ph-1), col ] = point_color

                lh_lines = 5
                lh_line_height = (lh_height-1)/lh_lines
                for i in range(0,lh_lines+1):
                    lh_img[ int(i*lh_line_height), : ] = (0.8,)*c

                last_line_t = int((lh_lines-1)*lh_line_height)
                last_line_b = int(lh_lines*lh_line_height)

                if epoch != 0:
                    lh_text = 'Loss history. Epoch: %d' % (epoch)
                else:
                    lh_text = 'Loss history.'

                lh_img[last_line_t:last_line_b, 0:w] += image_utils.get_text_image ( (w,last_line_b-last_line_t,c), lh_text, color=head_text_color )

                final = np.concatenate ( [final, lh_img], axis=0 )

            final = np.concatenate ( [final, selected_preview_rgb], axis=0 )

            cv2.imshow ( 'Training preview', final)
            is_showing = True

        if is_showing:
            key = cv2.waitKey(100)
        else:
@@ -274,16 +274,16 @@ def previewThread (input_queue, output_queue):
        elif key == ord(' '):
            selected_preview = (selected_preview + 1) % len(previews)
            update_preview = True

    cv2.destroyAllWindows()

-def main (training_data_src_dir, training_data_dst_dir, model_path, model_name, **in_options):
+def main (training_data_src_dir, training_data_dst_dir, model_path, model_name,preview, **in_options):
    print ("Running trainer.\r\n")

    output_queue = queue.Queue()
    input_queue = queue.Queue()

    import threading
    thread = threading.Thread(target=trainerThread, args=(output_queue, input_queue, training_data_src_dir, training_data_dst_dir, model_path, model_name), kwargs=in_options )
    thread.start()

-    previewThread (input_queue, output_queue)
+    if preview:
+        previewThread (input_queue, output_queue)
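A closing note on what the new code path implies, as far as the visible hunks show (an assumption-laden sketch, not part of the commit): with `preview` false, `main` starts `trainerThread` and returns without anything draining the messages that `send_preview` keeps pushing, and without the keyboard handling that `previewThread` normally provides for save/exit. A headless caller could stand in for `previewThread(input_queue, output_queue)` with a loop roughly like the one below; the function name and the time-based stop condition are hypothetical, only the `'op'` values (`'show'`, `'close'`) and the crossed queues mirror the code above.

```python
import queue
import time

def headless_loop(input_queue, output_queue, run_minutes=60):
    # Hypothetical replacement for previewThread(input_queue, output_queue):
    # drain the trainer's messages so its queue does not grow without bound,
    # and ask it to save and stop after a fixed wall-clock budget.
    deadline = time.time() + run_minutes * 60
    close_sent = False
    while True:
        try:
            msg = input_queue.get(timeout=1.0)   # messages coming from trainerThread
        except queue.Empty:
            msg = None
        if msg is not None and msg.get('op') == 'close':
            break                                # trainer has saved and finished
        if not close_sent and time.time() >= deadline:
            output_queue.put({'op': 'close'})    # trainerThread saves and exits on 'close'
            close_sent = True
```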