Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-07-06 21:12:07 -07:00)
Provide an automatic merge mechanism. (提供自動合併機制。)
parent 335f887ba9
commit a767b6790b

4 changed files with 98 additions and 37 deletions
.vscode/launch.json (vendored): 23 changes

@@ -38,7 +38,8 @@
                 "--training-data-src-dir", "D:\\DeepFaceLab\\workspace\\data_src\\aligned",
                 "--training-data-dst-dir", "D:\\DeepFaceLab\\workspace\\data_dst\\aligned",
                 "--model-dir", "D:\\DeepFaceLab\\workspace\\model",
-                "--model", "SAEHD"
+                "--model", "SAEHD",
+                "--silent-start"
             ]
         },
         {
@@ -63,6 +64,26 @@
                 "--force-gpu-idxs", "0"
             ]
         },
+        {
+            "name": "DFL mp4",
+            "subProcess": true,
+            "justMyCode": true,
+            "type": "python",
+            "request": "launch",
+            "program": "D:\\DeepFaceLab\\_internal\\DeepFaceLab\\main.py",
+            "python": "D:\\DeepFaceLab\\_internal\\python-3.6.8\\python.exe",
+            "cwd": "D:\\DeepFaceLab\\workspace",
+            "console": "integratedTerminal",
+            "gevent": true,
+            "args": ["videoed",
+                "video-from-sequence",
+                "--input-dir", "data_dst\\merged",
+                "--output-file", "result.mp4",
+                "--reference-file", "data_dst.*",
+                "--bitrate", "16",
+                "--include-audio"
+            ]
+        },
         {
             "name": "Auto DFL",
             "subProcess": true,
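The new "DFL mp4" entry drives main.py's videoed video-from-sequence command to assemble the merged frames in data_dst\merged back into result.mp4. For reference, here is the same invocation restated as a plain subprocess call for use outside VS Code; the wrapper itself is illustrative and not part of the commit, only the arguments come from the diff:

# Illustrative equivalent of the "DFL mp4" launch entry: same interpreter,
# same script, same arguments and working directory as configured above.
import subprocess

subprocess.run(
    [
        r"D:\DeepFaceLab\_internal\python-3.6.8\python.exe",
        r"D:\DeepFaceLab\_internal\DeepFaceLab\main.py",
        "videoed", "video-from-sequence",
        "--input-dir", r"data_dst\merged",
        "--output-file", "result.mp4",
        "--reference-file", "data_dst.*",
        "--bitrate", "16",
        "--include-audio",
    ],
    cwd=r"D:\DeepFaceLab\workspace",
    check=True,  # raise if the encode fails
)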
@@ -2,8 +2,9 @@ import ymauto.MergeDefault as MD

 md = MD.MergeArgs("config.json")

-print(md.g("sandy", "name"))
-print(md.g("beauty", "deep", "gender"))
+print(md.gOrder("mask_mode", "mask_mode_opts"))
+# print(md.g("sandy", "name"))
+# print(md.g("beauty", "deep", "gender"))
 # print(md.g("version1"))
 # print(md.g("deep", "method"))
 # print(md.g("deep", "methoda", "PP"))
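ymauto.MergeDefault itself is not part of this diff, so the behaviour of g(), gd() and gOrder() can only be inferred from their call sites here and in the merger hunks below. A purely hypothetical sketch of such a config-backed reader, matching the post-commit call signatures; names and semantics are guesses, not the actual module:

# Hypothetical sketch of ymauto/MergeDefault.py, inferred only from the call
# sites in this commit (md.g, md.gd, md.gOrder); the real module may differ.
import json

class MergeArgs:
    def __init__(self, config_path):
        with open(config_path, "r", encoding="utf-8") as f:
            self.cfg = json.load(f)

    def g(self, *keys):
        """Walk nested keys; return None when any key is missing."""
        node = self.cfg
        for key in keys:
            if not isinstance(node, dict) or key not in node:
                return None
            node = node[key]
        return node

    def gd(self, default, *keys):
        """Like g(), but return `default` when the value is not set."""
        value = self.g(*keys)
        return default if value is None else value

    def gOrder(self, value_key, opts_key):
        """Return the position of cfg[value_key] inside the list cfg[opts_key],
        e.g. to turn a configured mask-mode name into the menu number the
        merger expects."""
        value = self.g(value_key)
        opts = self.g(opts_key) or []
        return opts.index(value) if value in opts else None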
@@ -71,19 +71,19 @@ def main (model_class_name=None,
                                         place_model_on_cpu=True,
                                         run_on_cpu=run_on_cpu)

-        if md.g(None, "interactive") is None:
+        if md.g("interactive") is None:
             is_interactive = io.input_bool ("Use interactive merger?", True) if not io.is_colab() else False
         else:
-            is_interactive = md.g(None, "interactive")
+            is_interactive = md.g("interactive")

         if not is_interactive:
             cfg.ask_settings()

-        if md.g(None, "subprocess_count") is None:
+        if md.gd("NoSet", "subprocess_count") == "NoSet":
             subprocess_count = io.input_int("Number of workers?", max(8, multiprocessing.cpu_count()),
                                             valid_range=[1, multiprocessing.cpu_count()], help_message="Specify the number of threads to process. A low value may affect performance. A high value may result in memory error. The value may not be greater than CPU cores." )
         else:
-            subprocess_count = md.g(4, "subprocess_count")
+            subprocess_count = md.gd(4, "subprocess_count")

         input_path_image_paths = pathex.get_image_paths(input_path)

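Note the changed test for subprocess_count: instead of md.g(...) is None, the code asks gd() for a string sentinel, presumably so that an explicitly configured value, even a falsy one, is not mistaken for an unset key. A minimal illustration of the sentinel pattern, reusing the hypothetical MergeArgs sketch above:

# "NoSet" can never be a legitimate worker count, so it cleanly separates
# "key missing from config.json" from any value the user actually set.
SENTINEL = "NoSet"

value = md.gd(SENTINEL, "subprocess_count")
if value == SENTINEL:
    subprocess_count = int(input("Number of workers? "))  # fall back to the prompt
else:
    subprocess_count = value                               # honour the configured value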
@@ -34,11 +34,14 @@ class MergerConfig(object):

     #overridable
     def ask_settings(self):
-        s = """Choose sharpen mode: \n"""
-        for key in self.sharpen_dict.keys():
-            s += f"""({key}) {self.sharpen_dict[key]}\n"""
-        io.log_info(s)
-        self.sharpen_mode = io.input_int ("", 0, valid_list=self.sharpen_dict.keys(), help_message="Enhance details by applying sharpen filter.")
+        if md.g("sharpen_mode") is None:
+            s = """Choose sharpen mode: \n"""
+            for key in self.sharpen_dict.keys():
+                s += f"""({key}) {self.sharpen_dict[key]}\n"""
+            io.log_info(s)
+            self.sharpen_mode = io.input_int ("", 0, valid_list=self.sharpen_dict.keys(), help_message="Enhance details by applying sharpen filter.")
+        else:
+            self.sharpen_mode = md.gOrder("sharpen_mode", "sharpen_mode_opts")

         if self.sharpen_mode != 0:
             self.blursharpen_amount = np.clip ( io.input_int ("Choose blur/sharpen amount", 0, add_info="-100..100"), -100, 100 )
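self.sharpen_mode is normally the integer the menu prints (a key of self.sharpen_dict), so a value taken from config.json has to be translated back into that index; that appears to be the role of gOrder("sharpen_mode", "sharpen_mode_opts"). A small illustration under that assumption, with placeholder option names:

# Assuming config.json holds something like:
#   "sharpen_mode": "box",
#   "sharpen_mode_opts": ["None", "box", "gaussian"]
# gOrder() would return 1, the position of "box" in the options list, i.e. the
# same integer the interactive menu would have produced for that choice.
sharpen_mode_opts = ["None", "box", "gaussian"]   # placeholder option list
assert sharpen_mode_opts.index("box") == 1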
@@ -191,15 +194,17 @@ class MergerConfigMasked(MergerConfig):
             self.bicubic_degrade_power = np.clip ( self.bicubic_degrade_power+diff, 0, 100)

     def ask_settings(self):
-        print("mode: " + md.g('mode'))

-        s = """Choose mode: \n"""
-        for key in mode_dict.keys():
-            s += f"""({key}) {mode_dict[key]}\n"""
-        io.log_info(s)
-        mode = io.input_int ("", mode_str_dict.get(self.default_mode, 1) )
+        if md.g("mode") is None:
+            s = """Choose mode: \n"""
+            for key in mode_dict.keys():
+                s += f"""({key}) {mode_dict[key]}\n"""
+            io.log_info(s)
+            mode = io.input_int ("", mode_str_dict.get(self.default_mode, 1) )

-        self.mode = mode_dict.get (mode, self.default_mode )
+            self.mode = mode_dict.get (mode, self.default_mode )
+        else:
+            self.mode = md.g("mode")

         if 'raw' not in self.mode:
             if self.mode == 'hist-match':
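Unlike sharpen_mode and mask_mode, the mode setting is taken from config.json verbatim: mode_dict maps menu numbers to mode names and mode_str_dict is its inverse, so the interactive path converts a number into a name while the config path already supplies the name. An abridged sketch of those lookup tables (not part of this diff, and shorter than the real ones):

# Abridged illustration of the lookup tables used above; the real dictionaries
# in the merger config contain more modes than shown here.
mode_dict = {1: 'overlay', 2: 'hist-match', 3: 'seamless'}        # number -> name (abridged)
mode_str_dict = {name: num for num, name in mode_dict.items()}    # name -> number

mode = 2                                                          # what io.input_int would return
assert mode_dict.get(mode, 'overlay') == 'hist-match'             # interactive path
assert mode_str_dict['hist-match'] == 2                           # default shown by the prompt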
@@ -208,31 +213,65 @@ class MergerConfigMasked(MergerConfig):
         if self.mode == 'hist-match' or self.mode == 'seamless-hist-match':
             self.hist_match_threshold = np.clip ( io.input_int("Hist match threshold", 255, add_info="0..255"), 0, 255)

-        s = """Choose mask mode: \n"""
-        for key in mask_mode_dict.keys():
-            s += f"""({key}) {mask_mode_dict[key]}\n"""
-        io.log_info(s)
-        self.mask_mode = io.input_int ("", 1, valid_list=mask_mode_dict.keys() )
+        if md.g("mask_mode") is None:
+            s = """Choose mask mode: \n"""
+            for key in mask_mode_dict.keys():
+                s += f"""({key}) {mask_mode_dict[key]}\n"""
+            io.log_info(s)
+            self.mask_mode = io.input_int ("", 1, valid_list=mask_mode_dict.keys() )
+        else:
+            self.mask_mode = md.gOrder("mask_mode", "mask_mode_opts")

         if 'raw' not in self.mode:
-            self.erode_mask_modifier = np.clip ( io.input_int ("Choose erode mask modifier", 0, add_info="-400..400"), -400, 400)
-            self.blur_mask_modifier = np.clip ( io.input_int ("Choose blur mask modifier", 0, add_info="0..400"), 0, 400)
-            self.motion_blur_power = np.clip ( io.input_int ("Choose motion blur power", 0, add_info="0..100"), 0, 100)
+            if md.g("erode_mask_modifier") is None:
+                self.erode_mask_modifier = np.clip ( io.input_int ("Choose erode mask modifier", 0, add_info="-400..400"), -400, 400)
+            else:
+                self.erode_mask_modifier = md.g("erode_mask_modifier")
+
+            if md.g("blur_mask_modifier") is None:
+                self.blur_mask_modifier = np.clip ( io.input_int ("Choose blur mask modifier", 0, add_info="0..400"), 0, 400)
+            else:
+                self.blur_mask_modifier = md.g("blur_mask_modifier")
+
+            if md.g("motion_blur_power") is None:
+                self.motion_blur_power = np.clip ( io.input_int ("Choose motion blur power", 0, add_info="0..100"), 0, 100)
+            else:
+                self.motion_blur_power = md.g("motion_blur_power")

-        self.output_face_scale = np.clip (io.input_int ("Choose output face scale modifier", 0, add_info="-50..50" ), -50, 50)
+        if md.g("output_face_scale") is None:
+            self.output_face_scale = np.clip (io.input_int ("Choose output face scale modifier", 0, add_info="-50..50" ), -50, 50)
+        else:
+            self.output_face_scale = md.g("output_face_scale")

         if 'raw' not in self.mode:
-            self.color_transfer_mode = io.input_str ( "Color transfer to predicted face", None, valid_list=list(ctm_str_dict.keys())[1:] )
+            if md.g("color_transfer_mode") is None:
+                self.color_transfer_mode = io.input_str ( "Color transfer to predicted face", None, valid_list=list(ctm_str_dict.keys())[1:] )
+            else:
+                self.color_transfer_mode = md.g("color_transfer_mode")
             self.color_transfer_mode = ctm_str_dict[self.color_transfer_mode]

         super().ask_settings()

-        self.super_resolution_power = np.clip ( io.input_int ("Choose super resolution power", 0, add_info="0..100", help_message="Enhance details by applying superresolution network."), 0, 100)
+        if md.g("super_resolution_power") is None:
+            self.super_resolution_power = np.clip ( io.input_int ("Choose super resolution power", 0, add_info="0..100", help_message="Enhance details by applying superresolution network."), 0, 100)
+        else:
+            self.super_resolution_power = md.g("super_resolution_power")

         if 'raw' not in self.mode:
-            self.image_denoise_power = np.clip ( io.input_int ("Choose image degrade by denoise power", 0, add_info="0..500"), 0, 500)
-            self.bicubic_degrade_power = np.clip ( io.input_int ("Choose image degrade by bicubic rescale power", 0, add_info="0..100"), 0, 100)
-            self.color_degrade_power = np.clip ( io.input_int ("Degrade color power of final image", 0, add_info="0..100"), 0, 100)
+            if md.g("image_denoise_power") is None:
+                self.image_denoise_power = np.clip ( io.input_int ("Choose image degrade by denoise power", 0, add_info="0..500"), 0, 500)
+            else:
+                self.image_denoise_power = md.g("image_denoise_power")
+
+            if md.g("bicubic_degrade_power") is None:
+                self.bicubic_degrade_power = np.clip ( io.input_int ("Choose image degrade by bicubic rescale power", 0, add_info="0..100"), 0, 100)
+            else:
+                self.bicubic_degrade_power = md.g("bicubic_degrade_power")
+
+            if md.g("color_degrade_power") is None:
+                self.color_degrade_power = np.clip ( io.input_int ("Degrade color power of final image", 0, add_info="0..100"), 0, 100)
+            else:
+                self.color_degrade_power = md.g("color_degrade_power")

         io.log_info ("")
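Taken together, the keys read through md in this commit imply a config.json shaped roughly like the following. Every value is a placeholder, the *_opts lists are assumed to mirror the interactive menus, and the real schema belongs to ymauto, which is not shown in this diff:

# Hypothetical config.json contents, written as a Python dict for readability.
# Only keys referenced by md.g()/md.gd()/md.gOrder() in this commit are listed;
# every value is a placeholder, not a recommendation.
example_config = {
    "interactive": False,
    "subprocess_count": 4,
    "mode": "overlay",                      # consumed directly by md.g("mode")
    "sharpen_mode": "box",                  # mapped to a menu index via gOrder()
    "sharpen_mode_opts": ["None", "box", "gaussian"],
    "mask_mode": "learned-prd",             # likewise mapped via gOrder()
    "mask_mode_opts": ["full", "dst", "learned-prd", "learned-dst"],
    "erode_mask_modifier": 0,
    "blur_mask_modifier": 0,
    "motion_blur_power": 0,
    "output_face_scale": 0,
    "color_transfer_mode": "rct",
    "super_resolution_power": 0,
    "image_denoise_power": 0,
    "bicubic_degrade_power": 0,
    "color_degrade_power": 0,
}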