diff --git a/.vscode/launch.json b/.vscode/launch.json index 0118407..f9b461d 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -38,7 +38,8 @@ "--training-data-src-dir", "D:\\DeepFaceLab\\workspace\\data_src\\aligned", "--training-data-dst-dir", "D:\\DeepFaceLab\\workspace\\data_dst\\aligned", "--model-dir", "D:\\DeepFaceLab\\workspace\\model", - "--model", "SAEHD" + "--model", "SAEHD", + "--silent-start" ] }, { @@ -63,6 +64,26 @@ "--force-gpu-idxs", "0" ] }, + { + "name": "DFL mp4", + "subProcess": true, + "justMyCode": true, + "type": "python", + "request": "launch", + "program": "D:\\DeepFaceLab\\_internal\\DeepFaceLab\\main.py", + "python": "D:\\DeepFaceLab\\_internal\\python-3.6.8\\python.exe", + "cwd": "D:\\DeepFaceLab\\workspace", + "console": "integratedTerminal", + "gevent": true, + "args": ["videoed", + "video-from-sequence", + "--input-dir", "data_dst\\merged", + "--output-file", "result.mp4", + "--reference-file", "data_dst.*", + "--bitrate", "16", + "--include-audio" + ] + }, { "name": "Auto DFL", "subProcess": true, diff --git a/autodfl.py b/autodfl.py index 97d68f3..7a3cbb9 100644 --- a/autodfl.py +++ b/autodfl.py @@ -2,9 +2,10 @@ import ymauto.MergeDefault as MD md = MD.MergeArgs("config.json") -print(md.g("sandy", "name")) -print(md.g("beauty", "deep", "gender")) +print(md.gOrder("mask_mode", "mask_mode_opts")) +# print(md.g("sandy", "name")) +# print(md.g("beauty", "deep", "gender")) # print(md.g("version1")) # print(md.g("deep", "method")) # print(md.g("deep", "methoda", "PP")) -# print([v + '真神' for v in md.g("array")]) +# print([v + '真神' for v in md.g("array")]) \ No newline at end of file diff --git a/mainscripts/Merger.py b/mainscripts/Merger.py index 84a7e91..ad36132 100644 --- a/mainscripts/Merger.py +++ b/mainscripts/Merger.py @@ -71,19 +71,19 @@ def main (model_class_name=None, place_model_on_cpu=True, run_on_cpu=run_on_cpu) - if md.g(None, "interactive") is None: + if md.g("interactive") is None: is_interactive = 
io.input_bool ("Use interactive merger?", True) if not io.is_colab() else False else: - is_interactive = md.g(None, "interactive") + is_interactive = md.g("interactive") if not is_interactive: cfg.ask_settings() - if md.g(None, "subprocess_count") is None: + if md.gd("NoSet", "subprocess_count") == "NoSet": subprocess_count = io.input_int("Number of workers?", max(8, multiprocessing.cpu_count()), valid_range=[1, multiprocessing.cpu_count()], help_message="Specify the number of threads to process. A low value may affect performance. A high value may result in memory error. The value may not be greater than CPU cores." ) else: - subprocess_count = md.g(4, "subprocess_count") + subprocess_count = md.gd(4, "subprocess_count") input_path_image_paths = pathex.get_image_paths(input_path) diff --git a/merger/MergerConfig.py b/merger/MergerConfig.py index 6c91f25..61f3e5e 100644 --- a/merger/MergerConfig.py +++ b/merger/MergerConfig.py @@ -33,12 +33,15 @@ class MergerConfig(object): return copy.copy(self) #overridable - def ask_settings(self): - s = """Choose sharpen mode: \n""" - for key in self.sharpen_dict.keys(): - s += f"""({key}) {self.sharpen_dict[key]}\n""" - io.log_info(s) - self.sharpen_mode = io.input_int ("", 0, valid_list=self.sharpen_dict.keys(), help_message="Enhance details by applying sharpen filter.") + def ask_settings(self): + if md.g("sharpen_mode") is None: + s = """Choose sharpen mode: \n""" + for key in self.sharpen_dict.keys(): + s += f"""({key}) {self.sharpen_dict[key]}\n""" + io.log_info(s) + self.sharpen_mode = io.input_int ("", 0, valid_list=self.sharpen_dict.keys(), help_message="Enhance details by applying sharpen filter.") + else: + self.sharpen_mode = md.gOrder("sharpen_mode", "sharpen_mode_opts") if self.sharpen_mode != 0: self.blursharpen_amount = np.clip ( io.input_int ("Choose blur/sharpen amount", 0, add_info="-100..100"), -100, 100 ) @@ -191,15 +194,17 @@ class MergerConfigMasked(MergerConfig): self.bicubic_degrade_power = np.clip ( 
self.bicubic_degrade_power+diff, 0, 100) def ask_settings(self): - print("mode: " + md.g('mode')) - - s = """Choose mode: \n""" - for key in mode_dict.keys(): - s += f"""({key}) {mode_dict[key]}\n""" - io.log_info(s) - mode = io.input_int ("", mode_str_dict.get(self.default_mode, 1) ) - - self.mode = mode_dict.get (mode, self.default_mode ) + + if md.g("mode") is None: + s = """Choose mode: \n""" + for key in mode_dict.keys(): + s += f"""({key}) {mode_dict[key]}\n""" + io.log_info(s) + mode = io.input_int ("", mode_str_dict.get(self.default_mode, 1) ) + + self.mode = mode_dict.get (mode, self.default_mode ) + else: + self.mode = md.g("mode") if 'raw' not in self.mode: if self.mode == 'hist-match': @@ -208,31 +213,65 @@ class MergerConfigMasked(MergerConfig): if self.mode == 'hist-match' or self.mode == 'seamless-hist-match': self.hist_match_threshold = np.clip ( io.input_int("Hist match threshold", 255, add_info="0..255"), 0, 255) - s = """Choose mask mode: \n""" - for key in mask_mode_dict.keys(): - s += f"""({key}) {mask_mode_dict[key]}\n""" - io.log_info(s) - self.mask_mode = io.input_int ("", 1, valid_list=mask_mode_dict.keys() ) + if md.g("mask_mode") is None: + s = """Choose mask mode: \n""" + for key in mask_mode_dict.keys(): + s += f"""({key}) {mask_mode_dict[key]}\n""" + io.log_info(s) + self.mask_mode = io.input_int ("", 1, valid_list=mask_mode_dict.keys() ) + else: + self.mask_mode = md.gOrder("mask_mode", "mask_mode_opts") if 'raw' not in self.mode: - self.erode_mask_modifier = np.clip ( io.input_int ("Choose erode mask modifier", 0, add_info="-400..400"), -400, 400) - self.blur_mask_modifier = np.clip ( io.input_int ("Choose blur mask modifier", 0, add_info="0..400"), 0, 400) - self.motion_blur_power = np.clip ( io.input_int ("Choose motion blur power", 0, add_info="0..100"), 0, 100) + if md.g("erode_mask_modifier") is None: + self.erode_mask_modifier = np.clip ( io.input_int ("Choose erode mask modifier", 0, add_info="-400..400"), -400, 400) + else: + 
self.erode_mask_modifier = md.g("erode_mask_modifier")
+
+            if md.g("blur_mask_modifier") is None:
+                self.blur_mask_modifier = np.clip ( io.input_int ("Choose blur mask modifier", 0, add_info="0..400"), 0, 400)
+            else:
+                self.blur_mask_modifier = md.g("blur_mask_modifier")
+
+            if md.g("motion_blur_power") is None:
+                self.motion_blur_power = np.clip ( io.input_int ("Choose motion blur power", 0, add_info="0..100"), 0, 100)
+            else:
+                self.motion_blur_power = md.g("motion_blur_power")
 
-        self.output_face_scale = np.clip (io.input_int ("Choose output face scale modifier", 0, add_info="-50..50" ), -50, 50)
+        if md.g("output_face_scale") is None:
+            self.output_face_scale = np.clip (io.input_int ("Choose output face scale modifier", 0, add_info="-50..50" ), -50, 50)
+        else:
+            self.output_face_scale = md.g("output_face_scale")
 
         if 'raw' not in self.mode:
-            self.color_transfer_mode = io.input_str ( "Color transfer to predicted face", None, valid_list=list(ctm_str_dict.keys())[1:] )
+            if md.g("color_transfer_mode") is None:
+                self.color_transfer_mode = io.input_str ( "Color transfer to predicted face", None, valid_list=list(ctm_str_dict.keys())[1:] )
+            else:
+                self.color_transfer_mode = md.g("color_transfer_mode")
             self.color_transfer_mode = ctm_str_dict[self.color_transfer_mode]
 
         super().ask_settings()
 
-        self.super_resolution_power = np.clip ( io.input_int ("Choose super resolution power", 0, add_info="0..100", help_message="Enhance details by applying superresolution network."), 0, 100)
+        if md.g("super_resolution_power") is None:
+            self.super_resolution_power = np.clip ( io.input_int ("Choose super resolution power", 0, add_info="0..100", help_message="Enhance details by applying superresolution network."), 0, 100)
+        else:
+            self.super_resolution_power = md.g("super_resolution_power")
 
         if 'raw' not in self.mode:
-            self.image_denoise_power = np.clip ( io.input_int ("Choose image degrade by denoise power", 0, add_info="0..500"), 0, 500)
-            self.bicubic_degrade_power = np.clip ( 
io.input_int ("Choose image degrade by bicubic rescale power", 0, add_info="0..100"), 0, 100) - self.color_degrade_power = np.clip ( io.input_int ("Degrade color power of final image", 0, add_info="0..100"), 0, 100) + if md.g("image_denoise_power") is None: + self.image_denoise_power = np.clip ( io.input_int ("Choose image degrade by denoise power", 0, add_info="0..500"), 0, 500) + else: + self.image_denoise_power = md.g("image_denoise_power") + + if md.g("bicubic_degrade_power") is None: + self.bicubic_degrade_power = np.clip ( io.input_int ("Choose image degrade by bicubic rescale power", 0, add_info="0..100"), 0, 100) + else: + self.bicubic_degrade_power = md.g("bicubic_degrade_power") + + if md.g("color_degrade_power") is None: + self.color_degrade_power = np.clip ( io.input_int ("Degrade color power of final image", 0, add_info="0..100"), 0, 100) + else: + self.color_degrade_power = md.g("color_degrade_power") io.log_info ("")