Merge pull request #30 from faceshiftlabs/build/commits-from-upstream

Build/commits from upstream
This commit is contained in:
Jeremy Hummel 2019-08-20 17:24:39 -07:00 committed by GitHub
commit 35a17511c3
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 210 additions and 186 deletions

View file

@ -6,8 +6,6 @@
## **DeepFaceLab** is a tool that utilizes machine learning to replace faces in videos.
If you like this software, please consider a donation.
GOAL: next DeepFaceLab update.
- ### [Gallery](doc/gallery/doc_gallery.md)
@ -16,8 +14,6 @@ GOAL: next DeepFacelab update.
[English (google translated)](doc/manual_en_google_translated.pdf)
[На русском](doc/manual_ru.pdf)
- ### [Prebuilt windows app](doc/doc_prebuilt_windows_app.md)
- ### Forks
@ -29,13 +25,3 @@ GOAL: next DeepFacelab update.
- ### [Ready to work facesets](doc/doc_ready_to_work_facesets.md)
- ### [Build and repository info](doc/doc_build_and_repository_info.md)
- ### Communication groups:
(Chinese) QQ group 951138799 for ML/AI experts
[deepfakes (Chinese)](https://deepfakescn.com)
[deepfakes (Chinese) (outdated)](https://deepfakes.com.cn/)
[reddit (English)](https://www.reddit.com/r/GifFakes/new/)

View file

@ -33,6 +33,7 @@ class InteractBase(object):
self.key_events = {}
self.pg_bar = None
self.focus_wnd_name = None
self.error_log_line_prefix = '/!\\ '
def is_support_windows(self):
    # Base interact backend reports no Windows-specific GUI support;
    # platform-specific subclasses are expected to override this probe.
    windows_supported = False
    return windows_supported
@ -65,10 +66,22 @@ class InteractBase(object):
raise NotImplemented
def log_info(self, msg, end='\n'):
    """Print an informational message, cooperating with an active progress bar.

    When a tqdm bar is active (self.pg_bar is set), the message is routed
    through tqdm.write so it appears above the bar without corrupting it.
    NOTE(review): tqdm.write does not honor the `end` argument — confirm
    callers relying on end='' never log while a bar is active.
    """
    if self.pg_bar is not None:
        try:
            # Print above the progress bar.
            tqdm.write(msg)
            return
        except Exception:
            # Narrowed from bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed; fall back to a plain print on failure.
            pass
    print(msg, end=end)
def log_err(self, msg, end='\n'):
    """Print an error message, prefixed with self.error_log_line_prefix.

    Fixes two defects: (1) a leftover unconditional `print(msg, end=end)`
    before the guarded block caused every error to be printed twice (once
    without the prefix); (2) the bare `except:` swallowed SystemExit and
    KeyboardInterrupt — narrowed to Exception.
    """
    if self.pg_bar is not None:
        try:
            # Print above the active progress bar without corrupting it.
            tqdm.write(f'{self.error_log_line_prefix}{msg}')
            return
        except Exception:
            pass  # Fall back to a normal print upon failure
    print(f'{self.error_log_line_prefix}{msg}', end=end)
def named_window(self, wnd_name):
if wnd_name not in self.named_windows:
@ -150,8 +163,11 @@ class InteractBase(object):
else: print("progress_bar not set.")
def progress_bar_generator(self, data, desc, leave=True):
    """Yield items from *data* while showing a tqdm progress bar.

    The bar is stored on self.pg_bar so log_info/log_err can print around
    it. Fixes: (1) a leftover pre-diff loop line duplicated the iteration;
    (2) cleanup now runs in a `finally` block, so the bar is closed and
    self.pg_bar reset even if the consumer stops iterating early or the
    loop raises (generator close triggers GeneratorExit through finally).
    """
    self.pg_bar = tqdm(data, desc=desc, leave=leave, ascii=True)
    try:
        for x in self.pg_bar:
            yield x
    finally:
        self.pg_bar.close()
        self.pg_bar = None
def process_messages(self, sleep_time=0):
    # Thin delegation to the backend-specific hook; sleep_time is forwarded
    # unchanged (presumably seconds to idle while pumping UI events —
    # confirm against the concrete on_process_messages implementation).
    self.on_process_messages(sleep_time)

View file

@ -12,7 +12,7 @@ import cv2
import models
from interact import interact as io
def trainerThread (s2c, c2s, args, device_args):
def trainerThread (s2c, c2s, e, args, device_args):
while True:
try:
start_time = time.time()
@ -66,6 +66,7 @@ def trainerThread (s2c, c2s, args, device_args):
else:
previews = [( 'debug, press update for new', model.debug_one_iter())]
c2s.put ( {'op':'show', 'previews': previews} )
e.set() #Set the GUI Thread as Ready
if model.is_first_run():
@ -190,9 +191,12 @@ def main(args, device_args):
s2c = queue.Queue()
c2s = queue.Queue()
thread = threading.Thread(target=trainerThread, args=(s2c, c2s, args, device_args) )
e = threading.Event()
thread = threading.Thread(target=trainerThread, args=(s2c, c2s, e, args, device_args) )
thread.start()
e.wait() #Wait for inital load to occur.
if no_preview:
while True:
if not c2s.empty():

View file

@ -283,36 +283,54 @@ class ModelBase(object):
else:
self.sample_for_preview = self.generate_next_sample()
self.last_sample = self.sample_for_preview
###Generate text summary of model hyperparameters
#Find the longest key name and value string. Used as column widths.
width_name = max([len(k) for k in self.options.keys()] + [17]) + 1 # Single space buffer to left edge. Minimum of 17, the length of the longest static string used "Current iteration"
width_value = max([len(str(x)) for x in self.options.values()] + [len(str(self.iter)), len(self.get_model_name())]) + 1 # Single space buffer to right edge
if not self.device_config.cpu_only: #Check length of GPU names
width_value = max([len(nnlib.device.getDeviceName(idx))+1 for idx in self.device_config.gpu_idxs] + [width_value])
width_total = width_name + width_value + 2 #Plus 2 for ": "
model_summary_text = []
model_summary_text += [f'=={" Model Summary ":=^{width_total}}=='] # Model/status summary
model_summary_text += [f'=={" "*width_total}==']
model_summary_text += [f'=={"Model name": >{width_name}}: {self.get_model_name(): <{width_value}}=='] # Name
model_summary_text += [f'=={" "*width_total}==']
model_summary_text += [f'=={"Current iteration": >{width_name}}: {str(self.iter): <{width_value}}=='] # Iter
model_summary_text += [f'=={" "*width_total}==']
model_summary_text += ["===== Model summary ====="]
model_summary_text += ["== Model name: " + self.get_model_name()]
model_summary_text += ["=="]
model_summary_text += ["== Current iteration: " + str(self.iter)]
model_summary_text += ["=="]
model_summary_text += ["== Model options:"]
model_summary_text += [f'=={" Model Options ":-^{width_total}}=='] # Model options
model_summary_text += [f'=={" "*width_total}==']
for key in self.options.keys():
model_summary_text += ["== |== %s : %s" % (key, self.options[key])]
model_summary_text += [f'=={key: >{width_name}}: {str(self.options[key]): <{width_value}}=='] # self.options key/value pairs
model_summary_text += [f'=={" "*width_total}==']
model_summary_text += [f'=={" Running On ":-^{width_total}}=='] # Training hardware info
model_summary_text += [f'=={" "*width_total}==']
if self.device_config.multi_gpu:
model_summary_text += ["== |== multi_gpu : True "]
model_summary_text += ["== Running on:"]
model_summary_text += [f'=={"Using multi_gpu": >{width_name}}: {"True": <{width_value}}=='] # multi_gpu
model_summary_text += [f'=={" "*width_total}==']
if self.device_config.cpu_only:
model_summary_text += ["== |== [CPU]"]
model_summary_text += [f'=={"Using device": >{width_name}}: {"CPU": <{width_value}}=='] # cpu_only
else:
for idx in self.device_config.gpu_idxs:
model_summary_text += ["== |== [%d : %s]" % (idx, nnlib.device.getDeviceName(idx))]
model_summary_text += [f'=={"Device index": >{width_name}}: {idx: <{width_value}}=='] # GPU hardware device index
model_summary_text += [f'=={"Name": >{width_name}}: {nnlib.device.getDeviceName(idx): <{width_value}}=='] # GPU name
vram_str = f'{nnlib.device.getDeviceVRAMTotalGb(idx):.2f}GB' # GPU VRAM - Formated as #.## (or ##.##)
model_summary_text += [f'=={"VRAM": >{width_name}}: {vram_str: <{width_value}}==']
model_summary_text += [f'=={" "*width_total}==']
model_summary_text += [f'=={"="*width_total}==']
if not self.device_config.cpu_only and self.device_config.gpu_vram_gb[0] == 2:
model_summary_text += ["=="]
model_summary_text += ["== WARNING: You are using 2GB GPU. Result quality may be significantly decreased."]
model_summary_text += ["== If training does not start, close all programs and try again."]
model_summary_text += ["== Also you can disable Windows Aero Desktop to get extra free VRAM."]
model_summary_text += ["=="]
if not self.device_config.cpu_only and self.device_config.gpu_vram_gb[0] <= 2: # Low VRAM warning
model_summary_text += ["/!\\"]
model_summary_text += ["/!\\ WARNING:"]
model_summary_text += ["/!\\ You are using a GPU with 2GB or less VRAM. This may significantly reduce the quality of your result!"]
model_summary_text += ["/!\\ If training does not start, close all programs and try again."]
model_summary_text += ["/!\\ Also you can disable Windows Aero Desktop to increase available VRAM."]
model_summary_text += ["/!\\"]
model_summary_text += ["========================="]
model_summary_text = "\r\n".join(model_summary_text)
model_summary_text = "\n".join (model_summary_text)
self.model_summary_text = model_summary_text
io.log_info(model_summary_text)

View file

@ -36,14 +36,14 @@ def get_image_unique_filestem_paths(dir_path: str, verbose_print_func: Optional[
def get_file_paths(dir_path: str) -> List[str]:
    """Return the sorted paths of all regular files directly inside dir_path.

    Returns an empty list when the directory does not exist. Results are
    sorted because os.scandir yields entries in arbitrary, filesystem-
    dependent order; sorting makes downstream processing deterministic.

    Fix: the garbled merge left an unreachable-ordering bug — an unsorted
    `return` preceded the sorted one, so the sort was never applied. Only
    the sorted return is kept.
    """
    dir_path = Path(dir_path)
    if dir_path.exists():
        return sorted(x.path for x in scandir(str(dir_path)) if x.is_file())
    return []
def get_all_dir_names(dir_path: str) -> List[str]:
    """Return the sorted names of all immediate subdirectories of dir_path.

    Returns an empty list when the directory does not exist. Sorted for
    deterministic ordering, since os.scandir yields entries in arbitrary,
    filesystem-dependent order.

    Fix: the garbled merge left an unsorted `return` before the sorted
    one, making the sort unreachable. Only the sorted return is kept.
    """
    dir_path = Path(dir_path)
    if dir_path.exists():
        return sorted(x.name for x in scandir(str(dir_path)) if x.is_dir())
    return []