fix: typos, spelling, and grammar

This commit is contained in:
slowy07 2021-08-10 17:14:52 +07:00
commit c71b358b89
10 changed files with 10 additions and 10 deletions

View file

@ -19,7 +19,7 @@ def cv2_imread(filename, flags=cv2.IMREAD_UNCHANGED, loader_func=None, verbose=T
return cv2.imdecode(numpyarray, flags) return cv2.imdecode(numpyarray, flags)
except: except:
if verbose: if verbose:
io.log_err(f"Exception occured in cv2_imread : {traceback.format_exc()}") io.log_err(f"Exception occurred in cv2_imread : {traceback.format_exc()}")
return None return None
def cv2_imwrite(filename, img, *args): def cv2_imwrite(filename, img, *args):

View file

@ -225,7 +225,7 @@ class Subprocessor(object):
self.sent_data = None self.sent_data = None
cli.state = 0 cli.state = 0
elif op == 'error': elif op == 'error':
#some error occured while process data, returning chunk to on_data_return #some error occurred while process data, returning chunk to on_data_return
err_msg = obj.get('err_msg', None) err_msg = obj.get('err_msg', None)
if err_msg is not None: if err_msg is not None:
io.log_info(f'Error while processing data: {err_msg}') io.log_info(f'Error while processing data: {err_msg}')

View file

@ -118,7 +118,7 @@ class FacesetEnhancerSubprocessor(Subprocessor):
return (1, filepath, output_filepath) return (1, filepath, output_filepath)
except: except:
self.log_err (f"Exception occured while processing file {filepath}. Error: {traceback.format_exc()}") self.log_err (f"Exception occurred while processing file {filepath}. Error: {traceback.format_exc()}")
return (0, filepath, None) return (0, filepath, None)

View file

@ -160,7 +160,7 @@ class FacesetResizerSubprocessor(Subprocessor):
return (1, filepath, output_filepath) return (1, filepath, output_filepath)
except: except:
self.log_err (f"Exception occured while processing file {filepath}. Error: {traceback.format_exc()}") self.log_err (f"Exception occurred while processing file {filepath}. Error: {traceback.format_exc()}")
return (0, filepath, None) return (0, filepath, None)

View file

@ -88,7 +88,7 @@ def main (model_class_name=None,
try: try:
packed_samples = samplelib.PackedFaceset.load(aligned_path) packed_samples = samplelib.PackedFaceset.load(aligned_path)
except: except:
io.log_err(f"Error occured while loading samplelib.PackedFaceset.load {str(aligned_path)}, {traceback.format_exc()}") io.log_err(f"Error occurred while loading samplelib.PackedFaceset.load {str(aligned_path)}, {traceback.format_exc()}")
if packed_samples is not None: if packed_samples is not None:

View file

@ -220,7 +220,7 @@ def main(**kwargs):
thread = threading.Thread(target=trainerThread, args=(s2c, c2s, e), kwargs=kwargs ) thread = threading.Thread(target=trainerThread, args=(s2c, c2s, e), kwargs=kwargs )
thread.start() thread.start()
e.wait() #Wait for inital load to occur. e.wait() #Wait for initial load to occur.
if no_preview: if no_preview:
while True: while True:

View file

@ -578,7 +578,7 @@ class ModelBase(object):
for device in self.device_config.devices: for device in self.device_config.devices:
summary_text += [f'=={"Device index": >{width_name}}: {device.index: <{width_value}}=='] # GPU hardware device index summary_text += [f'=={"Device index": >{width_name}}: {device.index: <{width_value}}=='] # GPU hardware device index
summary_text += [f'=={"Name": >{width_name}}: {device.name: <{width_value}}=='] # GPU name summary_text += [f'=={"Name": >{width_name}}: {device.name: <{width_value}}=='] # GPU name
vram_str = f'{device.total_mem_gb:.2f}GB' # GPU VRAM - Formated as #.## (or ##.##) vram_str = f'{device.total_mem_gb:.2f}GB' # GPU VRAM - Formatted as #.## (or ##.##)
summary_text += [f'=={"VRAM": >{width_name}}: {vram_str: <{width_value}}=='] summary_text += [f'=={"VRAM": >{width_name}}: {vram_str: <{width_value}}==']
summary_text += [f'=={" "*width_total}=='] summary_text += [f'=={" "*width_total}==']
summary_text += [f'=={"="*width_total}=='] summary_text += [f'=={"="*width_total}==']

View file

@ -325,7 +325,7 @@ class AMPModel(ModelBase):
for gpu_id in range(gpu_count): for gpu_id in range(gpu_count):
with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ): with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ):
with tf.device(f'/CPU:0'): with tf.device(f'/CPU:0'):
# slice on CPU, otherwise all batch data will be transfered to GPU first # slice on CPU, otherwise all batch data will be transferred to GPU first
batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu ) batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu )
gpu_warped_src = self.warped_src [batch_slice,:,:,:] gpu_warped_src = self.warped_src [batch_slice,:,:,:]
gpu_warped_dst = self.warped_dst [batch_slice,:,:,:] gpu_warped_dst = self.warped_dst [batch_slice,:,:,:]

View file

@ -99,7 +99,7 @@ class QModel(ModelBase):
with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ): with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ):
batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu ) batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu )
with tf.device(f'/CPU:0'): with tf.device(f'/CPU:0'):
# slice on CPU, otherwise all batch data will be transfered to GPU first # slice on CPU, otherwise all batch data will be transferred to GPU first
gpu_warped_src = self.warped_src [batch_slice,:,:,:] gpu_warped_src = self.warped_src [batch_slice,:,:,:]
gpu_warped_dst = self.warped_dst [batch_slice,:,:,:] gpu_warped_dst = self.warped_dst [batch_slice,:,:,:]
gpu_target_src = self.target_src [batch_slice,:,:,:] gpu_target_src = self.target_src [batch_slice,:,:,:]

View file

@ -360,7 +360,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
for gpu_id in range(gpu_count): for gpu_id in range(gpu_count):
with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ): with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ):
with tf.device(f'/CPU:0'): with tf.device(f'/CPU:0'):
# slice on CPU, otherwise all batch data will be transfered to GPU first # slice on CPU, otherwise all batch data will be transferred to GPU first
batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu ) batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu )
gpu_warped_src = self.warped_src [batch_slice,:,:,:] gpu_warped_src = self.warped_src [batch_slice,:,:,:]
gpu_warped_dst = self.warped_dst [batch_slice,:,:,:] gpu_warped_dst = self.warped_dst [batch_slice,:,:,:]