From c71b358b89e4fddaf9ae21d883984da9aad1b8a4 Mon Sep 17 00:00:00 2001
From: slowy07
Date: Tue, 10 Aug 2021 17:14:52 +0700
Subject: [PATCH] fix: typo spelling grammar

---
 core/cv2ex.py                   | 2 +-
 core/joblib/SubprocessorBase.py | 2 +-
 mainscripts/FacesetEnhancer.py  | 2 +-
 mainscripts/FacesetResizer.py   | 2 +-
 mainscripts/Merger.py           | 2 +-
 mainscripts/Trainer.py          | 2 +-
 models/ModelBase.py             | 2 +-
 models/Model_AMP/Model.py       | 2 +-
 models/Model_Quick96/Model.py   | 2 +-
 models/Model_SAEHD/Model.py     | 2 +-
 10 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/core/cv2ex.py b/core/cv2ex.py
index aa5d73c..82c0011 100644
--- a/core/cv2ex.py
+++ b/core/cv2ex.py
@@ -19,7 +19,7 @@ def cv2_imread(filename, flags=cv2.IMREAD_UNCHANGED, loader_func=None, verbose=T
         return cv2.imdecode(numpyarray, flags)
     except:
         if verbose:
-            io.log_err(f"Exception occured in cv2_imread : {traceback.format_exc()}")
+            io.log_err(f"Exception occurred in cv2_imread : {traceback.format_exc()}")
         return None
 
 def cv2_imwrite(filename, img, *args):
diff --git a/core/joblib/SubprocessorBase.py b/core/joblib/SubprocessorBase.py
index 17e7056..bdb7021 100644
--- a/core/joblib/SubprocessorBase.py
+++ b/core/joblib/SubprocessorBase.py
@@ -225,7 +225,7 @@ class Subprocessor(object):
                             self.sent_data = None
                             cli.state = 0
                         elif op == 'error':
-                            #some error occured while process data, returning chunk to on_data_return
+                            #some error occurred while process data, returning chunk to on_data_return
                             err_msg = obj.get('err_msg', None)
                             if err_msg is not None:
                                 io.log_info(f'Error while processing data: {err_msg}')
diff --git a/mainscripts/FacesetEnhancer.py b/mainscripts/FacesetEnhancer.py
index 3de9cea..76ea08c 100644
--- a/mainscripts/FacesetEnhancer.py
+++ b/mainscripts/FacesetEnhancer.py
@@ -118,7 +118,7 @@ class FacesetEnhancerSubprocessor(Subprocessor):
 
                 return (1, filepath, output_filepath)
             except:
-                self.log_err (f"Exception occured while processing file {filepath}. Error: {traceback.format_exc()}")
+                self.log_err (f"Exception occurred while processing file {filepath}. Error: {traceback.format_exc()}")
 
             return (0, filepath, None)
 
diff --git a/mainscripts/FacesetResizer.py b/mainscripts/FacesetResizer.py
index 4bcd1b8..4c2fe0d 100644
--- a/mainscripts/FacesetResizer.py
+++ b/mainscripts/FacesetResizer.py
@@ -160,7 +160,7 @@ class FacesetResizerSubprocessor(Subprocessor):
 
                 return (1, filepath, output_filepath)
             except:
-                self.log_err (f"Exception occured while processing file {filepath}. Error: {traceback.format_exc()}")
+                self.log_err (f"Exception occurred while processing file {filepath}. Error: {traceback.format_exc()}")
 
             return (0, filepath, None)
 
diff --git a/mainscripts/Merger.py b/mainscripts/Merger.py
index 0703dc1..348e548 100644
--- a/mainscripts/Merger.py
+++ b/mainscripts/Merger.py
@@ -88,7 +88,7 @@ def main (model_class_name=None,
         try:
             packed_samples = samplelib.PackedFaceset.load(aligned_path)
         except:
-            io.log_err(f"Error occured while loading samplelib.PackedFaceset.load {str(aligned_path)}, {traceback.format_exc()}")
+            io.log_err(f"Error occurred while loading samplelib.PackedFaceset.load {str(aligned_path)}, {traceback.format_exc()}")
 
         if packed_samples is not None:
 
diff --git a/mainscripts/Trainer.py b/mainscripts/Trainer.py
index 5cc8acf..52dc317 100644
--- a/mainscripts/Trainer.py
+++ b/mainscripts/Trainer.py
@@ -220,7 +220,7 @@ def main(**kwargs):
     thread = threading.Thread(target=trainerThread, args=(s2c, c2s, e), kwargs=kwargs )
     thread.start()
 
-    e.wait() #Wait for inital load to occur.
+    e.wait() #Wait for initial load to occur.
 
     if no_preview:
         while True:
diff --git a/models/ModelBase.py b/models/ModelBase.py
index f446efa..a388a41 100644
--- a/models/ModelBase.py
+++ b/models/ModelBase.py
@@ -578,7 +578,7 @@ class ModelBase(object):
             for device in self.device_config.devices:
                 summary_text += [f'=={"Device index": >{width_name}}: {device.index: <{width_value}}=='] # GPU hardware device index
                 summary_text += [f'=={"Name": >{width_name}}: {device.name: <{width_value}}=='] # GPU name
-                vram_str = f'{device.total_mem_gb:.2f}GB' # GPU VRAM - Formated as #.## (or ##.##)
+                vram_str = f'{device.total_mem_gb:.2f}GB' # GPU VRAM - Formatted as #.## (or ##.##)
                 summary_text += [f'=={"VRAM": >{width_name}}: {vram_str: <{width_value}}==']
                 summary_text += [f'=={" "*width_total}==']
                 summary_text += [f'=={"="*width_total}==']
diff --git a/models/Model_AMP/Model.py b/models/Model_AMP/Model.py
index 46d065a..9109ff3 100644
--- a/models/Model_AMP/Model.py
+++ b/models/Model_AMP/Model.py
@@ -325,7 +325,7 @@ class AMPModel(ModelBase):
             for gpu_id in range(gpu_count):
                 with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ):
                     with tf.device(f'/CPU:0'):
-                        # slice on CPU, otherwise all batch data will be transfered to GPU first
+                        # slice on CPU, otherwise all batch data will be transferred to GPU first
                         batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu )
                         gpu_warped_src = self.warped_src [batch_slice,:,:,:]
                         gpu_warped_dst = self.warped_dst [batch_slice,:,:,:]
diff --git a/models/Model_Quick96/Model.py b/models/Model_Quick96/Model.py
index fa9e215..076ba3c 100644
--- a/models/Model_Quick96/Model.py
+++ b/models/Model_Quick96/Model.py
@@ -99,7 +99,7 @@ class QModel(ModelBase):
                 with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ):
                     batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu )
                     with tf.device(f'/CPU:0'):
-                        # slice on CPU, otherwise all batch data will be transfered to GPU first
+                        # slice on CPU, otherwise all batch data will be transferred to GPU first
                         gpu_warped_src = self.warped_src [batch_slice,:,:,:]
                         gpu_warped_dst = self.warped_dst [batch_slice,:,:,:]
                         gpu_target_src = self.target_src [batch_slice,:,:,:]
diff --git a/models/Model_SAEHD/Model.py b/models/Model_SAEHD/Model.py
index dda4a4b..3febdd9 100644
--- a/models/Model_SAEHD/Model.py
+++ b/models/Model_SAEHD/Model.py
@@ -360,7 +360,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
             for gpu_id in range(gpu_count):
                 with tf.device( f'/{devices[gpu_id].tf_dev_type}:{gpu_id}' if len(devices) != 0 else f'/CPU:0' ):
                     with tf.device(f'/CPU:0'):
-                        # slice on CPU, otherwise all batch data will be transfered to GPU first
+                        # slice on CPU, otherwise all batch data will be transferred to GPU first
                         batch_slice = slice( gpu_id*bs_per_gpu, (gpu_id+1)*bs_per_gpu )
                         gpu_warped_src = self.warped_src [batch_slice,:,:,:]
                         gpu_warped_dst = self.warped_dst [batch_slice,:,:,:]