diff --git a/models/Model_SAE/Model.py b/models/Model_SAE/Model.py index 9bf8cd5..8941a08 100644 --- a/models/Model_SAE/Model.py +++ b/models/Model_SAE/Model.py @@ -124,7 +124,7 @@ class SAEModel(ModelBase): default_apply_random_ct = ColorTransferMode.NONE if is_first_run else self.options.get('apply_random_ct', ColorTransferMode.NONE) self.options['apply_random_ct'] = io.input_int( - "Apply random color transfer to src faceset? (0) None, (1) LCT, (2) RCT, (3) RCT-masked ?:help skip:%s) : " % (default_apply_random_ct), + "Apply random color transfer to src faceset? (0) None, (1) LCT, (2) RCT, (3) RCT-c, (4) RCT-p, (5) RCT-pc, (6) mRCT, (7) mRCT-c, (8) mRCT-p, (9) mRCT-pc ?:help skip:%s) : " % (default_apply_random_ct), default_apply_random_ct, help_message="Increase variativity of src samples by apply LCT color transfer from random dst " "samples. It is like 'face_style' learning, but more precise color transfer and without " diff --git a/samplelib/SampleProcessor.py b/samplelib/SampleProcessor.py index 3d0cd6d..9d0c895 100644 --- a/samplelib/SampleProcessor.py +++ b/samplelib/SampleProcessor.py @@ -240,7 +240,7 @@ class SampleProcessor(object): if apply_ct == ColorTransferMode.LCT: img_bgr = imagelib.linear_color_transfer(img_bgr, ct_sample_bgr) - elif ColorTransferMode.RCT <= apply_ct <= ColorTransferMode.MASKED_RCT_CLIP_PAPER: + elif ColorTransferMode.RCT <= apply_ct <= ColorTransferMode.MASKED_RCT_PAPER_CLIP: ct_options = { ColorTransferMode.RCT: (False, False, False), ColorTransferMode.RCT_CLIP: (False, False, True), @@ -257,12 +257,12 @@ class SampleProcessor(object): img_bgr = imagelib.reinhard_color_transfer(img_bgr, ct_sample_bgr, clip=use_clip, preserve_paper=use_paper) else: - ct_sample_mask = ct_sample_mask or ct_sample.load_mask() + if ct_sample_mask is None: + ct_sample_mask = ct_sample.load_mask() img_bgr = imagelib.reinhard_color_transfer(img_bgr, ct_sample_bgr, clip=use_clip, preserve_paper=use_paper, source_mask=img_mask, 
target_mask=ct_sample_mask) - if normalize_std_dev: img_bgr = (img_bgr - img_bgr.mean((0, 1))) / img_bgr.std((0, 1)) elif normalize_vgg: