Mirror of https://github.com/iperov/DeepFaceLab.git
Motion blur

commit b2cf0dec59 (parent 3855adb1bd)
2 changed files with 18 additions and 2 deletions
File 1 of 2 (class SAEHDModel):

@@ -58,6 +58,8 @@ class SAEHDModel(ModelBase):
         default_random_warp = self.options['random_warp'] = self.load_or_def_option('random_warp', True)
         default_random_downsample = self.options['random_downsample'] = self.load_or_def_option('random_downsample', False)
         default_random_noise = self.options['random_noise'] = self.load_or_def_option('random_noise', False)
+        default_random_blur = self.options['random_blur'] = self.load_or_def_option('random_blur', False)
+
         default_background_power = self.options['background_power'] = self.load_or_def_option('background_power', 0.0)
         default_true_face_power = self.options['true_face_power'] = self.load_or_def_option('true_face_power', 0.0)
         default_face_style_power = self.options['face_style_power'] = self.load_or_def_option('face_style_power', 0.0)
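For readers unfamiliar with the option plumbing: load_or_def_option lives in ModelBase and is not part of this diff. The sketch below shows its assumed behaviour (use the value persisted with the model if present, otherwise the supplied default), with a hypothetical standalone helper standing in for the real method.

    # Minimal sketch of the assumed option-loading behaviour (hypothetical helper,
    # not the actual ModelBase.load_or_def_option implementation).
    def load_or_def_option(saved_options: dict, name: str, def_value):
        # Prefer the value saved alongside the model, else fall back to the default.
        return saved_options.get(name, def_value)

    # A model trained before this commit has no 'random_blur' entry saved,
    # so the new option starts out as False:
    default_random_blur = load_or_def_option({}, 'random_blur', False)  # -> False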
@@ -163,7 +165,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...

         self.options['random_downsample'] = io.input_bool("Enable random downsample of samples", default_random_downsample, help_message="")
         self.options['random_noise'] = io.input_bool("Enable random noise added to samples", default_random_noise, help_message="")
-        # self.options['random_blur'] = io.input_bool("Enable random blur of samples", False, help_message="")
+        self.options['random_blur'] = io.input_bool("Enable random blur of samples", False, help_message="")
         # self.options['random_jpeg'] = io.input_bool("Enable random jpeg compression of samples", False, help_message="")

         self.options['gan_version'] = np.clip (io.input_int("GAN version", default_gan_version, add_info="2 or 3", help_message="Choose GAN version (v2: 7/16/2020, v3: 1/3/2021):"), 2, 3)
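Note that, unlike the neighbouring prompts, the uncommented random_blur prompt passes the literal False rather than default_random_blur, so the previously saved answer is not offered back as the prompt default. A variant that mirrors the other options would presumably look like this (hypothetical, not part of the commit):

    # Hypothetical: reuse the saved default, as the downsample/noise prompts do.
    self.options['random_blur'] = io.input_bool("Enable random blur of samples", default_random_blur, help_message="")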
@@ -755,6 +757,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
         output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':random_warp,
                                  'random_downsample': self.options['random_downsample'],
                                  'random_noise': self.options['random_noise'],
+                                 'random_blur': self.options['random_blur'],
                                  'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode,
                                  'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                                 {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : channel_type, 'ct_mode': ct_mode, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
@@ -769,6 +772,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
         output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':random_warp,
                                  'random_downsample': self.options['random_downsample'],
                                  'random_noise': self.options['random_noise'],
+                                 'random_blur': self.options['random_blur'],
                                  'transform':True, 'channel_type' : channel_type, 'ct_mode': fs_aug,
                                  'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                                 {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False , 'transform':True, 'channel_type' : channel_type, 'ct_mode': fs_aug, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
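Both sample configurations above (the ct_mode and fs_aug variants) forward the flag through output_sample_types, and SampleProcessor reads it back with opts.get in the second file below. A stripped-down sketch of that round trip, using a reduced, hypothetical options dict:

    # Sketch of how a per-sample option travels from the model to the sample processor.
    opts = {
        'random_downsample': False,
        'random_noise': False,
        'random_blur': True,   # the new flag added by this commit
    }

    # SampleProcessor side: a missing key falls back to False, so samples produced
    # for older models that never set 'random_blur' keep their previous behaviour.
    random_blur = opts.get('random_blur', False)
    print(random_blur)  # True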
File 2 of 2 (class SampleProcessor):

@@ -7,7 +7,7 @@ import numpy as np

 from core import imagelib
 from core.cv2ex import *
-from core.imagelib import sd
+from core.imagelib import sd, blursharpen
 from core.imagelib.color_transfer import random_lab_rotation
 from facelib import FaceType, LandmarksProcessor
@@ -114,6 +114,7 @@ class SampleProcessor(object):
                 transform = opts.get('transform', False)
                 random_downsample = opts.get('random_downsample', False)
                 random_noise = opts.get('random_noise', False)
+                random_blur = opts.get('random_blur', False)
                 motion_blur = opts.get('motion_blur', None)
                 gaussian_blur = opts.get('gaussian_blur', None)
                 random_bilinear_resize = opts.get('random_bilinear_resize', None)
@@ -237,6 +238,17 @@ class SampleProcessor(object):
                             noise = np.random.poisson(lam=noise_lam, size=img.shape)
                             img += noise / 255.0

+                        # Apply random blur
+                        if random_blur:
+                            blur_type = np.random.choice(['motion', 'gaussian'])
+
+                            if blur_type == 'motion':
+                                blur_k = np.random.randint(10, 20)
+                                blur_angle = 360 * np.random.random()
+                                img = blursharpen.LinearMotionBlur(img, blur_k, blur_angle)
+                            elif blur_type == 'gaussian':
+                                pass
+
                         img = imagelib.warp_by_params (params_per_resolution[resolution], img, warp, transform, can_flip=True, border_replicate=border_replicate)
                         img = np.clip(img.astype(np.float32), 0, 1)
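blursharpen.LinearMotionBlur comes from core.imagelib.blursharpen (imported above) and presumably applies a directional blur with the given kernel size and angle; the 'gaussian' branch is currently a no-op (pass). The sketch below shows what the two branches amount to using plain OpenCV calls rather than the project's helpers; the line-kernel construction, the angle convention, and the Gaussian branch are assumptions, not part of the commit.

    import cv2
    import numpy as np

    def linear_motion_blur(img: np.ndarray, size: int, angle: float) -> np.ndarray:
        # Directional blur: convolve with a line kernel rotated to the requested angle.
        kernel = np.zeros((size, size), dtype=np.float32)
        kernel[size // 2, :] = 1.0
        rot = cv2.getRotationMatrix2D((size / 2 - 0.5, size / 2 - 0.5), angle, 1.0)
        kernel = cv2.warpAffine(kernel, rot, (size, size))
        kernel /= kernel.sum()
        return cv2.filter2D(img, -1, kernel)

    def random_blur(img: np.ndarray) -> np.ndarray:
        # Mirrors the structure of the new block in the diff; the Gaussian branch is a
        # hypothetical completion of the 'pass' placeholder.
        if np.random.choice(['motion', 'gaussian']) == 'motion':
            k = np.random.randint(10, 20)        # same size range as the commit
            angle = 360 * np.random.random()
            return linear_motion_blur(img, k, angle)
        k = np.random.randint(1, 10) * 2 + 1     # GaussianBlur requires an odd kernel size
        return cv2.GaussianBlur(img, (k, k), 0)

Either way, the np.clip call that follows in the pipeline keeps img within [0, 1].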