Fixes bug when using both ms-ssim and background power

commit 57688fa93f
Author: jh
Date:   2021-03-25 14:42:55 -07:00

@@ -476,7 +476,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
 if self.options['background_power'] > 0:
     bg_factor = self.options['background_power']
     if self.options['ms_ssim_loss']:
-        gpu_src_loss = 10 * nn.MsSsim(resolution)(gpu_target_src, gpu_pred_src_src, max_val=1.0)
+        gpu_src_loss += bg_factor * 10 * nn.MsSsim(resolution)(gpu_target_src, gpu_pred_src_src, max_val=1.0)
     else:
         if resolution < 256:
             gpu_src_loss += bg_factor * tf.reduce_mean ( 10*nn.dssim(gpu_target_src, gpu_pred_src_src, max_val=1.0, filter_size=int(resolution/11.6)), axis=[1])
@@ -518,7 +518,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
 if self.options['background_power'] > 0:
     bg_factor = self.options['background_power']
     if self.options['ms_ssim_loss']:
-        gpu_src_loss = 10 * nn.MsSsim(resolution)(gpu_target_dst, gpu_pred_dst_dst, max_val=1.0)
+        gpu_src_loss += bg_factor * 10 * nn.MsSsim(resolution)(gpu_target_dst, gpu_pred_dst_dst, max_val=1.0)
     else:
         if resolution < 256:
             gpu_dst_loss += bg_factor * tf.reduce_mean ( 10*nn.dssim(gpu_target_dst, gpu_pred_dst_dst, max_val=1.0, filter_size=int(resolution/11.6)), axis=[1])
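The change is small but consequential: before this commit, enabling both ms_ssim_loss and background_power made the MS-SSIM branch overwrite the already-accumulated loss with an unweighted whole-image term (assignment instead of +=, and no bg_factor), so the primary masked loss was discarded and the background_power weighting had no effect. The sketch below illustrates only the corrected accumulation pattern and is not the repository's code: tf.image.ssim_multiscale stands in for nn.MsSsim, and the masked primary term together with the mask name gpu_target_srcm are assumptions added for illustration.

# Minimal sketch of the corrected accumulation pattern (not the repository's
# code). tf.image.ssim_multiscale stands in for nn.MsSsim; the masked primary
# term and the mask tensor gpu_target_srcm are illustrative assumptions.
import tensorflow as tf

def ms_ssim_loss(target, pred, max_val=1.0):
    # Per-image MS-SSIM converted to a loss. Expects NHWC tensors; the
    # default 5 scales of tf.image.ssim_multiscale need H and W >= 176.
    return 1.0 - tf.image.ssim_multiscale(target, pred, max_val=max_val)

def src_loss_with_background(gpu_target_src, gpu_pred_src_src,
                             gpu_target_srcm, bg_factor):
    # Primary term on the masked face region (computed upstream in the
    # real model before the background_power block runs).
    gpu_src_loss = 10 * ms_ssim_loss(gpu_target_src * gpu_target_srcm,
                                     gpu_pred_src_src * gpu_target_srcm)
    # The fix: accumulate (+=) a whole-image term weighted by bg_factor
    # instead of overwriting the masked loss with an unweighted assignment.
    gpu_src_loss += bg_factor * 10 * ms_ssim_loss(gpu_target_src,
                                                  gpu_pred_src_src)
    return gpu_src_loss

With the +=, the whole-image MS-SSIM term acts as a secondary penalty scaled by bg_factor on top of the masked loss, mirroring how the DSSIM branch in the else path already accumulated its background term.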