diff --git a/core/imagelib/filters.py b/core/imagelib/filters.py
index 961f882..ba51e07 100644
--- a/core/imagelib/filters.py
+++ b/core/imagelib/filters.py
@@ -79,8 +79,8 @@ def apply_random_bilinear_resize( img, chance, max_size_per, mask=None, rnd_stat
         rw = w - int( trg * int(w*(max_size_per/100.0)) )
         rh = h - int( trg * int(h*(max_size_per/100.0)) )
 
-        result = cv2.resize (result, (rw,rh), cv2.INTER_LINEAR )
-        result = cv2.resize (result, (w,h), cv2.INTER_LINEAR )
+        result = cv2.resize (result, (rw,rh), interpolation=cv2.INTER_LINEAR )
+        result = cv2.resize (result, (w,h), interpolation=cv2.INTER_LINEAR )
 
         if mask is not None:
             result = img*(1-mask) + result*mask
diff --git a/core/imagelib/warp.py b/core/imagelib/warp.py
index 779f483..6860413 100644
--- a/core/imagelib/warp.py
+++ b/core/imagelib/warp.py
@@ -51,7 +51,7 @@ def warp_by_params (params, img, can_warp, can_transform, can_flip, border_repli
     rw = params['rw']
 
     if (can_warp or can_transform) and rw is not None:
-        img = cv2.resize(img, (16,16), cv2_inter)
+        img = cv2.resize(img, (16,16), interpolation=cv2_inter)
 
     if can_warp:
         img = cv2.remap(img, params['mapx'], params['mapy'], cv2_inter )
@@ -60,7 +60,7 @@ def warp_by_params (params, img, can_warp, can_transform, can_flip, border_repli
 
     if (can_warp or can_transform) and rw is not None:
-        img = cv2.resize(img, (rw,rw), cv2_inter)
+        img = cv2.resize(img, (rw,rw), interpolation=cv2_inter)
 
     if len(img.shape) == 2:
         img = img[...,None]
diff --git a/facelib/FaceEnhancer.py b/facelib/FaceEnhancer.py
index 48e21f6..0b5ced3 100644
--- a/facelib/FaceEnhancer.py
+++ b/facelib/FaceEnhancer.py
@@ -248,7 +248,7 @@ class FaceEnhancer(object):
         final_img = final_img [t_padding*up_res:(h-b_padding)*up_res, l_padding*up_res:(w-r_padding)*up_res,:]
 
         if preserve_size:
-            final_img = cv2.resize (final_img, (iw,ih), cv2.INTER_LANCZOS4)
+            final_img = cv2.resize (final_img, (iw,ih), interpolation=cv2.INTER_LANCZOS4)
 
         if not is_tanh:
             final_img = np.clip( final_img/2+0.5, 0, 1 )
@@ -278,7 +278,7 @@ class FaceEnhancer(object):
             preupscale_rate = 1.0 / ( max(h,w) / patch_size )
 
             if preupscale_rate != 1.0:
-                inp_img = cv2.resize (inp_img, ( int(w*preupscale_rate), int(h*preupscale_rate) ), cv2.INTER_LANCZOS4)
+                inp_img = cv2.resize (inp_img, ( int(w*preupscale_rate), int(h*preupscale_rate) ), interpolation=cv2.INTER_LANCZOS4)
 
             h,w,c = inp_img.shape
             i_max = w-patch_size+1
@@ -310,10 +310,10 @@ class FaceEnhancer(object):
             final_img /= final_img_div
 
             if preserve_size:
-                final_img = cv2.resize (final_img, (w,h), cv2.INTER_LANCZOS4)
+                final_img = cv2.resize (final_img, (w,h), interpolation=cv2.INTER_LANCZOS4)
             else:
                 if preupscale_rate != 1.0:
-                    final_img = cv2.resize (final_img, (tw,th), cv2.INTER_LANCZOS4)
+                    final_img = cv2.resize (final_img, (tw,th), interpolation=cv2.INTER_LANCZOS4)
 
             if not is_tanh:
                 final_img = np.clip( final_img/2+0.5, 0, 1 )
diff --git a/merger/MergeMasked.py b/merger/MergeMasked.py
index 1ca47de..bcdcf8d 100644
--- a/merger/MergeMasked.py
+++ b/merger/MergeMasked.py
@@ -54,11 +54,11 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,
         prd_face_bgr = np.clip(prd_face_bgr, 0, 1)
 
     if cfg.super_resolution_power != 0:
-        prd_face_mask_a_0 = cv2.resize (prd_face_mask_a_0, (output_size, output_size), cv2.INTER_CUBIC)
-        prd_face_dst_mask_a_0 = cv2.resize (prd_face_dst_mask_a_0, (output_size, output_size), cv2.INTER_CUBIC)
+        prd_face_mask_a_0 = cv2.resize (prd_face_mask_a_0, (output_size, output_size), interpolation=cv2.INTER_CUBIC)
+        prd_face_dst_mask_a_0 = cv2.resize (prd_face_dst_mask_a_0, (output_size, output_size), interpolation=cv2.INTER_CUBIC)
 
     if cfg.mask_mode == 1: #dst
-        wrk_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, (output_size,output_size), cv2.INTER_CUBIC)
+        wrk_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, (output_size,output_size), interpolation=cv2.INTER_CUBIC)
     elif cfg.mask_mode == 2: #learned-prd
         wrk_face_mask_a_0 = prd_face_mask_a_0
     elif cfg.mask_mode == 3: #learned-dst
@@ -70,16 +70,16 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,
     elif cfg.mask_mode >= 6 and cfg.mask_mode <= 9: #XSeg modes
         if cfg.mask_mode == 6 or cfg.mask_mode == 8 or cfg.mask_mode == 9:
             # obtain XSeg-prd
-            prd_face_xseg_bgr = cv2.resize (prd_face_bgr, (xseg_input_size,)*2, cv2.INTER_CUBIC)
+            prd_face_xseg_bgr = cv2.resize (prd_face_bgr, (xseg_input_size,)*2, interpolation=cv2.INTER_CUBIC)
             prd_face_xseg_mask = xseg_256_extract_func(prd_face_xseg_bgr)
-            X_prd_face_mask_a_0 = cv2.resize ( prd_face_xseg_mask, (output_size, output_size), cv2.INTER_CUBIC)
+            X_prd_face_mask_a_0 = cv2.resize ( prd_face_xseg_mask, (output_size, output_size), interpolation=cv2.INTER_CUBIC)
 
         if cfg.mask_mode >= 7 and cfg.mask_mode <= 9:
             # obtain XSeg-dst
             xseg_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, xseg_input_size, face_type=cfg.face_type)
             dst_face_xseg_bgr = cv2.warpAffine(img_bgr, xseg_mat, (xseg_input_size,)*2, flags=cv2.INTER_CUBIC )
             dst_face_xseg_mask = xseg_256_extract_func(dst_face_xseg_bgr)
-            X_dst_face_mask_a_0 = cv2.resize (dst_face_xseg_mask, (output_size,output_size), cv2.INTER_CUBIC)
+            X_dst_face_mask_a_0 = cv2.resize (dst_face_xseg_mask, (output_size,output_size), interpolation=cv2.INTER_CUBIC)
 
         if cfg.mask_mode == 6: #'XSeg-prd'
             wrk_face_mask_a_0 = X_prd_face_mask_a_0
@@ -94,7 +94,7 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,
 
     # resize to mask_subres_size
     if wrk_face_mask_a_0.shape[0] != mask_subres_size:
-        wrk_face_mask_a_0 = cv2.resize (wrk_face_mask_a_0, (mask_subres_size, mask_subres_size), cv2.INTER_CUBIC)
+        wrk_face_mask_a_0 = cv2.resize (wrk_face_mask_a_0, (mask_subres_size, mask_subres_size), interpolation=cv2.INTER_CUBIC)
 
     # process mask in local predicted space
    if 'raw' not in cfg.mode:
@@ -131,7 +131,7 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,
         img_face_mask_a [ img_face_mask_a < (1.0/255.0) ] = 0.0 # get rid of noise
 
         if wrk_face_mask_a_0.shape[0] != output_size:
-            wrk_face_mask_a_0 = cv2.resize (wrk_face_mask_a_0, (output_size,output_size), cv2.INTER_CUBIC)
+            wrk_face_mask_a_0 = cv2.resize (wrk_face_mask_a_0, (output_size,output_size), interpolation=cv2.INTER_CUBIC)
 
         wrk_face_mask_a = wrk_face_mask_a_0[...,None]
 
@@ -293,8 +293,8 @@ def MergeMaskedFace (predictor_func, predictor_input_shape,
 
             if cfg.bicubic_degrade_power != 0:
                 p = 1.0 - cfg.bicubic_degrade_power / 101.0
-                img_bgr_downscaled = cv2.resize (img_bgr, ( int(img_size[0]*p), int(img_size[1]*p ) ), cv2.INTER_CUBIC)
-                img_bgr = cv2.resize (img_bgr_downscaled, img_size, cv2.INTER_CUBIC)
+                img_bgr_downscaled = cv2.resize (img_bgr, ( int(img_size[0]*p), int(img_size[1]*p ) ), interpolation=cv2.INTER_CUBIC)
+                img_bgr = cv2.resize (img_bgr_downscaled, img_size, interpolation=cv2.INTER_CUBIC)
 
             new_out = cv2.warpAffine( out_face_bgr, face_mat, img_size, np.empty_like(img_bgr), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
 
diff --git a/samplelib/SampleGeneratorFaceXSeg.py b/samplelib/SampleGeneratorFaceXSeg.py
index 0b92daf..7999d16 100644
--- a/samplelib/SampleGeneratorFaceXSeg.py
+++ b/samplelib/SampleGeneratorFaceXSeg.py
@@ -85,8 +85,8 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase):
 
             if face_type == sample.face_type:
                 if w != resolution:
-                    img = cv2.resize( img, (resolution, resolution), cv2.INTER_LANCZOS4 )
-                    mask = cv2.resize( mask, (resolution, resolution), cv2.INTER_LANCZOS4 )
+                    img = cv2.resize( img, (resolution, resolution), interpolation=cv2.INTER_LANCZOS4 )
+                    mask = cv2.resize( mask, (resolution, resolution), interpolation=cv2.INTER_LANCZOS4 )
             else:
                 mat = LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, face_type)
                 img = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=cv2.BORDER_CONSTANT, flags=cv2.INTER_LANCZOS4 )
diff --git a/samplelib/SampleProcessor.py b/samplelib/SampleProcessor.py
index c4ebfb6..e3fbbd4 100644
--- a/samplelib/SampleProcessor.py
+++ b/samplelib/SampleProcessor.py
@@ -144,14 +144,14 @@ class SampleProcessor(object):
                             img = cv2.warpAffine( img, mat, (warp_resolution, warp_resolution), flags=cv2.INTER_LINEAR )
                             img = imagelib.warp_by_params (params_per_resolution[resolution], img, warp, transform, can_flip=True, border_replicate=border_replicate, cv2_inter=cv2.INTER_LINEAR)
-                            img = cv2.resize( img, (resolution,resolution), cv2.INTER_LINEAR )
+                            img = cv2.resize( img, (resolution,resolution), interpolation=cv2.INTER_LINEAR )
                         else:
                             if face_type != sample_face_type:
                                 mat = LandmarksProcessor.get_transform_mat (sample_landmarks, resolution, face_type)
                                 img = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=borderMode, flags=cv2.INTER_LINEAR )
                             else:
                                 if w != resolution:
-                                    img = cv2.resize( img, (resolution, resolution), cv2.INTER_LINEAR )
+                                    img = cv2.resize( img, (resolution, resolution), interpolation=cv2.INTER_LINEAR )
 
                             img = imagelib.warp_by_params (params_per_resolution[resolution], img, warp, transform, can_flip=True, border_replicate=border_replicate, cv2_inter=cv2.INTER_LINEAR)
 
@@ -180,13 +180,13 @@ class SampleProcessor(object):
                             img = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=borderMode, flags=cv2.INTER_CUBIC )
                         else:
                             if w != resolution:
-                                img = cv2.resize( img, (resolution, resolution), cv2.INTER_CUBIC )
+                                img = cv2.resize( img, (resolution, resolution), interpolation=cv2.INTER_CUBIC )
 
                         # Apply random color transfer
                         if ct_mode is not None and ct_sample is not None:
                             if ct_sample_bgr is None:
                                 ct_sample_bgr = ct_sample.load_bgr()
-                            img = imagelib.color_transfer (ct_mode, img, cv2.resize( ct_sample_bgr, (resolution,resolution), cv2.INTER_LINEAR ) )
+                            img = imagelib.color_transfer (ct_mode, img, cv2.resize( ct_sample_bgr, (resolution,resolution), interpolation=cv2.INTER_LINEAR ) )
 
                         img = imagelib.warp_by_params (params_per_resolution[resolution], img, warp, transform, can_flip=True, border_replicate=border_replicate)
 
@@ -227,7 +227,7 @@ class SampleProcessor(object):
                 elif sample_type == SPST.IMAGE:
                     img = sample_bgr
                     img = imagelib.warp_by_params (params_per_resolution[resolution], img, warp, transform, can_flip=True, border_replicate=True)
-                    img = cv2.resize( img, (resolution, resolution), cv2.INTER_CUBIC )
+                    img = cv2.resize( img, (resolution, resolution), interpolation=cv2.INTER_CUBIC )
 
                     out_sample = img
                     if data_format == "NCHW":