diff --git a/README.md b/README.md index 08471e4..c860133 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![Patreon](https://c5.patreon.com/external/logo/become_a_patron_button@2x.png)](https://www.patreon.com/bePatron?u=22997465) +[![Patreon](https://c5.patreon.com/external/logo/become_a_patron_button@2x.png)](https://www.patreon.com/bePatron?u=22997465) # CHANGELOG ### [View most recent changes](CHANGELOG.md) @@ -36,8 +36,8 @@ More than 95% of deepfake videos are created with DeepFaceLab. DeepFaceLab is used by such popular youtube channels as -|![](doc/tiktok_icon.png) [deeptomcruise](https://www.tiktok.com/@deeptomcruise)| -|---| +|![](doc/tiktok_icon.png) [deeptomcruise](https://www.tiktok.com/@deeptomcruise)|![](doc/tiktok_icon.png) [1facerussia](https://www.tiktok.com/@1facerussia)|![](doc/tiktok_icon.png) [arnoldschwarzneggar](https://www.tiktok.com/@arnoldschwarzneggar) +|---|---|---| |![](doc/youtube_icon.png) [Ctrl Shift Face](https://www.youtube.com/channel/UCKpH0CKltc73e4wh0_pgL3g)|![](doc/youtube_icon.png) [VFXChris Ume](https://www.youtube.com/channel/UCGf4OlX_aTt8DlrgiH3jN3g/videos)|![](doc/youtube_icon.png) [Sham00k](https://www.youtube.com/channel/UCZXbWcv7fSZFTAZV4beckyw/videos)| |---|---|---| @@ -201,7 +201,7 @@ Unfortunately, there is no "make everything ok" button in DeepFaceLab. You shoul -Windows (magnet link) +Windows (magnet link) Last release. Use torrent client to download. diff --git a/core/leras/layers/Conv2D.py b/core/leras/layers/Conv2D.py index ae37c50..7d4d444 100644 --- a/core/leras/layers/Conv2D.py +++ b/core/leras/layers/Conv2D.py @@ -23,28 +23,13 @@ class Conv2D(nn.LayerBase): if padding == "SAME": padding = ( (kernel_size - 1) * dilations + 1 ) // 2 elif padding == "VALID": - padding = 0 + padding = None else: raise ValueError ("Wrong padding type. 
Should be VALID SAME or INT or 4x INTs") - - if isinstance(padding, int): - if padding != 0: - if nn.data_format == "NHWC": - padding = [ [0,0], [padding,padding], [padding,padding], [0,0] ] - else: - padding = [ [0,0], [0,0], [padding,padding], [padding,padding] ] - else: - padding = None - - if nn.data_format == "NHWC": - strides = [1,strides,strides,1] else: - strides = [1,1,strides,strides] - - if nn.data_format == "NHWC": - dilations = [1,dilations,dilations,1] - else: - dilations = [1,1,dilations,dilations] + padding = int(padding) + + self.in_ch = in_ch self.out_ch = out_ch @@ -93,10 +78,27 @@ class Conv2D(nn.LayerBase): if self.use_wscale: weight = weight * self.wscale - if self.padding is not None: - x = tf.pad (x, self.padding, mode='CONSTANT') + padding = self.padding + if padding is not None: + if nn.data_format == "NHWC": + padding = [ [0,0], [padding,padding], [padding,padding], [0,0] ] + else: + padding = [ [0,0], [0,0], [padding,padding], [padding,padding] ] + x = tf.pad (x, padding, mode='CONSTANT') + + strides = self.strides + if nn.data_format == "NHWC": + strides = [1,strides,strides,1] + else: + strides = [1,1,strides,strides] - x = tf.nn.conv2d(x, weight, self.strides, 'VALID', dilations=self.dilations, data_format=nn.data_format) + dilations = self.dilations + if nn.data_format == "NHWC": + dilations = [1,dilations,dilations,1] + else: + dilations = [1,1,dilations,dilations] + + x = tf.nn.conv2d(x, weight, strides, 'VALID', dilations=dilations, data_format=nn.data_format) if self.use_bias: if nn.data_format == "NHWC": bias = tf.reshape (self.bias, (1,1,1,self.out_ch) ) diff --git a/merger/MergeMasked.py b/merger/MergeMasked.py index 4e94160..7d2caa4 100644 --- a/merger/MergeMasked.py +++ b/merger/MergeMasked.py @@ -57,7 +57,9 @@ def MergeMaskedFace (predictor_func, predictor_input_shape, prd_face_mask_a_0 = cv2.resize (prd_face_mask_a_0, (output_size, output_size), interpolation=cv2.INTER_CUBIC) prd_face_dst_mask_a_0 = cv2.resize 
(prd_face_dst_mask_a_0, (output_size, output_size), interpolation=cv2.INTER_CUBIC) - if cfg.mask_mode == 1: #dst + if cfg.mask_mode == 0: #full + wrk_face_mask_a_0 = np.ones_like(dst_face_mask_a_0) + elif cfg.mask_mode == 1: #dst wrk_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, (output_size,output_size), interpolation=cv2.INTER_CUBIC) elif cfg.mask_mode == 2: #learned-prd wrk_face_mask_a_0 = prd_face_mask_a_0 diff --git a/merger/MergerConfig.py b/merger/MergerConfig.py index 432bdf1..eba1493 100644 --- a/merger/MergerConfig.py +++ b/merger/MergerConfig.py @@ -81,7 +81,8 @@ mode_dict = {0:'original', mode_str_dict = { mode_dict[key] : key for key in mode_dict.keys() } -mask_mode_dict = {1:'dst', +mask_mode_dict = {0:'full', + 1:'dst', 2:'learned-prd', 3:'learned-dst', 4:'learned-prd*learned-dst', diff --git a/models/Model_AMP/Model.py b/models/Model_AMP/Model.py index ad66365..9347944 100644 --- a/models/Model_AMP/Model.py +++ b/models/Model_AMP/Model.py @@ -586,7 +586,7 @@ class AMPModel(ModelBase): gpu_src_dst_code = tf.concat( ( tf.slice(gpu_dst_inter_src_code, [0,0,0,0], [-1, ae_dims_slice , lowest_dense_res, lowest_dense_res]), tf.slice(gpu_dst_inter_dst_code, [0,ae_dims_slice,0,0], [-1,ae_dims-ae_dims_slice, lowest_dense_res,lowest_dense_res]) ), 1 ) - gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder(gpu_dst_inter_src_code) + gpu_pred_src_dst, gpu_pred_src_dstm = self.decoder(gpu_src_dst_code) _, gpu_pred_dst_dstm = self.decoder(gpu_dst_inter_dst_code) def AE_merge(warped_dst, morph_value): diff --git a/samplelib/SampleLoader.py b/samplelib/SampleLoader.py index edb3775..2989354 100644 --- a/samplelib/SampleLoader.py +++ b/samplelib/SampleLoader.py @@ -23,7 +23,7 @@ class SampleLoader: try: samples = samplelib.PackedFaceset.load(samples_path) except: - io.log_err(f"Error occured while loading samplelib.PackedFaceset.load {str(samples_dat_path)}, {traceback.format_exc()}") + io.log_err(f"Error occurred while loading samplelib.PackedFaceset.load 
{str(samples_path)}, {traceback.format_exc()}") if samples is None: raise ValueError("packed faceset not found.")