diff --git a/README.md b/README.md
index 5d4402f..84d4db3 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-![](doc/DFL_welcome.jpg)
+![](doc/DFL_welcome.jpg)
 
 ![](doc/logo_cuda.jpg)![](doc/logo_opencl.jpg)![](doc/logo_keras.jpg)![](doc/logo_tensorflow.jpg)![](doc/logo_plaidml.jpg)
 
@@ -10,12 +10,6 @@ If you like this software, please consider a donation.
 
 GOAL: next DeepFacelab update.
 
-[Donate via Yandex.Money](https://money.yandex.ru/to/41001142318065)
-
-[Donate via Paypal](https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=lepersorium@gmail.com&lc=US&no_note=0&item_name=Support+DeepFaceLab&cn=&curency_code=USD&bn=PP-DonationsBF:btn_donateCC_LG.gif:NonHosted)
-
-bitcoin:31mPd6DxPCzbpCMZk4k1koWAbErSyqkAXr
-
 - ### [Gallery](doc/gallery/doc_gallery.md)
 
 - ### Manuals:
diff --git a/facelib/LandmarksProcessor.py b/facelib/LandmarksProcessor.py
index 7c0b697..d34c6b3 100644
--- a/facelib/LandmarksProcessor.py
+++ b/facelib/LandmarksProcessor.py
@@ -143,21 +143,21 @@ def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
         face_type = FaceType.FULL
         remove_align = True
 
-    if face_type == FaceType.HALF:
-        padding = 0
-    elif face_type == FaceType.FULL:
-        padding = (output_size / 64) * 12
-    elif face_type == FaceType.HEAD:
-        padding = (output_size / 64) * 24
-    else:
-        raise ValueError ('wrong face_type: ', face_type)
+    if face_type == FaceType.HALF:
+        padding = 0
+    elif face_type == FaceType.FULL:
+        padding = (output_size / 64) * 12
+    elif face_type == FaceType.HEAD:
+        padding = (output_size / 64) * 24
+    else:
+        raise ValueError ('wrong face_type: ', face_type)
+
+    mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2]
+    mat = mat * (output_size - 2 * padding)
+    mat[:,2] += padding
+    mat *= (1 / scale)
+    mat[:,2] += -output_size*( ( (1 / scale) - 1.0 ) / 2 )
 
-    mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2]
-    mat = mat * (output_size - 2 * padding)
-    mat[:,2] += padding
-    mat *= (1 / scale)
-    mat[:,2] += -output_size*( ( (1 / scale) - 1.0 ) / 2 )
-
     if remove_align:
         bbox = transform_points ( [ (0,0), (0,output_size-1), (output_size-1, output_size-1), (output_size-1,0) ], mat, True)
         area = mathlib.polygon_area(bbox[:,0], bbox[:,1] )
@@ -209,7 +209,7 @@ def get_image_hull_mask (image_shape, image_landmarks, ie_polys=None):
 
     for item in parts:
         merged = np.concatenate(item)
-        cv2.fillConvexPoly(hull_mask, cv2.convexHull(merged), 1)
+        cv2.fillConvexPoly(hull_mask, cv2.convexHull(merged), 255.) # pylint: disable=no-member
 
     if ie_polys is not None:
         ie_polys.overlay_mask(hull_mask)
@@ -321,7 +321,7 @@ def draw_landmarks (image, image_landmarks, color=(0,255,0), transparent_mask=Fa
         mask = get_image_hull_mask (image.shape, image_landmarks, ie_polys)
         image[...] = ( image * (1-mask) + image * mask / 2 )[...]
 
-def draw_rect_landmarks (image, rect, image_landmarks, face_size, face_type, transparent_mask=False, ie_polys=None, landmarks_color=(0,255,0)):
+def draw_rect_landmarks (image, rect, image_landmarks, face_size, face_type, transparent_mask=False, ie_polys=None, landmarks_color=(0,255,0) ):
     draw_landmarks(image, image_landmarks, color=landmarks_color, transparent_mask=transparent_mask, ie_polys=ie_polys)
     imagelib.draw_rect (image, rect, (255,0,0), 2 )
 
diff --git a/requirements-cuda.txt b/requirements-cuda.txt
index 5e50c09..a62ef4a 100644
--- a/requirements-cuda.txt
+++ b/requirements-cuda.txt
@@ -2,7 +2,7 @@ numpy==1.16.3
 h5py==2.9.0
 Keras==2.2.4
 opencv-python==4.0.0.21
-tensorflow-gpu==1.12.0
+tensorflow-gpu==1.14.0
 plaidml==0.6.0
 plaidml-keras==0.5.0
 scikit-image