Compare commits

...

24 commits

Author       SHA1        Message                                                                      Date
iperov       fc7b787bda  Update README.md                                                             2024-10-24 20:17:15 +04:00
iperov       1d0c1340d7  Update README.md                                                             2024-10-23 07:29:39 +04:00
iperov       715f74f367  upd user faq link to model files                                             2023-07-28 14:14:10 +04:00
iperov       c4511db198  added module: Face Swap (Insight)                                            2023-07-09 22:01:38 +04:00
iperov       c7478635cc  Merge pull request #167 from RitikDutta/patch-1 (Create examples.md)        2023-05-30 12:51:55 +04:00
Ritik Dutta  d185dc9167  Create examples.md (creating md file for Rob_Doe, fixed the example link of Rob_Doe)  2023-05-30 14:15:40 +05:30
iperov       b7103b276c  update modules versions                                                      2023-05-04 20:25:34 +04:00
iperov       d66ff580d7  upd readme site                                                              2023-04-27 10:46:14 +04:00
iperov       c0aee1f74b  added 5 new faces                                                            2023-01-21 13:27:06 +04:00
iperov       e6787518d3  update modules versions                                                      2023-01-21 12:48:25 +04:00
iperov       1f1b7bf376  Merge branch 'master' of github.com:iperov/DeepFaceLive                      2022-12-31 12:41:51 +04:00
iperov       b48c103391  upd readme                                                                   2022-12-31 12:41:27 +04:00
iperov       5928120bd9  Merge pull request #111 from CeeBeeEh/master (Update Dockerfile)             2022-12-27 10:02:14 +04:00
CeeBeeEh     2abd0aa32a  Update Dockerfile (bumped base docker image to latest nvidia tag)            2022-12-26 22:25:01 -05:00
iperov       689e6b2f2a  upd main_setup.md                                                            2022-11-09 14:28:14 +04:00
iperov       cea06e4f3a  update onnxruntime                                                           2022-11-08 19:13:24 +04:00
iperov       38f015d28e  upd Rob Doe model                                                            2022-11-01 22:11:25 +04:00
iperov       833a167217  added Jesse Stat and Rob Doe models                                          2022-10-18 17:06:20 +04:00
iperov       3d35984b24  fix head mode                                                                2022-10-18 16:05:08 +04:00
iperov       f76170171a  Merge pull request #94 from Sajeg/master (Added German Translation)          2022-10-13 16:48:27 +04:00
Sajeg        a091328049  Added German Translation                                                    2022-10-13 13:13:56 +02:00
iperov       5a4e11a6aa  added Millie Park model                                                      2022-10-07 15:50:29 +04:00
iperov       9c619fb1b8  update pyqt version                                                         2022-10-07 15:29:35 +04:00
iperov       201fa2cb2a  added Irina Arty model                                                       2022-10-07 15:23:05 +04:00
60 changed files with 955 additions and 245 deletions


@ -12,7 +12,7 @@
<tr><td colspan=2 align="center">
## Face Swapper
## Face Swap (DFM)
You can swap your face from a webcam, or a face in a video, using trained face models.
@ -30,7 +30,32 @@ Keanu Reeves
<img src="doc/celebs/Keanu_Reeves/Keanu_Reeves.png" width=128></img>
<a href="doc/celebs/Keanu_Reeves/examples.md">examples</a>
</td><td align="center">
Irina Arty
<img src="doc/celebs/Irina_Arty/Irina_Arty.png" width=128></img>
examples
</td><td align="center">
Millie Park
<img src="doc/celebs/Millie_Park/Millie_Park.png" width=128></img>
examples
</td><td align="center">
Rob Doe
<img src="doc/celebs/Rob_Doe/Rob_Doe.png" width=128></img>
<a href="doc/celebs/Rob_Doe/examples.md">examples</a>
</td><td align="center">
Jesse Stat
<img src="doc/celebs/Jesse_Stat/Jesse_Stat.png" width=128></img>
examples
</td></tr>
</table>
<table align="center" border="0">
@ -168,6 +193,41 @@ Tim Norland
<a href="doc/celebs/Tim_Norland/examples.md">examples</a>
</td></tr></table>
<table align="center" border="0">
<tr align="center"><td>
Natalie Fatman
<img src="doc/celebs/Natalie_Fatman/Natalie_Fatman.png" width=128></img>
<a href="doc/celebs/Natalie_Fatman/examples.md">examples</a>
</td><td align="center">
Liu Lice
<img src="doc/celebs/Liu_Lice/Liu_Lice.png" width=128></img>
<a href="doc/celebs/Liu_Lice/examples.md">examples</a>
</td><td align="center">
Albica Johns
<img src="doc/celebs/Albica_Johns/Albica_Johns.png" width=128></img>
<a href="doc/celebs/Albica_Johns/examples.md">examples</a>
</td><td align="center">
Meggie Merkel
<img src="doc/celebs/Meggie_Merkel/Meggie_Merkel.png" width=128></img>
<a href="doc/celebs/Meggie_Merkel/examples.md">examples</a>
</td><td align="center">
Tina Shift
<img src="doc/celebs/Tina_Shift/Tina_Shift.png" width=128></img>
<a href="doc/celebs/Tina_Shift/examples.md">examples</a>
</td></tr></table>
</td></tr>
<tr><td colspan=2 align="center">
@ -182,6 +242,21 @@ Here is an <a href="https://www.tiktok.com/@arnoldschwarzneggar/video/6995538782
<tr><td colspan=2 align="center">
## Face Swap (Insight)
You can swap your face from a webcam, or a face in a video, using your own single photo.
<img src="doc/lukashenko.png" width=128></img>
<img src="doc/insight_faceswap_example.gif"></img>
</td></tr>
</table>
<table align="center" border="0">
<tr><td colspan=2 align="center">
## Face Animator
There is also a Face Animator module in the DeepFaceLive app. You can control a static face picture using a video or your own face from the camera. The quality is not the best, and the module requires careful face matching and per-pair parameter tuning, but it is good enough for funny videos and memes, or for real-time streaming at 25 fps on a 35 TFLOPS GPU.
@ -270,14 +345,6 @@ NVIDIA build : NVIDIA cards only, GT730 and higher. Works faster than DX12. Face
<a href="https://discord.gg/rxa7h9M6rH">Discord</a>
</td><td align="left">Official discord channel. English / Russian.</td></tr>
<tr><td align="right">
<a href="https://mrdeepfakes.com/forums/">mrdeepfakes</a>
</td><td align="left">the biggest NSFW English deepfake community</td></tr>
<tr><td align="right">
<a href="https://www.dfldata.xyz">dfldata.xyz</a>
</td><td align="left">Chinese community forum: free software tutorials, models, and face data</td></tr>
<tr><td align="right">
QQ group 124500433
</td><td align="left">Chinese QQ group; for business cooperation, contact the group owner</td></tr>
@ -298,7 +365,7 @@ Register github account and push "Star" button.
<a href="https://www.paypal.com/paypalme/DeepFaceLab">Donate via Paypal</a>
</td></tr>-->
<tr><td colspan=2 align="center">
<a href="https://money.yandex.ru/to/41001142318065">Donate via Yandex.Money</a>
<a href="https://yoomoney.ru/to/41001142318065">Donate via Yoomoney</a>
</td></tr>
<tr><td colspan=2 align="center">
bitcoin:bc1qewl062v70rszulml3f0mjdjrys8uxdydw3v6rq


@ -11,18 +11,19 @@ from xlib.qt.widgets.QXLabel import QXLabel
from . import backend
from .ui.QCameraSource import QCameraSource
from .ui.QFaceAligner import QFaceAligner
from .ui.QFaceAnimator import QFaceAnimator
from .ui.QFaceDetector import QFaceDetector
from .ui.QFaceMarker import QFaceMarker
from .ui.QFaceMerger import QFaceMerger
from .ui.QFaceAnimator import QFaceAnimator
from .ui.QFaceSwapper import QFaceSwapper
from .ui.QFaceSwapInsight import QFaceSwapInsight
from .ui.QFaceSwapDFM import QFaceSwapDFM
from .ui.QFileSource import QFileSource
from .ui.QFrameAdjuster import QFrameAdjuster
from .ui.QStreamOutput import QStreamOutput
from .ui.widgets.QBCFaceAlignViewer import QBCFaceAlignViewer
from .ui.widgets.QBCFaceSwapViewer import QBCFaceSwapViewer
from .ui.widgets.QBCMergedFrameViewer import QBCMergedFrameViewer
from .ui.widgets.QBCFrameViewer import QBCFrameViewer
from .ui.widgets.QBCMergedFrameViewer import QBCMergedFrameViewer
class QLiveSwap(qtx.QXWidget):
@ -58,13 +59,13 @@ class QLiveSwap(qtx.QXWidget):
face_marker = self.face_marker = backend.FaceMarker (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_detector_bc_out, bc_out=face_marker_bc_out, backend_db=backend_db)
face_aligner = self.face_aligner = backend.FaceAligner (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_marker_bc_out, bc_out=face_aligner_bc_out, backend_db=backend_db )
face_animator = self.face_animator = backend.FaceAnimator (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_aligner_bc_out, bc_out=face_merger_bc_out, animatables_path=animatables_path, backend_db=backend_db )
face_swapper = self.face_swapper = backend.FaceSwapper (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_aligner_bc_out, bc_out=face_swapper_bc_out, dfm_models_path=dfm_models_path, backend_db=backend_db )
face_swap_insight = self.face_swap_insight = backend.FaceSwapInsight (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_aligner_bc_out, bc_out=face_swapper_bc_out, faces_path=animatables_path, backend_db=backend_db )
face_swap_dfm = self.face_swap_dfm = backend.FaceSwapDFM (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_aligner_bc_out, bc_out=face_swapper_bc_out, dfm_models_path=dfm_models_path, backend_db=backend_db )
frame_adjuster = self.frame_adjuster = backend.FrameAdjuster(weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_swapper_bc_out, bc_out=frame_adjuster_bc_out, backend_db=backend_db )
face_merger = self.face_merger = backend.FaceMerger (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=frame_adjuster_bc_out, bc_out=face_merger_bc_out, backend_db=backend_db )
stream_output = self.stream_output = backend.StreamOutput (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_merger_bc_out, save_default_path=userdata_path, backend_db=backend_db)
self.all_backends : List[backend.BackendHost] = [file_source, camera_source, face_detector, face_marker, face_aligner, face_animator, face_swapper, frame_adjuster, face_merger, stream_output]
self.all_backends : List[backend.BackendHost] = [file_source, camera_source, face_detector, face_marker, face_aligner, face_animator, face_swap_insight, face_swap_dfm, frame_adjuster, face_merger, stream_output]
self.q_file_source = QFileSource(self.file_source)
self.q_camera_source = QCameraSource(self.camera_source)
@ -72,7 +73,8 @@ class QLiveSwap(qtx.QXWidget):
self.q_face_marker = QFaceMarker(self.face_marker)
self.q_face_aligner = QFaceAligner(self.face_aligner)
self.q_face_animator = QFaceAnimator(self.face_animator, animatables_path=animatables_path)
self.q_face_swapper = QFaceSwapper(self.face_swapper, dfm_models_path=dfm_models_path)
self.q_face_swap_insight = QFaceSwapInsight(self.face_swap_insight, faces_path=animatables_path)
self.q_face_swap_dfm = QFaceSwapDFM(self.face_swap_dfm, dfm_models_path=dfm_models_path)
self.q_frame_adjuster = QFrameAdjuster(self.frame_adjuster)
self.q_face_merger = QFaceMerger(self.face_merger)
self.q_stream_output = QStreamOutput(self.stream_output)
@ -83,8 +85,8 @@ class QLiveSwap(qtx.QXWidget):
self.q_ds_merged_frame_viewer = QBCMergedFrameViewer(backend_weak_heap, face_merger_bc_out)
q_nodes = qtx.QXWidgetHBox([ qtx.QXWidgetVBox([self.q_file_source, self.q_camera_source], spacing=5, fixed_width=256),
qtx.QXWidgetVBox([self.q_face_detector, self.q_face_aligner,], spacing=5, fixed_width=256),
qtx.QXWidgetVBox([self.q_face_marker, self.q_face_animator, self.q_face_swapper], spacing=5, fixed_width=256),
qtx.QXWidgetVBox([self.q_face_detector, self.q_face_aligner, ], spacing=5, fixed_width=256),
qtx.QXWidgetVBox([self.q_face_marker, self.q_face_animator, self.q_face_swap_insight, self.q_face_swap_dfm], spacing=5, fixed_width=256),
qtx.QXWidgetVBox([self.q_frame_adjuster, self.q_face_merger, self.q_stream_output], spacing=5, fixed_width=256),
], spacing=5, size_policy=('fixed', 'fixed') )
@ -112,7 +114,7 @@ class QLiveSwap(qtx.QXWidget):
def initialize(self):
for bcknd in self.all_backends:
default_state = True
if isinstance(bcknd, (backend.CameraSource, backend.FaceAnimator) ):
if isinstance(bcknd, (backend.CameraSource, backend.FaceAnimator, backend.FaceSwapInsight) ):
default_state = False
bcknd.restore_on_off_state(default_state=default_state)


@ -160,9 +160,7 @@ class FaceAlignerWorker(BackendWorker):
if state.head_mode or state.freeze_z_rotation:
if fsi.face_pose is not None:
head_yaw = fsi.face_pose.as_radians()[1]
face_ulmrks = fsi.face_ulmrks
if face_ulmrks is not None:
fsi.face_resolution = state.resolution
@ -173,11 +171,11 @@ class FaceAlignerWorker(BackendWorker):
x_offset=state.x_offset, y_offset=state.y_offset)
elif state.align_mode == AlignMode.FROM_POINTS:
face_align_img, uni_mat = face_ulmrks.cut(frame_image, state.face_coverage, state.resolution,
face_align_img, uni_mat = face_ulmrks.cut(frame_image, state.face_coverage+ (1.0 if state.head_mode else 0.0), state.resolution,
exclude_moving_parts=state.exclude_moving_parts,
head_yaw=head_yaw,
x_offset=state.x_offset,
y_offset=state.y_offset-0.08,
y_offset=state.y_offset-0.08 + (-0.50 if state.head_mode else 0.0),
freeze_z_rotation=state.freeze_z_rotation)
elif state.align_mode == AlignMode.FROM_STATIC_RECT:
rect = FRect.from_ltrb([ 0.5 - (fsi.face_resolution/W)/2, 0.5 - (fsi.face_resolution/H)/2, 0.5 + (fsi.face_resolution/W)/2, 0.5 + (fsi.face_resolution/H)/2,])
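In effect, the head-mode fix above enlarges the aligned crop and adds an extra vertical offset in the FROM_POINTS branch so the whole head, not just the face, stays inside the aligned image. Restated from the new arguments in the hunk (a paraphrase for clarity, not code from the repo):

coverage = state.face_coverage + (1.0 if state.head_mode else 0.0)        # wider crop in head mode
y_offset = state.y_offset - 0.08 + (-0.50 if state.head_mode else 0.0)    # extra vertical shift in head mode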


@ -14,13 +14,13 @@ from .BackendBase import (BackendConnection, BackendDB, BackendHost,
BackendWorkerState)
class FaceSwapper(BackendHost):
class FaceSwapDFM(BackendHost):
def __init__(self, weak_heap : BackendWeakHeap, reemit_frame_signal : BackendSignal, bc_in : BackendConnection, bc_out : BackendConnection, dfm_models_path : Path, backend_db : BackendDB = None,
id : int = 0):
self._id = id
super().__init__(backend_db=backend_db,
sheet_cls=Sheet,
worker_cls=FaceSwapperWorker,
worker_cls=FaceSwapDFMWorker,
worker_state_cls=WorkerState,
worker_start_args=[weak_heap, reemit_frame_signal, bc_in, bc_out, dfm_models_path])
@ -29,7 +29,7 @@ class FaceSwapper(BackendHost):
def _get_name(self):
return super()._get_name()# + f'{self._id}'
class FaceSwapperWorker(BackendWorker):
class FaceSwapDFMWorker(BackendWorker):
def get_state(self) -> 'WorkerState': return super().get_state()
def get_control_sheet(self) -> 'Sheet.Worker': return super().get_control_sheet()
@ -218,7 +218,7 @@ class FaceSwapperWorker(BackendWorker):
if events.new_status_downloading:
self.set_busy(False)
cs.model_dl_progress.enable()
cs.model_dl_progress.set_config( lib_csw.Progress.Config(title='@FaceSwapper.downloading_model') )
cs.model_dl_progress.set_config( lib_csw.Progress.Config(title='@FaceSwapDFM.downloading_model') )
cs.model_dl_progress.set_progress(0)
elif events.new_status_initialized:
@ -229,12 +229,12 @@ class FaceSwapperWorker(BackendWorker):
cs.model_info_label.enable()
cs.model_info_label.set_config( lib_csw.InfoLabel.Config(info_icon=True,
info_lines=[f'@FaceSwapper.model_information',
info_lines=[f'@FaceSwapDFM.model_information',
'',
f'@FaceSwapper.filename',
f'@FaceSwapDFM.filename',
f'{self.dfm_model.get_model_path().name}',
'',
f'@FaceSwapper.resolution',
f'@FaceSwapDFM.resolution',
f'{model_width}x{model_height}']) )
cs.swap_all_faces.enable()


@ -0,0 +1,269 @@
import time
from pathlib import Path

import cv2
import numpy as np
from modelhub.onnx import InsightFace2D106, InsightFaceSwap, YoloV5Face
from xlib import cv as lib_cv2
from xlib import os as lib_os
from xlib import path as lib_path
from xlib.face import ELandmarks2D, FLandmarks2D, FRect
from xlib.image.ImageProcessor import ImageProcessor
from xlib.mp import csw as lib_csw

from .BackendBase import (BackendConnection, BackendDB, BackendHost,
                          BackendSignal, BackendWeakHeap, BackendWorker,
                          BackendWorkerState)


class FaceSwapInsight(BackendHost):
    def __init__(self, weak_heap : BackendWeakHeap, reemit_frame_signal : BackendSignal, bc_in : BackendConnection, bc_out : BackendConnection, faces_path : Path, backend_db : BackendDB = None,
                       id : int = 0):
        self._id = id
        super().__init__(backend_db=backend_db,
                         sheet_cls=Sheet,
                         worker_cls=FaceSwapInsightWorker,
                         worker_state_cls=WorkerState,
                         worker_start_args=[weak_heap, reemit_frame_signal, bc_in, bc_out, faces_path])

    def get_control_sheet(self) -> 'Sheet.Host': return super().get_control_sheet()

    def _get_name(self):
        return super()._get_name()


class FaceSwapInsightWorker(BackendWorker):
    def get_state(self) -> 'WorkerState': return super().get_state()
    def get_control_sheet(self) -> 'Sheet.Worker': return super().get_control_sheet()

    def on_start(self, weak_heap : BackendWeakHeap, reemit_frame_signal : BackendSignal, bc_in : BackendConnection, bc_out : BackendConnection, faces_path : Path):
        self.weak_heap = weak_heap
        self.reemit_frame_signal = reemit_frame_signal
        self.bc_in = bc_in
        self.bc_out = bc_out
        self.faces_path = faces_path
        self.pending_bcd = None

        self.swap_model : InsightFaceSwap = None
        self.target_face_img = None
        self.face_vector = None

        lib_os.set_timer_resolution(1)

        state, cs = self.get_state(), self.get_control_sheet()

        cs.device.call_on_selected(self.on_cs_device)
        cs.face.call_on_selected(self.on_cs_face)
        cs.adjust_c.call_on_number(self.on_cs_adjust_c)
        cs.adjust_x.call_on_number(self.on_cs_adjust_x)
        cs.adjust_y.call_on_number(self.on_cs_adjust_y)
        cs.animator_face_id.call_on_number(self.on_cs_animator_face_id)
        cs.update_faces.call_on_signal(self.update_faces)

        cs.device.enable()
        cs.device.set_choices( InsightFaceSwap.get_available_devices(), none_choice_name='@misc.menu_select')
        cs.device.select(state.device)

    def update_faces(self):
        state, cs = self.get_state(), self.get_control_sheet()
        cs.face.set_choices([face_path.name for face_path in lib_path.get_files_paths(self.faces_path, extensions=['.jpg','.jpeg','.png'])], none_choice_name='@misc.menu_select')

    def on_cs_device(self, idx, device):
        state, cs = self.get_state(), self.get_control_sheet()
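        # Models are built only once the saved device matches the selection;
        # any other choice is persisted and the worker restarts to load it.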
        if device is not None and state.device == device:
            self.swap_model = InsightFaceSwap(device)
            self.face_detector = YoloV5Face(device)
            self.face_marker = InsightFace2D106(device)

            cs.face.enable()
            self.update_faces()
            cs.face.select(state.face)

            cs.adjust_c.enable()
            cs.adjust_c.set_config(lib_csw.Number.Config(min=1.0, max=2.0, step=0.01, decimals=2, allow_instant_update=True))
            adjust_c = state.adjust_c
            if adjust_c is None:
                adjust_c = 1.55
            cs.adjust_c.set_number(adjust_c)

            cs.adjust_x.enable()
            cs.adjust_x.set_config(lib_csw.Number.Config(min=-0.5, max=0.5, step=0.01, decimals=2, allow_instant_update=True))
            adjust_x = state.adjust_x
            if adjust_x is None:
                adjust_x = 0.0
            cs.adjust_x.set_number(adjust_x)

            cs.adjust_y.enable()
            cs.adjust_y.set_config(lib_csw.Number.Config(min=-0.5, max=0.5, step=0.01, decimals=2, allow_instant_update=True))
            adjust_y = state.adjust_y
            if adjust_y is None:
                adjust_y = -0.15
            cs.adjust_y.set_number(adjust_y)

            cs.animator_face_id.enable()
            cs.animator_face_id.set_config(lib_csw.Number.Config(min=0, max=16, step=1, decimals=0, allow_instant_update=True))
            cs.animator_face_id.set_number(state.animator_face_id if state.animator_face_id is not None else 0)

            cs.update_faces.enable()
        else:
            state.device = device
            self.save_state()
            self.restart()

    def on_cs_face(self, idx, face):
        state, cs = self.get_state(), self.get_control_sheet()
        state.face = face
        self.face_vector = None
        self.target_face_img = None
        if face is not None:
            try:
                self.target_face_img = lib_cv2.imread(self.faces_path / face)
            except Exception as e:
                cs.face.unselect()
        self.save_state()
        self.reemit_frame_signal.send()

    def on_cs_adjust_c(self, adjust_c):
        state, cs = self.get_state(), self.get_control_sheet()
        cfg = cs.adjust_c.get_config()
        adjust_c = state.adjust_c = np.clip(adjust_c, cfg.min, cfg.max)
        cs.adjust_c.set_number(adjust_c)
        self.face_vector = None
        self.save_state()
        self.reemit_frame_signal.send()

    def on_cs_adjust_x(self, adjust_x):
        state, cs = self.get_state(), self.get_control_sheet()
        cfg = cs.adjust_x.get_config()
        adjust_x = state.adjust_x = np.clip(adjust_x, cfg.min, cfg.max)
        cs.adjust_x.set_number(adjust_x)
        self.face_vector = None
        self.save_state()
        self.reemit_frame_signal.send()

    def on_cs_adjust_y(self, adjust_y):
        state, cs = self.get_state(), self.get_control_sheet()
        cfg = cs.adjust_y.get_config()
        adjust_y = state.adjust_y = np.clip(adjust_y, cfg.min, cfg.max)
        cs.adjust_y.set_number(adjust_y)
        self.face_vector = None
        self.save_state()
        self.reemit_frame_signal.send()

    def on_cs_animator_face_id(self, animator_face_id):
        state, cs = self.get_state(), self.get_control_sheet()
        cfg = cs.animator_face_id.get_config()
        animator_face_id = state.animator_face_id = int(np.clip(animator_face_id, cfg.min, cfg.max))
        cs.animator_face_id.set_number(animator_face_id)
        self.save_state()
        self.reemit_frame_signal.send()

    def on_tick(self):
        state, cs = self.get_state(), self.get_control_sheet()

        if self.pending_bcd is None:
            self.start_profile_timing()

            bcd = self.bc_in.read(timeout=0.005)
            if bcd is not None:
                bcd.assign_weak_heap(self.weak_heap)
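                # The face vector is computed once from the selected photo and cached;
                # picking another photo or changing C/X/Y resets it to None.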
                if self.face_vector is None and self.target_face_img is not None:
                    rects = self.face_detector.extract (self.target_face_img, threshold=0.5)[0]
                    if len(rects) > 0:
                        _,H,W,_ = ImageProcessor(self.target_face_img).get_dims()
                        u_rects = [ FRect.from_ltrb( (l/W, t/H, r/W, b/H) ) for l,t,r,b in rects ]
                        face_urect = FRect.sort_by_area_size(u_rects)[0] # sorted by largest

                        face_image, face_uni_mat = face_urect.cut(self.target_face_img, 1.6, 192)
                        lmrks = self.face_marker.extract(face_image)[0]
                        lmrks = lmrks[...,0:2] / (192,192)
                        face_ulmrks = FLandmarks2D.create (ELandmarks2D.L106, lmrks).transform(face_uni_mat, invert=True)

                        face_align_img, _ = face_ulmrks.cut(self.target_face_img, state.adjust_c,
                                                            self.swap_model.get_face_vector_input_size(),
                                                            x_offset=state.adjust_x,
                                                            y_offset=state.adjust_y)

                        self.face_vector = self.swap_model.get_face_vector(face_align_img)

                swap_model = self.swap_model
                if swap_model is not None and self.face_vector is not None:
                    for i, fsi in enumerate(bcd.get_face_swap_info_list()):
                        if state.animator_face_id == i:
                            face_align_image = bcd.get_image(fsi.face_align_image_name)
                            if face_align_image is not None:
                                _,H,W,_ = ImageProcessor(face_align_image).get_dims()

                                anim_image = swap_model.generate(face_align_image, self.face_vector)
                                anim_image = ImageProcessor(anim_image).resize((W,H)).get_image('HWC')

                                fsi.face_align_mask_name = f'{fsi.face_align_image_name}_mask'
                                fsi.face_swap_image_name = f'{fsi.face_align_image_name}_swapped'
                                fsi.face_swap_mask_name = f'{fsi.face_swap_image_name}_mask'

                                bcd.set_image(fsi.face_swap_image_name, anim_image)

                                white_mask = np.full_like(anim_image, 255, dtype=np.uint8)
                                bcd.set_image(fsi.face_align_mask_name, white_mask)
                                bcd.set_image(fsi.face_swap_mask_name, white_mask)
                            break

                self.stop_profile_timing()
                self.pending_bcd = bcd

        if self.pending_bcd is not None:
            if self.bc_out.is_full_read(1):
                self.bc_out.write(self.pending_bcd)
                self.pending_bcd = None
            else:
                time.sleep(0.001)


class Sheet:
    class Host(lib_csw.Sheet.Host):
        def __init__(self):
            super().__init__()
            self.device = lib_csw.DynamicSingleSwitch.Client()
            self.face = lib_csw.DynamicSingleSwitch.Client()
            self.animator_face_id = lib_csw.Number.Client()
            self.update_faces = lib_csw.Signal.Client()
            self.adjust_c = lib_csw.Number.Client()
            self.adjust_x = lib_csw.Number.Client()
            self.adjust_y = lib_csw.Number.Client()

    class Worker(lib_csw.Sheet.Worker):
        def __init__(self):
            super().__init__()
            self.device = lib_csw.DynamicSingleSwitch.Host()
            self.face = lib_csw.DynamicSingleSwitch.Host()
            self.animator_face_id = lib_csw.Number.Host()
            self.update_faces = lib_csw.Signal.Host()
            self.adjust_c = lib_csw.Number.Host()
            self.adjust_x = lib_csw.Number.Host()
            self.adjust_y = lib_csw.Number.Host()


class WorkerState(BackendWorkerState):
    device = None
    face : str = None
    animator_face_id : int = None
    adjust_c : float = None
    adjust_x : float = None
    adjust_y : float = None


@ -7,7 +7,8 @@ from .FaceAnimator import FaceAnimator
from .FaceDetector import FaceDetector
from .FaceMarker import FaceMarker
from .FaceMerger import FaceMerger
from .FaceSwapper import FaceSwapper
from .FaceSwapInsight import FaceSwapInsight
from .FaceSwapDFM import FaceSwapDFM
from .FileSource import FileSource
from .FrameAdjuster import FrameAdjuster
from .StreamOutput import StreamOutput


@ -4,7 +4,7 @@ from localization import L
from resources.gfx import QXImageDB
from xlib import qt as qtx
from ..backend import FaceSwapper
from ..backend import FaceSwapDFM
from .widgets.QBackendPanel import QBackendPanel
from .widgets.QCheckBoxCSWFlag import QCheckBoxCSWFlag
from .widgets.QComboBoxCSWDynamicSingleSwitch import \
@ -17,8 +17,8 @@ from .widgets.QSliderCSWNumber import QSliderCSWNumber
from .widgets.QSpinBoxCSWNumber import QSpinBoxCSWNumber
class QFaceSwapper(QBackendPanel):
def __init__(self, backend : FaceSwapper, dfm_models_path : Path):
class QFaceSwapDFM(QBackendPanel):
def __init__(self, backend : FaceSwapDFM, dfm_models_path : Path):
self._dfm_models_path = dfm_models_path
cs = backend.get_control_sheet()
@ -28,7 +28,7 @@ class QFaceSwapper(QBackendPanel):
q_device_label = QLabelPopupInfo(label=L('@common.device'), popup_info_text=L('@common.help.device') )
q_device = QComboBoxCSWDynamicSingleSwitch(cs.device, reflect_state_widgets=[q_device_label])
q_model_label = QLabelPopupInfo(label=L('@QFaceSwapper.model'), popup_info_text=L('@QFaceSwapper.help.model') )
q_model_label = QLabelPopupInfo(label=L('@QFaceSwapDFM.model'), popup_info_text=L('@QFaceSwapDFM.help.model') )
q_model = QComboBoxCSWDynamicSingleSwitch(cs.model, reflect_state_widgets=[q_model_label, btn_open_folder])
q_model_dl_error = self._q_model_dl_error = QErrorCSWError(cs.model_dl_error)
@ -36,31 +36,31 @@ class QFaceSwapper(QBackendPanel):
q_model_info_label = self._q_model_info_label = QLabelPopupInfoCSWInfoLabel(cs.model_info_label)
q_swap_all_faces_label = QLabelPopupInfo(label=L('@QFaceSwapper.swap_all_faces') )
q_swap_all_faces_label = QLabelPopupInfo(label=L('@QFaceSwapDFM.swap_all_faces') )
q_swap_all_faces = QCheckBoxCSWFlag(cs.swap_all_faces, reflect_state_widgets=[q_swap_all_faces_label])
q_face_id_label = QLabelPopupInfo(label=L('@QFaceSwapper.face_id'), popup_info_text=L('@QFaceSwapper.help.face_id') )
q_face_id_label = QLabelPopupInfo(label=L('@common.face_id'), popup_info_text=L('@QFaceSwapDFM.help.face_id') )
q_face_id = QSpinBoxCSWNumber(cs.face_id, reflect_state_widgets=[q_face_id_label])
q_morph_factor_label = QLabelPopupInfo(label=L('@QFaceSwapper.morph_factor'), popup_info_text=L('@QFaceSwapper.help.morph_factor') )
q_morph_factor_label = QLabelPopupInfo(label=L('@QFaceSwapDFM.morph_factor'), popup_info_text=L('@QFaceSwapDFM.help.morph_factor') )
q_morph_factor = QSliderCSWNumber(cs.morph_factor, reflect_state_widgets=[q_morph_factor_label])
q_sharpen_amount_label = QLabelPopupInfo(label=L('@QFaceSwapper.presharpen_amount'), popup_info_text=L('@QFaceSwapper.help.presharpen_amount') )
q_sharpen_amount_label = QLabelPopupInfo(label=L('@QFaceSwapDFM.presharpen_amount'), popup_info_text=L('@QFaceSwapDFM.help.presharpen_amount') )
q_sharpen_amount = QSliderCSWNumber(cs.presharpen_amount, reflect_state_widgets=[q_sharpen_amount_label])
q_pre_gamma_label = QLabelPopupInfo(label=L('@QFaceSwapper.pregamma'), popup_info_text=L('@QFaceSwapper.help.pregamma') )
q_pre_gamma_label = QLabelPopupInfo(label=L('@QFaceSwapDFM.pregamma'), popup_info_text=L('@QFaceSwapDFM.help.pregamma') )
q_pre_gamma_red = QSpinBoxCSWNumber(cs.pre_gamma_red, reflect_state_widgets=[q_pre_gamma_label])
q_pre_gamma_green = QSpinBoxCSWNumber(cs.pre_gamma_green)
q_pre_gamma_blue = QSpinBoxCSWNumber(cs.pre_gamma_blue)
q_post_gamma_label = QLabelPopupInfo(label=L('@QFaceSwapper.postgamma'))
q_post_gamma_label = QLabelPopupInfo(label=L('@QFaceSwapDFM.postgamma'))
q_post_gamma_red = QSpinBoxCSWNumber(cs.post_gamma_red, reflect_state_widgets=[q_post_gamma_label])
q_post_gamma_green = QSpinBoxCSWNumber(cs.post_gamma_green)
q_post_gamma_blue = QSpinBoxCSWNumber(cs.post_gamma_blue)
q_two_pass_label = QLabelPopupInfo(label=L('@QFaceSwapper.two_pass'), popup_info_text=L('@QFaceSwapper.help.two_pass') )
q_two_pass_label = QLabelPopupInfo(label=L('@QFaceSwapDFM.two_pass'), popup_info_text=L('@QFaceSwapDFM.help.two_pass') )
q_two_pass = QCheckBoxCSWFlag(cs.two_pass, reflect_state_widgets=[q_two_pass_label])
grid_l = qtx.QXGridLayout( spacing=5)
@ -94,7 +94,7 @@ class QFaceSwapper(QBackendPanel):
grid_l.addWidget(q_two_pass, row, 1)
row += 1
super().__init__(backend, L('@QFaceSwapper.module_title'),
super().__init__(backend, L('@QFaceSwapDFM.module_title'),
layout=qtx.QXVBoxLayout([grid_l]) )


@ -0,0 +1,71 @@
from pathlib import Path

from localization import L
from resources.gfx import QXImageDB
from xlib import qt as qtx

from ..backend import FaceSwapInsight
from .widgets.QBackendPanel import QBackendPanel
from .widgets.QComboBoxCSWDynamicSingleSwitch import \
    QComboBoxCSWDynamicSingleSwitch
from .widgets.QLabelPopupInfo import QLabelPopupInfo
from .widgets.QSliderCSWNumber import QSliderCSWNumber
from .widgets.QSpinBoxCSWNumber import QSpinBoxCSWNumber
from .widgets.QXPushButtonCSWSignal import QXPushButtonCSWSignal


class QFaceSwapInsight(QBackendPanel):
    def __init__(self, backend : FaceSwapInsight, faces_path : Path):
        self._faces_path = faces_path

        cs = backend.get_control_sheet()

        btn_open_folder = self.btn_open_folder = qtx.QXPushButton(image = QXImageDB.eye_outline('light gray'), tooltip_text='Reveal in Explorer', released=self._btn_open_folder_released, fixed_size=(24,22) )

        q_device_label = QLabelPopupInfo(label=L('@common.device'), popup_info_text=L('@common.help.device') )
        q_device = QComboBoxCSWDynamicSingleSwitch(cs.device, reflect_state_widgets=[q_device_label])

        q_face_label = QLabelPopupInfo(label=L('@QFaceSwapInsight.face') )
        q_face = QComboBoxCSWDynamicSingleSwitch(cs.face, reflect_state_widgets=[q_face_label, btn_open_folder])

        q_adjust_c_label = QLabelPopupInfo(label='C')
        q_adjust_c = QSliderCSWNumber(cs.adjust_c, reflect_state_widgets=[q_adjust_c_label])

        q_adjust_x_label = QLabelPopupInfo(label='X')
        q_adjust_x = QSliderCSWNumber(cs.adjust_x, reflect_state_widgets=[q_adjust_x_label])

        q_adjust_y_label = QLabelPopupInfo(label='Y')
        q_adjust_y = QSliderCSWNumber(cs.adjust_y, reflect_state_widgets=[q_adjust_y_label])

        q_animator_face_id_label = QLabelPopupInfo(label=L('@common.face_id') )
        q_animator_face_id = QSpinBoxCSWNumber(cs.animator_face_id, reflect_state_widgets=[q_animator_face_id_label])

        q_update_faces = QXPushButtonCSWSignal(cs.update_faces, image=QXImageDB.reload_outline('light gray'), button_size=(24,22) )

        grid_l = qtx.QXGridLayout( spacing=5)
        row = 0
        grid_l.addWidget(q_device_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
        grid_l.addWidget(q_device, row, 1, alignment=qtx.AlignLeft )
        row += 1
        grid_l.addWidget(q_face_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
        grid_l.addLayout(qtx.QXHBoxLayout([q_face, 2, btn_open_folder, 2, q_update_faces]), row, 1 )
        row += 1
        grid_l.addWidget(q_adjust_c_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
        grid_l.addWidget(q_adjust_c, row, 1 )
        row += 1
        grid_l.addWidget(q_adjust_x_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
        grid_l.addWidget(q_adjust_x, row, 1 )
        row += 1
        grid_l.addWidget(q_adjust_y_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
        grid_l.addWidget(q_adjust_y, row, 1 )
        row += 1
        grid_l.addWidget(q_animator_face_id_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
        grid_l.addWidget(q_animator_face_id, row, 1, alignment=qtx.AlignLeft )
        row += 1

        super().__init__(backend, L('@QFaceSwapInsight.module_title'),
                         layout=qtx.QXVBoxLayout([grid_l]) )

    def _btn_open_folder_released(self):
        qtx.QDesktopServices.openUrl(qtx.QUrl.fromLocalFile( str(self._faces_path) ))


@ -1,4 +1,4 @@
FROM nvidia/cuda:11.4.2-cudnn8-runtime-ubuntu20.04
FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu20.04
WORKDIR /app
ARG DEBIAN_FRONTEND=noninteractive
ARG NV_VER
@ -10,7 +10,7 @@ RUN ln -s /usr/bin/python3 /usr/bin/python
RUN git clone https://github.com/iperov/DeepFaceLive.git
RUN python -m pip install --upgrade pip
RUN python -m pip install onnxruntime-gpu==1.12.1 numpy==1.21.6 h5py numexpr protobuf==3.20.1 opencv-python==4.6.0.66 opencv-contrib-python==4.6.0.66 pyqt6==6.3.1 onnx==1.12.0 torch==1.10.0 torchvision==0.11.1
RUN python -m pip install onnxruntime-gpu==1.15.1 numpy==1.21.6 h5py numexpr protobuf==3.20.1 opencv-python==4.8.0.74 opencv-contrib-python==4.8.0.74 pyqt6==6.5.1 onnx==1.14.0 torch==1.13.1 torchvision==0.14.1
RUN apt install -y libnvidia-compute-$NV_VER


@ -1,6 +1,7 @@
import argparse
import os
import shutil
import ssl
import subprocess
import time
import urllib.request
@ -81,7 +82,7 @@ class WindowsFolderBuilder:
f = None
while True:
try:
url_request = urllib.request.urlopen(url)
url_request = urllib.request.urlopen(url, context=ssl._create_unverified_context())
url_size = int( url_request.getheader('content-length') )
if use_cached:
@ -462,7 +463,7 @@ pause
""")
def build_deepfacelive_windows(release_dir, cache_dir, python_ver='3.7.9', backend='cuda'):
def build_deepfacelive_windows(release_dir, cache_dir, python_ver='3.8.10', backend='cuda'):
builder = WindowsFolderBuilder(release_path=Path(release_dir),
cache_path=Path(cache_dir),
@ -475,19 +476,19 @@ def build_deepfacelive_windows(release_dir, cache_dir, python_ver='3.7.9', backe
builder.install_pip_package('h5py')
builder.install_pip_package('numexpr')
builder.install_pip_package('protobuf==3.20.1')
builder.install_pip_package('opencv-python==4.6.0.66')
builder.install_pip_package('opencv-contrib-python==4.6.0.66')
builder.install_pip_package('pyqt6==6.3.1')
builder.install_pip_package('onnx==1.12.0')
builder.install_pip_package('opencv-python==4.8.0.74')
builder.install_pip_package('opencv-contrib-python==4.8.0.74')
builder.install_pip_package('pyqt6==6.5.1')
builder.install_pip_package('onnx==1.14.0')
if backend == 'cuda':
#builder.install_pip_package('torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/torch_stable.html')
#builder.install_pip_package('torch==1.11.0+cu115 torchvision==0.12.0+cu115 -f https://download.pytorch.org/whl/torch_stable.html')
builder.install_pip_package('torch==1.12.1+cu116 torchvision==0.13.1+cu116 -f https://download.pytorch.org/whl/torch_stable.html')
builder.install_pip_package('torch==1.13.1+cu117 torchvision==0.14.1+cu117 -f https://download.pytorch.org/whl/torch_stable.html')
builder.install_pip_package('onnxruntime-gpu==1.11.0')
builder.install_pip_package('onnxruntime-gpu==1.15.1')
elif backend == 'directml':
builder.install_pip_package('onnxruntime-directml==1.12.1')
builder.install_pip_package('onnxruntime-directml==1.15.1')
builder.install_ffmpeg_binaries()
@ -562,7 +563,7 @@ if __name__ == '__main__':
p.add_argument('--build-type', required=True, choices=['dfl-windows'])
p.add_argument('--release-dir', action=fixPathAction, default=None)
p.add_argument('--cache-dir', action=fixPathAction, default=None)
p.add_argument('--python-ver', default="3.7.9")
p.add_argument('--python-ver', default="3.8.10")
p.add_argument('--backend', choices=['cuda', 'directml'], default='cuda')
args = p.parse_args()

@ -0,0 +1 @@
https://user-images.githubusercontent.com/8076202/213858809-d52c59a4-51ed-4964-b4d1-83bd0e3bca91.mp4

@ -0,0 +1 @@
https://user-images.githubusercontent.com/8076202/213858383-faddec1e-40a0-4d1c-8254-076079ff6eeb.mp4

@ -0,0 +1 @@
https://user-images.githubusercontent.com/8076202/213859082-6b630926-0f04-4de1-b58b-a1ec87fba3c1.mp4

@ -0,0 +1 @@
https://user-images.githubusercontent.com/8076202/213857336-26b6fc54-2c3d-41f5-a909-ded7ae8343d5.mp4

@ -0,0 +1 @@
https://github.com/iperov/DeepFaceLive/assets/30226719/e6e4cb01-da58-4beb-8a92-c431b9893d6f

@ -0,0 +1 @@
https://user-images.githubusercontent.com/8076202/213859298-ea771c20-65f4-475e-8c08-9a2db29673e2.mp4

[new binary file: doc/lukashenko.png, 311 KiB]


@ -57,7 +57,7 @@ If you are novice, learn all about DeepFaceLab https://mrdeepfakes.com/forums/th
Gather 5000+ samples of your face under various conditions using a webcam; these will be used for Live. The conditions are as follows: different lighting, different facial expressions, head direction, eye direction, being farther from or closer to the camera, etc. Sort the faceset by best, keeping 2000.
Here is a public storage https://disk.yandex.ru/d/7i5XTKIKVg5UUg with facesets and models.
Here is a public storage https://helurl.com/drive/s/IfmyaC4f1IvScaWknpU8DrpecacgZ6 with facesets and models.
> Using pretrained "RTT model 224 V2.zip" from public storage (see above)


@ -27,7 +27,7 @@
</td></tr>
<tr><td colspan=2 align="center">
### Run and unpack to root of any disk.
### Run and unpack to **root of any disk**.
<img src="run_and_unpack.jpg"></img>

[diff suppressed: file too large]


@ -15,6 +15,15 @@ from xlib import os as lib_os
# # set environ for onnxruntime
# # os.environ['CUDA_PATH_V11_2'] = os.environ['CUDA_PATH']
# from modelhub.onnx import InsightFaceSwap
# x = InsightFaceSwap(InsightFaceSwap.get_available_devices()[0])
# import code
# code.interact(local=dict(globals(), **locals()))
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()


@ -34,6 +34,7 @@ class DFMModelInfo:
def get_available_models_info(models_path : Path) -> List[DFMModelInfo]:
# predefined list of celebs with urls
dfm_models = [
DFMModelInfo(name='Albica Johns', model_path=models_path / f'Albica_Johns.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/ALBICA_JOHNS/Albica_Johns.dfm'),
DFMModelInfo(name='Amber Song', model_path=models_path / f'Amber_Song.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/AMBER_SONG/Amber_Song.dfm'),
DFMModelInfo(name='Ava de Addario', model_path=models_path / f'Ava_de_Addario.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/AVA_DE_ADDARIO/Ava_de_Addario.dfm'),
DFMModelInfo(name='Bryan Greynolds', model_path=models_path / f'Bryan_Greynolds.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/BRYAN_GREYNOLDS/Bryan_Greynolds.dfm'),
@ -42,16 +43,24 @@ def get_available_models_info(models_path : Path) -> List[DFMModelInfo]:
DFMModelInfo(name='Dilraba Dilmurat', model_path=models_path / f'Dilraba_Dilmurat.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/DILRABA_DILMURAT/Dilraba_Dilmurat.dfm'),
DFMModelInfo(name='Emily Winston', model_path=models_path / f'Emily_Winston.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/EMILY_WINSTON/Emily_Winston.dfm'),
DFMModelInfo(name='Ewon Spice', model_path=models_path / f'Ewon_Spice.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/EWON_SPICE/Ewon_Spice.dfm'),
DFMModelInfo(name='Irina Arty', model_path=models_path / f'Irina_Arty.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/IRINA_ARTY/Irina_Arty.dfm'),
DFMModelInfo(name='Jackie Chan', model_path=models_path / f'Jackie_Chan.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/JACKIE_CHAN/Jackie_Chan.dfm'),
DFMModelInfo(name='Jesse Stat 320', model_path=models_path / f'Jesse_Stat_320.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/JESSE_STAT/Jesse_Stat_320.dfm'),
DFMModelInfo(name='Joker', model_path=models_path / f'Joker.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/JOKER/Joker.dfm'),
DFMModelInfo(name='Keanu Reeves', model_path=models_path / f'Keanu_Reeves.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/KEANU_REEVES/Keanu_Reeves.dfm'),
DFMModelInfo(name='Keanu Reeves 320', model_path=models_path / f'Keanu_Reeves_320.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/KEANU_REEVES_320/Keanu_Reeves_320.dfm'),
DFMModelInfo(name='Kim Jarrey', model_path=models_path / f'Kim_Jarrey.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/KIM_JARREY/Kim_Jarrey.dfm'),
DFMModelInfo(name='Liu Lice', model_path=models_path / f'Liu_Lice.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/LIU_LICE/Liu_Lice.dfm'),
DFMModelInfo(name='Matilda Bobbie', model_path=models_path / f'Matilda_Bobbie.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/MATILDA_BOBBIE/Matilda_Bobbie.dfm'),
DFMModelInfo(name='Meggie Merkel', model_path=models_path / f'Meggie_Merkel.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/MEGGIE_MERKEL/Meggie_Merkel.dfm'),
DFMModelInfo(name='Millie Park', model_path=models_path / f'Millie_Park.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/MILLIE_PARK/Millie_Park.dfm'),
DFMModelInfo(name='Mr. Bean', model_path=models_path / f'Mr_Bean.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/MR_BEAN/Mr_Bean.dfm'),
DFMModelInfo(name='Natalie Fatman', model_path=models_path / f'Natalie_Fatman.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/NATALIE_FATMAN/Natalie_Fatman.dfm'),
DFMModelInfo(name='Natasha Former', model_path=models_path / f'Natasha_Former.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/NATASHA_FORMER/Natasha_Former.dfm'),
DFMModelInfo(name='Nicola Badge', model_path=models_path / f'Nicola_Badge.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/NICOLA_BADGE/Nicola_Badge.dfm'),
DFMModelInfo(name='Rob Doe', model_path=models_path / f'Rob_Doe.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/ROB_DOE/Rob_Doe.dfm'),
DFMModelInfo(name='Silwan Stillwone', model_path=models_path / f'Silwan_Stillwone.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/SILWAN_STILLWONE/Silwan_Stillwone.dfm'),
DFMModelInfo(name='Tina Shift', model_path=models_path / f'Tina_Shift.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/TINA_SHIFT/Tina_Shift.dfm'),
DFMModelInfo(name='Tim Chrys', model_path=models_path / f'Tim_Chrys.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/TIM_CHRYS/Tim_Chrys.dfm'),
DFMModelInfo(name='Tim Norland', model_path=models_path / f'Tim_Norland.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/TIM_NORLAND/Tim_Norland.dfm'),
DFMModelInfo(name='Yohanna Coralson', model_path=models_path / f'Yohanna_Coralson.dfm', url=rf'https://github.com/iperov/DeepFaceLive/releases/download/YOHANNA_CORALSON/Yohanna_Coralson.dfm'),
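Each entry above pairs a display name with a GitHub release URL. As a hedged sketch of how such an entry could be fetched on demand (not the app's actual downloader, and it assumes DFMModelInfo exposes its constructor arguments as attributes):

import urllib.request
from pathlib import Path

def ensure_model(models_path : Path, name : str) -> Path:
    # hypothetical helper for illustration only
    info = next(m for m in get_available_models_info(models_path) if m.name == name)
    if not info.model_path.exists():
        urllib.request.urlretrieve(info.url, info.model_path)  # plain download: no resume, no progress
    return info.model_path

# ensure_model(Path('dfm_models'), 'Keanu Reeves')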


@ -0,0 +1,93 @@
from pathlib import Path
from typing import List

import cv2
import numpy as np
import onnx
from onnx import numpy_helper

from xlib.file import SplittedFile
from xlib.image import ImageProcessor
from xlib.onnxruntime import (InferenceSession_with_device, ORTDeviceInfo,
                              get_available_devices_info)


class InsightFaceSwap:
    """
    arguments

     device_info    ORTDeviceInfo

        use InsightFaceSwap.get_available_devices()
        to determine a list of available devices accepted by the model

    raises
     Exception
    """
    @staticmethod
    def get_available_devices() -> List[ORTDeviceInfo]:
        return get_available_devices_info()

    def __init__(self, device_info : ORTDeviceInfo):
        if device_info not in InsightFaceSwap.get_available_devices():
            raise Exception(f'device_info {device_info} is not in available devices for InsightFaceSwap')

        inswapper_path = Path(__file__).parent / 'inswapper_128.onnx'
        SplittedFile.merge(inswapper_path, delete_parts=False)
        if not inswapper_path.exists():
            raise FileNotFoundError(f'{inswapper_path} not found')

        w600k_path = Path(__file__).parent / 'w600k_r50.onnx'
        SplittedFile.merge(w600k_path, delete_parts=False)
        if not w600k_path.exists():
            raise FileNotFoundError(f'{w600k_path} not found')

        self._sess_swap = InferenceSession_with_device(str(inswapper_path), device_info)
        self._sess_rec = InferenceSession_with_device(str(w600k_path), device_info)

        swap_onnx_model = onnx.load(str(inswapper_path))
        self._emap = numpy_helper.to_array(swap_onnx_model.graph.initializer[-1])

    def get_input_size(self):
        """
        returns the optimal width/height for input images, so you can resize the source image to avoid extra processing
        """
        return 128

    def get_face_vector_input_size(self):
        return 112

    def get_face_vector(self, img : np.ndarray) -> np.ndarray:
        ip = ImageProcessor(img)
        ip.fit_in(TW=112, TH=112, pad_to_target=True, allow_upscale=True)
        img = ip.ch(3).to_ufloat32().get_image('NCHW')
        latent = self._sess_rec.run([self._sess_rec.get_outputs()[0].name], {self._sess_rec.get_inputs()[0].name: img,})[0]
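        # Project the ArcFace embedding through the inswapper's 'emap' matrix
        # (its last ONNX initializer) into the generator's latent space, then normalize.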
        latent = np.dot(latent.reshape(1, -1,), self._emap)
        latent /= np.linalg.norm(latent)
        return latent

    def generate(self, img : np.ndarray, face_vector : np.ndarray):
        """
        arguments

         img            np.ndarray  HW HWC 1HWC  uint8/float32

         face_vector    np.ndarray
        """
        ip_target = ImageProcessor(img)
        dtype = ip_target.get_dtype()
        _,H,W,_ = ip_target.get_dims()

        out = self._sess_swap.run(['output'], {'target' : ip_target.resize( (128, 128) ).ch(3).to_ufloat32().get_image('NCHW'),
                                               'source' : face_vector}
                                 )[0].transpose(0,2,3,1)[0]
        out = ImageProcessor(out).to_dtype(dtype).resize((W,H)).get_image('HWC')
        return out
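For orientation, a minimal usage sketch (not from the repo): it assumes 'my_face.jpg' and 'target_face.jpg' are already face-aligned crops; in the app, the FaceSwapInsight worker produces such crops via YoloV5Face and InsightFace2D106.

import cv2
from modelhub.onnx import InsightFaceSwap

device = InsightFaceSwap.get_available_devices()[0]
swapper = InsightFaceSwap(device)

source = cv2.imread('my_face.jpg')      # identity photo, fitted internally to 112x112
target = cv2.imread('target_face.jpg')  # crop to be swapped, resized internally to 128x128

vec = swapper.get_face_vector(source)   # normalized identity latent
out = swapper.generate(target, vec)     # swapped crop, resized back to the target's size
cv2.imwrite('swapped.jpg', out)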


@ -1,6 +1,7 @@
from .CenterFace.CenterFace import CenterFace
from .FaceMesh.FaceMesh import FaceMesh
from .InsightFace2d106.InsightFace2D106 import InsightFace2D106
from .InsightFaceSwap.InsightFaceSwap import InsightFaceSwap
from .LIA.LIA import LIA
from .S3FD.S3FD import S3FD
from .YoloV5Face.YoloV5Face import YoloV5Face
from .InsightFace2d106.InsightFace2D106 import InsightFace2D106
from .LIA.LIA import LIA


@ -10,6 +10,8 @@ from xlib import cv as lib_cv
repo_root = Path(__file__).parent.parent
large_files_list = [ (repo_root / 'modelhub' / 'onnx' / 'S3FD' / 'S3FD.onnx', 48*1024*1024),
(repo_root / 'modelhub' / 'onnx' / 'LIA' / 'generator.onnx', 48*1024*1024),
(repo_root / 'modelhub' / 'onnx' / 'InsightFaceSwap' / 'inswapper_128.onnx', 48*1024*1024),
(repo_root / 'modelhub' / 'onnx' / 'InsightFaceSwap' / 'w600k_r50.onnx', 48*1024*1024),
(repo_root / 'modelhub' / 'torch' / 'S3FD' / 'S3FD.pth', 48*1024*1024),
(repo_root / 'modelhub' / 'cv' / 'FaceMarkerLBF' / 'lbfmodel.yaml', 34*1024*1024),
]
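The SplittedFile.merge calls in InsightFaceSwap above reassemble these parts at runtime; this list presumably drives the splitting step at export time. A hedged sketch, assuming SplittedFile.split(path, part_size) exists as the counterpart of SplittedFile.merge:

from xlib.file import SplittedFile

for file_path, part_size in large_files_list:
    SplittedFile.split(file_path, part_size)  # assumed API: split into parts no larger than part_size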