added module: Face Swap (Insight)

This commit is contained in:
iperov 2023-07-09 22:01:38 +04:00
parent c7478635cc
commit c4511db198
33 changed files with 567 additions and 75 deletions

View file

@ -12,7 +12,7 @@
<tr><td colspan=2 align="center"> <tr><td colspan=2 align="center">
## Face Swapper ## Face Swap (DFM)
You can swap your face from a webcam or the face in the video using trained face models. You can swap your face from a webcam or the face in the video using trained face models.
@ -242,6 +242,21 @@ Here is an <a href="https://www.tiktok.com/@arnoldschwarzneggar/video/6995538782
<tr><td colspan=2 align="center"> <tr><td colspan=2 align="center">
## Face Swap (Insight)
You can swap a face from a webcam or a video using just a single photo of the source face.
<img src="doc/lukashenko.png" width=128></img>
<img src="doc/insight_faceswap_example.gif"></img>
</td></tr>
</table>
<table align="center" border="0">
<tr><td colspan=2 align="center">
## Face Animator ## Face Animator
There is also a Face Animator module in DeepFaceLive app. You can control a static face picture using video or your own face from the camera. The quality is not the best, and requires fine face matching and tuning parameters for every face pair, but enough for funny videos and memes or real-time streaming at 25 fps using 35 TFLOPS GPU. There is also a Face Animator module in DeepFaceLive app. You can control a static face picture using video or your own face from the camera. The quality is not the best, and requires fine face matching and tuning parameters for every face pair, but enough for funny videos and memes or real-time streaming at 25 fps using 35 TFLOPS GPU.

View file

@ -11,18 +11,19 @@ from xlib.qt.widgets.QXLabel import QXLabel
from . import backend from . import backend
from .ui.QCameraSource import QCameraSource from .ui.QCameraSource import QCameraSource
from .ui.QFaceAligner import QFaceAligner from .ui.QFaceAligner import QFaceAligner
from .ui.QFaceAnimator import QFaceAnimator
from .ui.QFaceDetector import QFaceDetector from .ui.QFaceDetector import QFaceDetector
from .ui.QFaceMarker import QFaceMarker from .ui.QFaceMarker import QFaceMarker
from .ui.QFaceMerger import QFaceMerger from .ui.QFaceMerger import QFaceMerger
from .ui.QFaceAnimator import QFaceAnimator from .ui.QFaceSwapInsight import QFaceSwapInsight
from .ui.QFaceSwapper import QFaceSwapper from .ui.QFaceSwapDFM import QFaceSwapDFM
from .ui.QFileSource import QFileSource from .ui.QFileSource import QFileSource
from .ui.QFrameAdjuster import QFrameAdjuster from .ui.QFrameAdjuster import QFrameAdjuster
from .ui.QStreamOutput import QStreamOutput from .ui.QStreamOutput import QStreamOutput
from .ui.widgets.QBCFaceAlignViewer import QBCFaceAlignViewer from .ui.widgets.QBCFaceAlignViewer import QBCFaceAlignViewer
from .ui.widgets.QBCFaceSwapViewer import QBCFaceSwapViewer from .ui.widgets.QBCFaceSwapViewer import QBCFaceSwapViewer
from .ui.widgets.QBCMergedFrameViewer import QBCMergedFrameViewer
from .ui.widgets.QBCFrameViewer import QBCFrameViewer from .ui.widgets.QBCFrameViewer import QBCFrameViewer
from .ui.widgets.QBCMergedFrameViewer import QBCMergedFrameViewer
class QLiveSwap(qtx.QXWidget): class QLiveSwap(qtx.QXWidget):
@ -58,13 +59,13 @@ class QLiveSwap(qtx.QXWidget):
face_marker = self.face_marker = backend.FaceMarker (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_detector_bc_out, bc_out=face_marker_bc_out, backend_db=backend_db) face_marker = self.face_marker = backend.FaceMarker (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_detector_bc_out, bc_out=face_marker_bc_out, backend_db=backend_db)
face_aligner = self.face_aligner = backend.FaceAligner (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_marker_bc_out, bc_out=face_aligner_bc_out, backend_db=backend_db ) face_aligner = self.face_aligner = backend.FaceAligner (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_marker_bc_out, bc_out=face_aligner_bc_out, backend_db=backend_db )
face_animator = self.face_animator = backend.FaceAnimator (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_aligner_bc_out, bc_out=face_merger_bc_out, animatables_path=animatables_path, backend_db=backend_db ) face_animator = self.face_animator = backend.FaceAnimator (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_aligner_bc_out, bc_out=face_merger_bc_out, animatables_path=animatables_path, backend_db=backend_db )
face_swap_insight = self.face_swap_insight = backend.FaceSwapInsight (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_aligner_bc_out, bc_out=face_swapper_bc_out, faces_path=animatables_path, backend_db=backend_db )
face_swapper = self.face_swapper = backend.FaceSwapper (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_aligner_bc_out, bc_out=face_swapper_bc_out, dfm_models_path=dfm_models_path, backend_db=backend_db ) face_swap_dfm = self.face_swap_dfm = backend.FaceSwapDFM (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_aligner_bc_out, bc_out=face_swapper_bc_out, dfm_models_path=dfm_models_path, backend_db=backend_db )
frame_adjuster = self.frame_adjuster = backend.FrameAdjuster(weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_swapper_bc_out, bc_out=frame_adjuster_bc_out, backend_db=backend_db ) frame_adjuster = self.frame_adjuster = backend.FrameAdjuster(weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_swapper_bc_out, bc_out=frame_adjuster_bc_out, backend_db=backend_db )
face_merger = self.face_merger = backend.FaceMerger (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=frame_adjuster_bc_out, bc_out=face_merger_bc_out, backend_db=backend_db ) face_merger = self.face_merger = backend.FaceMerger (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=frame_adjuster_bc_out, bc_out=face_merger_bc_out, backend_db=backend_db )
stream_output = self.stream_output = backend.StreamOutput (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_merger_bc_out, save_default_path=userdata_path, backend_db=backend_db) stream_output = self.stream_output = backend.StreamOutput (weak_heap=backend_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_merger_bc_out, save_default_path=userdata_path, backend_db=backend_db)
self.all_backends : List[backend.BackendHost] = [file_source, camera_source, face_detector, face_marker, face_aligner, face_animator, face_swapper, frame_adjuster, face_merger, stream_output] self.all_backends : List[backend.BackendHost] = [file_source, camera_source, face_detector, face_marker, face_aligner, face_animator, face_swap_insight, face_swap_dfm, frame_adjuster, face_merger, stream_output]
self.q_file_source = QFileSource(self.file_source) self.q_file_source = QFileSource(self.file_source)
self.q_camera_source = QCameraSource(self.camera_source) self.q_camera_source = QCameraSource(self.camera_source)
@ -72,7 +73,8 @@ class QLiveSwap(qtx.QXWidget):
self.q_face_marker = QFaceMarker(self.face_marker) self.q_face_marker = QFaceMarker(self.face_marker)
self.q_face_aligner = QFaceAligner(self.face_aligner) self.q_face_aligner = QFaceAligner(self.face_aligner)
self.q_face_animator = QFaceAnimator(self.face_animator, animatables_path=animatables_path) self.q_face_animator = QFaceAnimator(self.face_animator, animatables_path=animatables_path)
self.q_face_swapper = QFaceSwapper(self.face_swapper, dfm_models_path=dfm_models_path) self.q_face_swap_insight = QFaceSwapInsight(self.face_swap_insight, faces_path=animatables_path)
self.q_face_swap_dfm = QFaceSwapDFM(self.face_swap_dfm, dfm_models_path=dfm_models_path)
self.q_frame_adjuster = QFrameAdjuster(self.frame_adjuster) self.q_frame_adjuster = QFrameAdjuster(self.frame_adjuster)
self.q_face_merger = QFaceMerger(self.face_merger) self.q_face_merger = QFaceMerger(self.face_merger)
self.q_stream_output = QStreamOutput(self.stream_output) self.q_stream_output = QStreamOutput(self.stream_output)
@ -83,8 +85,8 @@ class QLiveSwap(qtx.QXWidget):
self.q_ds_merged_frame_viewer = QBCMergedFrameViewer(backend_weak_heap, face_merger_bc_out) self.q_ds_merged_frame_viewer = QBCMergedFrameViewer(backend_weak_heap, face_merger_bc_out)
q_nodes = qtx.QXWidgetHBox([ qtx.QXWidgetVBox([self.q_file_source, self.q_camera_source], spacing=5, fixed_width=256), q_nodes = qtx.QXWidgetHBox([ qtx.QXWidgetVBox([self.q_file_source, self.q_camera_source], spacing=5, fixed_width=256),
qtx.QXWidgetVBox([self.q_face_detector, self.q_face_aligner,], spacing=5, fixed_width=256), qtx.QXWidgetVBox([self.q_face_detector, self.q_face_aligner, ], spacing=5, fixed_width=256),
qtx.QXWidgetVBox([self.q_face_marker, self.q_face_animator, self.q_face_swapper], spacing=5, fixed_width=256), qtx.QXWidgetVBox([self.q_face_marker, self.q_face_animator, self.q_face_swap_insight, self.q_face_swap_dfm], spacing=5, fixed_width=256),
qtx.QXWidgetVBox([self.q_frame_adjuster, self.q_face_merger, self.q_stream_output], spacing=5, fixed_width=256), qtx.QXWidgetVBox([self.q_frame_adjuster, self.q_face_merger, self.q_stream_output], spacing=5, fixed_width=256),
], spacing=5, size_policy=('fixed', 'fixed') ) ], spacing=5, size_policy=('fixed', 'fixed') )
@ -112,7 +114,7 @@ class QLiveSwap(qtx.QXWidget):
def initialize(self): def initialize(self):
for bcknd in self.all_backends: for bcknd in self.all_backends:
default_state = True default_state = True
if isinstance(bcknd, (backend.CameraSource, backend.FaceAnimator) ): if isinstance(bcknd, (backend.CameraSource, backend.FaceAnimator, backend.FaceSwapInsight) ):
default_state = False default_state = False
bcknd.restore_on_off_state(default_state=default_state) bcknd.restore_on_off_state(default_state=default_state)

View file

@ -14,13 +14,13 @@ from .BackendBase import (BackendConnection, BackendDB, BackendHost,
BackendWorkerState) BackendWorkerState)
class FaceSwapper(BackendHost): class FaceSwapDFM(BackendHost):
def __init__(self, weak_heap : BackendWeakHeap, reemit_frame_signal : BackendSignal, bc_in : BackendConnection, bc_out : BackendConnection, dfm_models_path : Path, backend_db : BackendDB = None, def __init__(self, weak_heap : BackendWeakHeap, reemit_frame_signal : BackendSignal, bc_in : BackendConnection, bc_out : BackendConnection, dfm_models_path : Path, backend_db : BackendDB = None,
id : int = 0): id : int = 0):
self._id = id self._id = id
super().__init__(backend_db=backend_db, super().__init__(backend_db=backend_db,
sheet_cls=Sheet, sheet_cls=Sheet,
worker_cls=FaceSwapperWorker, worker_cls=FaceSwapDFMWorker,
worker_state_cls=WorkerState, worker_state_cls=WorkerState,
worker_start_args=[weak_heap, reemit_frame_signal, bc_in, bc_out, dfm_models_path]) worker_start_args=[weak_heap, reemit_frame_signal, bc_in, bc_out, dfm_models_path])
@ -29,7 +29,7 @@ class FaceSwapper(BackendHost):
def _get_name(self): def _get_name(self):
return super()._get_name()# + f'{self._id}' return super()._get_name()# + f'{self._id}'
class FaceSwapperWorker(BackendWorker): class FaceSwapDFMWorker(BackendWorker):
def get_state(self) -> 'WorkerState': return super().get_state() def get_state(self) -> 'WorkerState': return super().get_state()
def get_control_sheet(self) -> 'Sheet.Worker': return super().get_control_sheet() def get_control_sheet(self) -> 'Sheet.Worker': return super().get_control_sheet()
@ -218,7 +218,7 @@ class FaceSwapperWorker(BackendWorker):
if events.new_status_downloading: if events.new_status_downloading:
self.set_busy(False) self.set_busy(False)
cs.model_dl_progress.enable() cs.model_dl_progress.enable()
cs.model_dl_progress.set_config( lib_csw.Progress.Config(title='@FaceSwapper.downloading_model') ) cs.model_dl_progress.set_config( lib_csw.Progress.Config(title='@FaceSwapDFM.downloading_model') )
cs.model_dl_progress.set_progress(0) cs.model_dl_progress.set_progress(0)
elif events.new_status_initialized: elif events.new_status_initialized:
@ -229,12 +229,12 @@ class FaceSwapperWorker(BackendWorker):
cs.model_info_label.enable() cs.model_info_label.enable()
cs.model_info_label.set_config( lib_csw.InfoLabel.Config(info_icon=True, cs.model_info_label.set_config( lib_csw.InfoLabel.Config(info_icon=True,
info_lines=[f'@FaceSwapper.model_information', info_lines=[f'@FaceSwapDFM.model_information',
'', '',
f'@FaceSwapper.filename', f'@FaceSwapDFM.filename',
f'{self.dfm_model.get_model_path().name}', f'{self.dfm_model.get_model_path().name}',
'', '',
f'@FaceSwapper.resolution', f'@FaceSwapDFM.resolution',
f'{model_width}x{model_height}']) ) f'{model_width}x{model_height}']) )
cs.swap_all_faces.enable() cs.swap_all_faces.enable()

View file

@ -0,0 +1,269 @@
import time
from pathlib import Path
import cv2
import numpy as np
from modelhub.onnx import InsightFace2D106, InsightFaceSwap, YoloV5Face
from xlib import cv as lib_cv2
from xlib import os as lib_os
from xlib import path as lib_path
from xlib.face import ELandmarks2D, FLandmarks2D, FRect
from xlib.image.ImageProcessor import ImageProcessor
from xlib.mp import csw as lib_csw
from .BackendBase import (BackendConnection, BackendDB, BackendHost,
BackendSignal, BackendWeakHeap, BackendWorker,
BackendWorkerState)
class FaceSwapInsight(BackendHost):
def __init__(self, weak_heap : BackendWeakHeap, reemit_frame_signal : BackendSignal, bc_in : BackendConnection, bc_out : BackendConnection, faces_path : Path, backend_db : BackendDB = None,
id : int = 0):
self._id = id
super().__init__(backend_db=backend_db,
sheet_cls=Sheet,
worker_cls=FaceSwapInsightWorker,
worker_state_cls=WorkerState,
worker_start_args=[weak_heap, reemit_frame_signal, bc_in, bc_out, faces_path])
def get_control_sheet(self) -> 'Sheet.Host': return super().get_control_sheet()
def _get_name(self):
return super()._get_name()
class FaceSwapInsightWorker(BackendWorker):
def get_state(self) -> 'WorkerState': return super().get_state()
def get_control_sheet(self) -> 'Sheet.Worker': return super().get_control_sheet()
def on_start(self, weak_heap : BackendWeakHeap, reemit_frame_signal : BackendSignal, bc_in : BackendConnection, bc_out : BackendConnection, faces_path : Path):
self.weak_heap = weak_heap
self.reemit_frame_signal = reemit_frame_signal
self.bc_in = bc_in
self.bc_out = bc_out
self.faces_path = faces_path
self.pending_bcd = None
self.swap_model : InsightFaceSwap = None
self.target_face_img = None
self.face_vector = None
lib_os.set_timer_resolution(1)
state, cs = self.get_state(), self.get_control_sheet()
cs.device.call_on_selected(self.on_cs_device)
cs.face.call_on_selected(self.on_cs_face)
cs.adjust_c.call_on_number(self.on_cs_adjust_c)
cs.adjust_x.call_on_number(self.on_cs_adjust_x)
cs.adjust_y.call_on_number(self.on_cs_adjust_y)
cs.animator_face_id.call_on_number(self.on_cs_animator_face_id)
cs.update_faces.call_on_signal(self.update_faces)
cs.device.enable()
cs.device.set_choices( InsightFaceSwap.get_available_devices(), none_choice_name='@misc.menu_select')
cs.device.select(state.device)
def update_faces(self):
state, cs = self.get_state(), self.get_control_sheet()
cs.face.set_choices([face_path.name for face_path in lib_path.get_files_paths(self.faces_path, extensions=['.jpg','.jpeg','.png'])], none_choice_name='@misc.menu_select')
def on_cs_device(self, idx, device):
state, cs = self.get_state(), self.get_control_sheet()
if device is not None and state.device == device:
self.swap_model = InsightFaceSwap(device)
self.face_detector = YoloV5Face(device)
self.face_marker = InsightFace2D106(device)
cs.face.enable()
self.update_faces()
cs.face.select(state.face)
cs.adjust_c.enable()
cs.adjust_c.set_config(lib_csw.Number.Config(min=1.0, max=2.0, step=0.01, decimals=2, allow_instant_update=True))
adjust_c = state.adjust_c
if adjust_c is None:
adjust_c = 1.55
cs.adjust_c.set_number(adjust_c)
cs.adjust_x.enable()
cs.adjust_x.set_config(lib_csw.Number.Config(min=-0.5, max=0.5, step=0.01, decimals=2, allow_instant_update=True))
adjust_x = state.adjust_x
if adjust_x is None:
adjust_x = 0.0
cs.adjust_x.set_number(adjust_x)
cs.adjust_y.enable()
cs.adjust_y.set_config(lib_csw.Number.Config(min=-0.5, max=0.5, step=0.01, decimals=2, allow_instant_update=True))
adjust_y = state.adjust_y
if adjust_y is None:
adjust_y = -0.15
cs.adjust_y.set_number(adjust_y)
cs.animator_face_id.enable()
cs.animator_face_id.set_config(lib_csw.Number.Config(min=0, max=16, step=1, decimals=0, allow_instant_update=True))
cs.animator_face_id.set_number(state.animator_face_id if state.animator_face_id is not None else 0)
cs.update_faces.enable()
else:
state.device = device
self.save_state()
self.restart()
def on_cs_face(self, idx, face):
state, cs = self.get_state(), self.get_control_sheet()
state.face = face
self.face_vector = None
self.target_face_img = None
if face is not None:
try:
self.target_face_img = lib_cv2.imread(self.faces_path / face)
except Exception as e:
cs.face.unselect()
self.save_state()
self.reemit_frame_signal.send()
def on_cs_adjust_c(self, adjust_c):
state, cs = self.get_state(), self.get_control_sheet()
cfg = cs.adjust_c.get_config()
adjust_c = state.adjust_c = np.clip(adjust_c, cfg.min, cfg.max)
cs.adjust_c.set_number(adjust_c)
self.face_vector = None
self.save_state()
self.reemit_frame_signal.send()
def on_cs_adjust_x(self, adjust_x):
state, cs = self.get_state(), self.get_control_sheet()
cfg = cs.adjust_x.get_config()
adjust_x = state.adjust_x = np.clip(adjust_x, cfg.min, cfg.max)
cs.adjust_x.set_number(adjust_x)
self.face_vector = None
self.save_state()
self.reemit_frame_signal.send()
def on_cs_adjust_y(self, adjust_y):
state, cs = self.get_state(), self.get_control_sheet()
cfg = cs.adjust_y.get_config()
adjust_y = state.adjust_y = np.clip(adjust_y, cfg.min, cfg.max)
cs.adjust_y.set_number(adjust_y)
self.face_vector = None
self.save_state()
self.reemit_frame_signal.send()
def on_cs_animator_face_id(self, animator_face_id):
state, cs = self.get_state(), self.get_control_sheet()
cfg = cs.animator_face_id.get_config()
animator_face_id = state.animator_face_id = int(np.clip(animator_face_id, cfg.min, cfg.max))
cs.animator_face_id.set_number(animator_face_id)
self.save_state()
self.reemit_frame_signal.send()
def on_tick(self):
state, cs = self.get_state(), self.get_control_sheet()
if self.pending_bcd is None:
self.start_profile_timing()
bcd = self.bc_in.read(timeout=0.005)
if bcd is not None:
bcd.assign_weak_heap(self.weak_heap)
if self.face_vector is None and self.target_face_img is not None:
rects = self.face_detector.extract (self.target_face_img, threshold=0.5)[0]
if len(rects) > 0:
_,H,W,_ = ImageProcessor(self.target_face_img).get_dims()
u_rects = [ FRect.from_ltrb( (l/W, t/H, r/W, b/H) ) for l,t,r,b in rects ]
face_urect = FRect.sort_by_area_size(u_rects)[0] # sorted by largest
face_image, face_uni_mat = face_urect.cut(self.target_face_img, 1.6, 192)
lmrks = self.face_marker.extract(face_image)[0]
lmrks = lmrks[...,0:2] / (192,192)
face_ulmrks = FLandmarks2D.create (ELandmarks2D.L106, lmrks).transform(face_uni_mat, invert=True)
face_align_img, _ = face_ulmrks.cut(self.target_face_img, state.adjust_c,
self.swap_model.get_face_vector_input_size(),
x_offset=state.adjust_x,
y_offset=state.adjust_y)
self.face_vector = self.swap_model.get_face_vector(face_align_img)
swap_model = self.swap_model
if swap_model is not None and self.face_vector is not None:
for i, fsi in enumerate(bcd.get_face_swap_info_list()):
if state.animator_face_id == i:
face_align_image = bcd.get_image(fsi.face_align_image_name)
if face_align_image is not None:
_,H,W,_ = ImageProcessor(face_align_image).get_dims()
anim_image = swap_model.generate(face_align_image, self.face_vector)
anim_image = ImageProcessor(anim_image).resize((W,H)).get_image('HWC')
fsi.face_align_mask_name = f'{fsi.face_align_image_name}_mask'
fsi.face_swap_image_name = f'{fsi.face_align_image_name}_swapped'
fsi.face_swap_mask_name = f'{fsi.face_swap_image_name}_mask'
bcd.set_image(fsi.face_swap_image_name, anim_image)
white_mask = np.full_like(anim_image, 255, dtype=np.uint8)
bcd.set_image(fsi.face_align_mask_name, white_mask)
bcd.set_image(fsi.face_swap_mask_name, white_mask)
break
self.stop_profile_timing()
self.pending_bcd = bcd
if self.pending_bcd is not None:
if self.bc_out.is_full_read(1):
self.bc_out.write(self.pending_bcd)
self.pending_bcd = None
else:
time.sleep(0.001)
class Sheet:
class Host(lib_csw.Sheet.Host):
def __init__(self):
super().__init__()
self.device = lib_csw.DynamicSingleSwitch.Client()
self.face = lib_csw.DynamicSingleSwitch.Client()
self.animator_face_id = lib_csw.Number.Client()
self.update_faces = lib_csw.Signal.Client()
self.adjust_c = lib_csw.Number.Client()
self.adjust_x = lib_csw.Number.Client()
self.adjust_y = lib_csw.Number.Client()
class Worker(lib_csw.Sheet.Worker):
def __init__(self):
super().__init__()
self.device = lib_csw.DynamicSingleSwitch.Host()
self.face = lib_csw.DynamicSingleSwitch.Host()
self.animator_face_id = lib_csw.Number.Host()
self.update_faces = lib_csw.Signal.Host()
self.adjust_c = lib_csw.Number.Host()
self.adjust_x = lib_csw.Number.Host()
self.adjust_y = lib_csw.Number.Host()
class WorkerState(BackendWorkerState):
device = None
face : str = None
animator_face_id : int = None
adjust_c : float = None
adjust_x : float = None
adjust_y : float = None

View file

@ -7,7 +7,8 @@ from .FaceAnimator import FaceAnimator
from .FaceDetector import FaceDetector from .FaceDetector import FaceDetector
from .FaceMarker import FaceMarker from .FaceMarker import FaceMarker
from .FaceMerger import FaceMerger from .FaceMerger import FaceMerger
from .FaceSwapper import FaceSwapper from .FaceSwapInsight import FaceSwapInsight
from .FaceSwapDFM import FaceSwapDFM
from .FileSource import FileSource from .FileSource import FileSource
from .FrameAdjuster import FrameAdjuster from .FrameAdjuster import FrameAdjuster
from .StreamOutput import StreamOutput from .StreamOutput import StreamOutput

View file

@ -4,7 +4,7 @@ from localization import L
from resources.gfx import QXImageDB from resources.gfx import QXImageDB
from xlib import qt as qtx from xlib import qt as qtx
from ..backend import FaceSwapper from ..backend import FaceSwapDFM
from .widgets.QBackendPanel import QBackendPanel from .widgets.QBackendPanel import QBackendPanel
from .widgets.QCheckBoxCSWFlag import QCheckBoxCSWFlag from .widgets.QCheckBoxCSWFlag import QCheckBoxCSWFlag
from .widgets.QComboBoxCSWDynamicSingleSwitch import \ from .widgets.QComboBoxCSWDynamicSingleSwitch import \
@ -17,8 +17,8 @@ from .widgets.QSliderCSWNumber import QSliderCSWNumber
from .widgets.QSpinBoxCSWNumber import QSpinBoxCSWNumber from .widgets.QSpinBoxCSWNumber import QSpinBoxCSWNumber
class QFaceSwapper(QBackendPanel): class QFaceSwapDFM(QBackendPanel):
def __init__(self, backend : FaceSwapper, dfm_models_path : Path): def __init__(self, backend : FaceSwapDFM, dfm_models_path : Path):
self._dfm_models_path = dfm_models_path self._dfm_models_path = dfm_models_path
cs = backend.get_control_sheet() cs = backend.get_control_sheet()
@ -28,7 +28,7 @@ class QFaceSwapper(QBackendPanel):
q_device_label = QLabelPopupInfo(label=L('@common.device'), popup_info_text=L('@common.help.device') ) q_device_label = QLabelPopupInfo(label=L('@common.device'), popup_info_text=L('@common.help.device') )
q_device = QComboBoxCSWDynamicSingleSwitch(cs.device, reflect_state_widgets=[q_device_label]) q_device = QComboBoxCSWDynamicSingleSwitch(cs.device, reflect_state_widgets=[q_device_label])
q_model_label = QLabelPopupInfo(label=L('@QFaceSwapper.model'), popup_info_text=L('@QFaceSwapper.help.model') ) q_model_label = QLabelPopupInfo(label=L('@QFaceSwapDFM.model'), popup_info_text=L('@QFaceSwapDFM.help.model') )
q_model = QComboBoxCSWDynamicSingleSwitch(cs.model, reflect_state_widgets=[q_model_label, btn_open_folder]) q_model = QComboBoxCSWDynamicSingleSwitch(cs.model, reflect_state_widgets=[q_model_label, btn_open_folder])
q_model_dl_error = self._q_model_dl_error = QErrorCSWError(cs.model_dl_error) q_model_dl_error = self._q_model_dl_error = QErrorCSWError(cs.model_dl_error)
@ -36,31 +36,31 @@ class QFaceSwapper(QBackendPanel):
q_model_info_label = self._q_model_info_label = QLabelPopupInfoCSWInfoLabel(cs.model_info_label) q_model_info_label = self._q_model_info_label = QLabelPopupInfoCSWInfoLabel(cs.model_info_label)
q_swap_all_faces_label = QLabelPopupInfo(label=L('@QFaceSwapper.swap_all_faces') ) q_swap_all_faces_label = QLabelPopupInfo(label=L('@QFaceSwapDFM.swap_all_faces') )
q_swap_all_faces = QCheckBoxCSWFlag(cs.swap_all_faces, reflect_state_widgets=[q_swap_all_faces_label]) q_swap_all_faces = QCheckBoxCSWFlag(cs.swap_all_faces, reflect_state_widgets=[q_swap_all_faces_label])
q_face_id_label = QLabelPopupInfo(label=L('@QFaceSwapper.face_id'), popup_info_text=L('@QFaceSwapper.help.face_id') ) q_face_id_label = QLabelPopupInfo(label=L('@common.face_id'), popup_info_text=L('@QFaceSwapDFM.help.face_id') )
q_face_id = QSpinBoxCSWNumber(cs.face_id, reflect_state_widgets=[q_face_id_label]) q_face_id = QSpinBoxCSWNumber(cs.face_id, reflect_state_widgets=[q_face_id_label])
q_morph_factor_label = QLabelPopupInfo(label=L('@QFaceSwapper.morph_factor'), popup_info_text=L('@QFaceSwapper.help.morph_factor') ) q_morph_factor_label = QLabelPopupInfo(label=L('@QFaceSwapDFM.morph_factor'), popup_info_text=L('@QFaceSwapDFM.help.morph_factor') )
q_morph_factor = QSliderCSWNumber(cs.morph_factor, reflect_state_widgets=[q_morph_factor_label]) q_morph_factor = QSliderCSWNumber(cs.morph_factor, reflect_state_widgets=[q_morph_factor_label])
q_sharpen_amount_label = QLabelPopupInfo(label=L('@QFaceSwapper.presharpen_amount'), popup_info_text=L('@QFaceSwapper.help.presharpen_amount') ) q_sharpen_amount_label = QLabelPopupInfo(label=L('@QFaceSwapDFM.presharpen_amount'), popup_info_text=L('@QFaceSwapDFM.help.presharpen_amount') )
q_sharpen_amount = QSliderCSWNumber(cs.presharpen_amount, reflect_state_widgets=[q_sharpen_amount_label]) q_sharpen_amount = QSliderCSWNumber(cs.presharpen_amount, reflect_state_widgets=[q_sharpen_amount_label])
q_pre_gamma_label = QLabelPopupInfo(label=L('@QFaceSwapper.pregamma'), popup_info_text=L('@QFaceSwapper.help.pregamma') ) q_pre_gamma_label = QLabelPopupInfo(label=L('@QFaceSwapDFM.pregamma'), popup_info_text=L('@QFaceSwapDFM.help.pregamma') )
q_pre_gamma_red = QSpinBoxCSWNumber(cs.pre_gamma_red, reflect_state_widgets=[q_pre_gamma_label]) q_pre_gamma_red = QSpinBoxCSWNumber(cs.pre_gamma_red, reflect_state_widgets=[q_pre_gamma_label])
q_pre_gamma_green = QSpinBoxCSWNumber(cs.pre_gamma_green) q_pre_gamma_green = QSpinBoxCSWNumber(cs.pre_gamma_green)
q_pre_gamma_blue = QSpinBoxCSWNumber(cs.pre_gamma_blue) q_pre_gamma_blue = QSpinBoxCSWNumber(cs.pre_gamma_blue)
q_post_gamma_label = QLabelPopupInfo(label=L('@QFaceSwapper.postgamma')) q_post_gamma_label = QLabelPopupInfo(label=L('@QFaceSwapDFM.postgamma'))
q_post_gamma_red = QSpinBoxCSWNumber(cs.post_gamma_red, reflect_state_widgets=[q_post_gamma_label]) q_post_gamma_red = QSpinBoxCSWNumber(cs.post_gamma_red, reflect_state_widgets=[q_post_gamma_label])
q_post_gamma_green = QSpinBoxCSWNumber(cs.post_gamma_green) q_post_gamma_green = QSpinBoxCSWNumber(cs.post_gamma_green)
q_post_gamma_blue = QSpinBoxCSWNumber(cs.post_gamma_blue) q_post_gamma_blue = QSpinBoxCSWNumber(cs.post_gamma_blue)
q_two_pass_label = QLabelPopupInfo(label=L('@QFaceSwapper.two_pass'), popup_info_text=L('@QFaceSwapper.help.two_pass') ) q_two_pass_label = QLabelPopupInfo(label=L('@QFaceSwapDFM.two_pass'), popup_info_text=L('@QFaceSwapDFM.help.two_pass') )
q_two_pass = QCheckBoxCSWFlag(cs.two_pass, reflect_state_widgets=[q_two_pass_label]) q_two_pass = QCheckBoxCSWFlag(cs.two_pass, reflect_state_widgets=[q_two_pass_label])
grid_l = qtx.QXGridLayout( spacing=5) grid_l = qtx.QXGridLayout( spacing=5)
@ -94,7 +94,7 @@ class QFaceSwapper(QBackendPanel):
grid_l.addWidget(q_two_pass, row, 1) grid_l.addWidget(q_two_pass, row, 1)
row += 1 row += 1
super().__init__(backend, L('@QFaceSwapper.module_title'), super().__init__(backend, L('@QFaceSwapDFM.module_title'),
layout=qtx.QXVBoxLayout([grid_l]) ) layout=qtx.QXVBoxLayout([grid_l]) )

View file

@ -0,0 +1,71 @@
from pathlib import Path
from localization import L
from resources.gfx import QXImageDB
from xlib import qt as qtx
from ..backend import FaceSwapInsight
from .widgets.QBackendPanel import QBackendPanel
from .widgets.QComboBoxCSWDynamicSingleSwitch import \
QComboBoxCSWDynamicSingleSwitch
from .widgets.QLabelPopupInfo import QLabelPopupInfo
from .widgets.QSliderCSWNumber import QSliderCSWNumber
from .widgets.QSpinBoxCSWNumber import QSpinBoxCSWNumber
from .widgets.QXPushButtonCSWSignal import QXPushButtonCSWSignal
class QFaceSwapInsight(QBackendPanel):
def __init__(self, backend : FaceSwapInsight, faces_path : Path):
self._faces_path = faces_path
cs = backend.get_control_sheet()
btn_open_folder = self.btn_open_folder = qtx.QXPushButton(image = QXImageDB.eye_outline('light gray'), tooltip_text='Reveal in Explorer', released=self._btn_open_folder_released, fixed_size=(24,22) )
q_device_label = QLabelPopupInfo(label=L('@common.device'), popup_info_text=L('@common.help.device') )
q_device = QComboBoxCSWDynamicSingleSwitch(cs.device, reflect_state_widgets=[q_device_label])
q_face_label = QLabelPopupInfo(label=L('@QFaceSwapInsight.face') )
q_face = QComboBoxCSWDynamicSingleSwitch(cs.face, reflect_state_widgets=[q_face_label, btn_open_folder])
q_adjust_c_label = QLabelPopupInfo(label='C')
q_adjust_c = QSliderCSWNumber(cs.adjust_c, reflect_state_widgets=[q_adjust_c_label])
q_adjust_x_label = QLabelPopupInfo(label='X')
q_adjust_x = QSliderCSWNumber(cs.adjust_x, reflect_state_widgets=[q_adjust_x_label])
q_adjust_y_label = QLabelPopupInfo(label='Y')
q_adjust_y = QSliderCSWNumber(cs.adjust_y, reflect_state_widgets=[q_adjust_y_label])
q_animator_face_id_label = QLabelPopupInfo(label=L('@common.face_id') )
q_animator_face_id = QSpinBoxCSWNumber(cs.animator_face_id, reflect_state_widgets=[q_animator_face_id_label])
q_update_faces = QXPushButtonCSWSignal(cs.update_faces, image=QXImageDB.reload_outline('light gray'), button_size=(24,22) )
grid_l = qtx.QXGridLayout( spacing=5)
row = 0
grid_l.addWidget(q_device_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addWidget(q_device, row, 1, alignment=qtx.AlignLeft )
row += 1
grid_l.addWidget(q_face_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addLayout(qtx.QXHBoxLayout([q_face, 2, btn_open_folder, 2, q_update_faces]), row, 1 )
row += 1
grid_l.addWidget(q_adjust_c_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addWidget(q_adjust_c, row, 1 )
row += 1
grid_l.addWidget(q_adjust_x_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addWidget(q_adjust_x, row, 1 )
row += 1
grid_l.addWidget(q_adjust_y_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addWidget(q_adjust_y, row, 1 )
row += 1
grid_l.addWidget(q_animator_face_id_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
grid_l.addWidget(q_animator_face_id, row, 1, alignment=qtx.AlignLeft )
row += 1
super().__init__(backend, L('@QFaceSwapInsight.module_title'),
layout=qtx.QXVBoxLayout([grid_l]) )
def _btn_open_folder_released(self):
qtx.QDesktopServices.openUrl(qtx.QUrl.fromLocalFile( str(self._faces_path) ))

View file

@ -10,7 +10,7 @@ RUN ln -s /usr/bin/python3 /usr/bin/python
RUN git clone https://github.com/iperov/DeepFaceLive.git RUN git clone https://github.com/iperov/DeepFaceLive.git
RUN python -m pip install --upgrade pip RUN python -m pip install --upgrade pip
RUN python -m pip install onnxruntime-gpu==1.14.1 numpy==1.21.6 h5py numexpr protobuf==3.20.1 opencv-python==4.7.0.72 opencv-contrib-python==4.7.0.72 pyqt6==6.5.0 onnx==1.13.0 torch==1.13.1 torchvision==0.14.1 RUN python -m pip install onnxruntime-gpu==1.15.1 numpy==1.21.6 h5py numexpr protobuf==3.20.1 opencv-python==4.8.0.74 opencv-contrib-python==4.8.0.74 pyqt6==6.5.1 onnx==1.14.0 torch==1.13.1 torchvision==0.14.1
RUN apt install -y libnvidia-compute-$NV_VER RUN apt install -y libnvidia-compute-$NV_VER

Binary file not shown.

View file

@ -476,19 +476,19 @@ def build_deepfacelive_windows(release_dir, cache_dir, python_ver='3.8.10', back
builder.install_pip_package('h5py') builder.install_pip_package('h5py')
builder.install_pip_package('numexpr') builder.install_pip_package('numexpr')
builder.install_pip_package('protobuf==3.20.1') builder.install_pip_package('protobuf==3.20.1')
builder.install_pip_package('opencv-python==4.7.0.72') builder.install_pip_package('opencv-python==4.8.0.74')
builder.install_pip_package('opencv-contrib-python==4.7.0.72') builder.install_pip_package('opencv-contrib-python==4.8.0.74')
builder.install_pip_package('pyqt6==6.5.0') builder.install_pip_package('pyqt6==6.5.1')
builder.install_pip_package('onnx==1.13.0') builder.install_pip_package('onnx==1.14.0')
if backend == 'cuda': if backend == 'cuda':
#builder.install_pip_package('torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/torch_stable.html') #builder.install_pip_package('torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/torch_stable.html')
#builder.install_pip_package('torch==1.11.0+cu115 torchvision==0.12.0+cu115 -f https://download.pytorch.org/whl/torch_stable.html') #builder.install_pip_package('torch==1.11.0+cu115 torchvision==0.12.0+cu115 -f https://download.pytorch.org/whl/torch_stable.html')
builder.install_pip_package('torch==1.13.1+cu117 torchvision==0.14.1+cu117 -f https://download.pytorch.org/whl/torch_stable.html') builder.install_pip_package('torch==1.13.1+cu117 torchvision==0.14.1+cu117 -f https://download.pytorch.org/whl/torch_stable.html')
builder.install_pip_package('onnxruntime-gpu==1.14.1') builder.install_pip_package('onnxruntime-gpu==1.15.1')
elif backend == 'directml': elif backend == 'directml':
builder.install_pip_package('onnxruntime-directml==1.14.1') builder.install_pip_package('onnxruntime-directml==1.15.1')
builder.install_ffmpeg_binaries() builder.install_ffmpeg_binaries()

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.4 MiB

BIN
doc/lukashenko.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 311 KiB

View file

@ -65,7 +65,16 @@ class Localization:
'it-IT' : 'Regola la combinazione dei dispositivi del modulo per ottenere fps più alti o un minore utilizzo della CPU.', 'it-IT' : 'Regola la combinazione dei dispositivi del modulo per ottenere fps più alti o un minore utilizzo della CPU.',
'ja-JP' : 'モジュールデバイスの組み合わせを調整し、より高いfpsや低いCPU使用率を実現します', 'ja-JP' : 'モジュールデバイスの組み合わせを調整し、より高いfpsや低いCPU使用率を実現します',
'de-DE' : 'Passen Sie die Kombination der geräte in den Modulen an, um höhere Bildraten oder eine geringere CPU-Auslastung zu erzielen.'}, 'de-DE' : 'Passen Sie die Kombination der geräte in den Modulen an, um höhere Bildraten oder eine geringere CPU-Auslastung zu erzielen.'},
'common.face_id':{
'en-US' : 'Face ID',
'ru-RU' : 'Номер лица',
'zh-CN' : '人脸ID',
'es-ES' : 'ID de la cara',
'it-IT' : 'ID della faccia',
'ja-JP' : '顔のID番号',
'de-DE' : 'Gesichts ID'},
'QBackendPanel.start':{ 'QBackendPanel.start':{
'en-US' : 'Start', 'en-US' : 'Start',
'ru-RU' : 'Запустить', 'ru-RU' : 'Запустить',
@ -751,17 +760,44 @@ class Localization:
'it-IT' : 'Azzeramento della posa di riferimento', 'it-IT' : 'Azzeramento della posa di riferimento',
'ja-JP' : '元の画像に戻す', 'ja-JP' : '元の画像に戻す',
'de-DE' : 'Referenzpose zurücksetzen'}, 'de-DE' : 'Referenzpose zurücksetzen'},
'QFaceSwapInsight.module_title':{
'en-US' : 'Face swap (Insight)',
'ru-RU' : 'Замена лица (Insight)',
'zh-CN' : 'Face swap (Insight)',
'es-ES' : 'Face swap (Insight)',
'it-IT' : 'Face swap (Insight)',
'ja-JP' : 'Face swap (Insight)',
'de-DE' : 'Face swap (Insight)'},
'QFaceSwapInsight.face':{
'en-US' : 'Face',
'ru-RU' : 'Лицо',
'zh-CN' : 'Face',
'es-ES' : 'Face',
'it-IT' : 'Face',
'ja-JP' : 'Face',
'de-DE' : 'Face'},
'QFaceSwapInsight.match':{
'en-US' : 'Match',
'ru-RU' : 'Соответствие',
'zh-CN' : 'Match',
'es-ES' : 'Match',
'it-IT' : 'Match',
'ja-JP' : 'Match',
'de-DE' : 'Match'},
'QFaceSwapDFM.module_title':{
'en-US' : 'Face swap (DFM)',
'ru-RU' : 'Замена лица (DFM)',
'zh-CN' : '人脸交换器 (DFM)',
'es-ES' : 'Intercambiador de caras (DFM)',
'it-IT' : 'Scambiatore di facce (DFM)',
'ja-JP' : '顔の入れ替え (DFM)',
'de-DE' : 'Gesichtstauscher (DFM)'},
'QFaceSwapper.module_title':{ 'QFaceSwapDFM.model':{
'en-US' : 'Face swapper',
'ru-RU' : 'Замена лица',
'zh-CN' : '人脸交换器',
'es-ES' : 'Intercambiador de caras',
'it-IT' : 'Scambiatore di facce',
'ja-JP' : '顔の入れ替え',
'de-DE' : 'Gesichtstauscher'},
'QFaceSwapper.model':{
'en-US' : 'Model', 'en-US' : 'Model',
'ru-RU' : 'Модель', 'ru-RU' : 'Модель',
'zh-CN' : '模型', 'zh-CN' : '模型',
@ -770,7 +806,7 @@ class Localization:
'ja-JP' : 'モデル', 'ja-JP' : 'モデル',
'de-DE' : 'Modell'}, 'de-DE' : 'Modell'},
'QFaceSwapper.help.model':{ 'QFaceSwapDFM.help.model':{
'en-US' : 'Model file from a folder or available for download from the Internet.\nYou can train your own model in DeepFaceLab.', 'en-US' : 'Model file from a folder or available for download from the Internet.\nYou can train your own model in DeepFaceLab.',
'ru-RU' : 'Файл модели из папки, либо доступные для загрузки из интернета.\nВы можете натренировать свою собственную модель в прогармме DeepFaceLab.', 'ru-RU' : 'Файл модели из папки, либо доступные для загрузки из интернета.\nВы можете натренировать свою собственную модель в прогармме DeepFaceLab.',
'zh-CN' : '从本地文件夹载入没有的话可从deepfacelab官方中文论坛dfldata.xyz下载模型文件。\您可以用 DeepFaceLab 训练自己的模型。', 'zh-CN' : '从本地文件夹载入没有的话可从deepfacelab官方中文论坛dfldata.xyz下载模型文件。\您可以用 DeepFaceLab 训练自己的模型。',
@ -779,7 +815,7 @@ class Localization:
'ja-JP' : 'モデルファイルをローカルまたはウェブからダウンロードして入力できます\n独自のモデルを作りたいときはDFLでトレーニングできます', 'ja-JP' : 'モデルファイルをローカルまたはウェブからダウンロードして入力できます\n独自のモデルを作りたいときはDFLでトレーニングできます',
'de-DE' : 'Modelldatei aus einem Ordner oder verfügbar zum Herunterladen aus dem Internet.\nSie können Ihr eigenes Modell in DeepFaceLab trainieren.'}, 'de-DE' : 'Modelldatei aus einem Ordner oder verfügbar zum Herunterladen aus dem Internet.\nSie können Ihr eigenes Modell in DeepFaceLab trainieren.'},
'QFaceSwapper.swap_all_faces':{ 'QFaceSwapDFM.swap_all_faces':{
'en-US' : 'Swap all faces', 'en-US' : 'Swap all faces',
'ru-RU' : 'Заменить все лица', 'ru-RU' : 'Заменить все лица',
'zh-CN' : '改变所有面孔', 'zh-CN' : '改变所有面孔',
@ -788,16 +824,9 @@ class Localization:
'ja-JP' : 'すべての顔を入替える', 'ja-JP' : 'すべての顔を入替える',
'de-DE' : 'Alle Gesichter tauschen'}, 'de-DE' : 'Alle Gesichter tauschen'},
'QFaceSwapper.face_id':{
'en-US' : 'Face ID',
'ru-RU' : 'Номер лица',
'zh-CN' : '人脸ID',
'es-ES' : 'ID de la cara',
'it-IT' : 'ID della faccia',
'ja-JP' : '顔のID番号',
'de-DE' : 'Gesichts ID'},
'QFaceSwapper.help.face_id':{ 'QFaceSwapDFM.help.face_id':{
'en-US' : 'Face ID to swap.', 'en-US' : 'Face ID to swap.',
'ru-RU' : 'Номер лица для замены', 'ru-RU' : 'Номер лица для замены',
'zh-CN' : '待换的人脸ID', 'zh-CN' : '待换的人脸ID',
@ -806,7 +835,7 @@ class Localization:
'ja-JP' : '指定したID番号の顔のみ入替えます', 'ja-JP' : '指定したID番号の顔のみ入替えます',
'de-DE' : 'Gesichts ID zum Tauschen.'}, 'de-DE' : 'Gesichts ID zum Tauschen.'},
'QFaceSwapper.morph_factor':{ 'QFaceSwapDFM.morph_factor':{
'en-US' : 'Morph factor', 'en-US' : 'Morph factor',
'ru-RU' : 'Степень морфа', 'ru-RU' : 'Степень морфа',
'zh-CN' : '变形因子', 'zh-CN' : '变形因子',
@ -815,7 +844,7 @@ class Localization:
'ja-JP' : 'モーフィング係数', 'ja-JP' : 'モーフィング係数',
'de-DE' : 'Morph-Faktor'}, 'de-DE' : 'Morph-Faktor'},
'QFaceSwapper.help.morph_factor':{ 'QFaceSwapDFM.help.morph_factor':{
'en-US' : 'Controls degree of face morph from source to celeb.', 'en-US' : 'Controls degree of face morph from source to celeb.',
'ru-RU' : 'Контролирует степень морфа лица от исходного в знаменитость.', 'ru-RU' : 'Контролирует степень морфа лица от исходного в знаменитость.',
'zh-CN' : '控制从源人脸到目标人脸的面部变形程度。', 'zh-CN' : '控制从源人脸到目标人脸的面部变形程度。',
@ -824,7 +853,7 @@ class Localization:
'ja-JP' : '顔の変化の度合いを調整します', 'ja-JP' : '顔の変化の度合いを調整します',
'de-DE' : 'Steuert den Grad des Gesichtsmorphs von der Quelle zum Prominenten.'}, 'de-DE' : 'Steuert den Grad des Gesichtsmorphs von der Quelle zum Prominenten.'},
'QFaceSwapper.presharpen_amount':{ 'QFaceSwapDFM.presharpen_amount':{
'en-US' : 'Pre-sharpen', 'en-US' : 'Pre-sharpen',
'ru-RU' : 'Пред-резкость', 'ru-RU' : 'Пред-резкость',
'zh-CN' : '预先锐化', 'zh-CN' : '预先锐化',
@ -833,7 +862,7 @@ class Localization:
'ja-JP' : '事前シャープ化', 'ja-JP' : '事前シャープ化',
'de-DE' : 'Vor-Schärfen'}, 'de-DE' : 'Vor-Schärfen'},
'QFaceSwapper.help.presharpen_amount':{ 'QFaceSwapDFM.help.presharpen_amount':{
'en-US' : 'Sharpen the image before feed into the neural network.', 'en-US' : 'Sharpen the image before feed into the neural network.',
'ru-RU' : 'Увеличить резкость лица до замены в нейронной сети.', 'ru-RU' : 'Увеличить резкость лица до замены в нейронной сети.',
'zh-CN' : '在送入神经网络前提前对图片锐化', 'zh-CN' : '在送入神经网络前提前对图片锐化',
@ -842,7 +871,7 @@ class Localization:
'ja-JP' : 'ニューラルネットワークに入力する前に画像をシャープネス処理します', 'ja-JP' : 'ニューラルネットワークに入力する前に画像をシャープネス処理します',
'de-DE' : 'Schärfen Sie das Bild, bevor Sie es in das neuronale Netz laden.'}, 'de-DE' : 'Schärfen Sie das Bild, bevor Sie es in das neuronale Netz laden.'},
'QFaceSwapper.pregamma':{ 'QFaceSwapDFM.pregamma':{
'en-US' : 'Pre-gamma', 'en-US' : 'Pre-gamma',
'ru-RU' : 'Пред-гамма', 'ru-RU' : 'Пред-гамма',
'zh-CN' : '预先伽马校正', 'zh-CN' : '预先伽马校正',
@ -851,7 +880,7 @@ class Localization:
'ja-JP' : '事前ガンマ補正', 'ja-JP' : '事前ガンマ補正',
'de-DE' : 'Vor-Gamma'}, 'de-DE' : 'Vor-Gamma'},
'QFaceSwapper.help.pregamma':{ 'QFaceSwapDFM.help.pregamma':{
'en-US' : 'Change gamma of the image before feed into the neural network.', 'en-US' : 'Change gamma of the image before feed into the neural network.',
'ru-RU' : 'Изменить гамму лица до замены в нейронной сети.', 'ru-RU' : 'Изменить гамму лица до замены в нейронной сети.',
'zh-CN' : '在送入神经网络前提前对图片伽马校正', 'zh-CN' : '在送入神经网络前提前对图片伽马校正',
@ -860,7 +889,7 @@ class Localization:
'ja-JP' : 'ニューラルネットワークに入力する前に画像の色の明暗を処理します', 'ja-JP' : 'ニューラルネットワークに入力する前に画像の色の明暗を処理します',
'de-DE' : 'Ändern Sie das Gamma des Bildes, bevor Sie es in das neuronale Netz laden.'}, 'de-DE' : 'Ändern Sie das Gamma des Bildes, bevor Sie es in das neuronale Netz laden.'},
'QFaceSwapper.postgamma':{ 'QFaceSwapDFM.postgamma':{
'en-US' : 'Post-gamma', 'en-US' : 'Post-gamma',
'ru-RU' : 'Пост-гамма', 'ru-RU' : 'Пост-гамма',
'zh-CN' : '后伽马校正', 'zh-CN' : '后伽马校正',
@ -869,7 +898,7 @@ class Localization:
'ja-JP' : '事後ガンマ補正', 'ja-JP' : '事後ガンマ補正',
'de-DE' : 'Nach-Gamma'}, 'de-DE' : 'Nach-Gamma'},
'QFaceSwapper.two_pass':{ 'QFaceSwapDFM.two_pass':{
'en-US' : 'Two pass', 'en-US' : 'Two pass',
'ru-RU' : '2 прохода', 'ru-RU' : '2 прохода',
'zh-CN' : '双重处理人脸', 'zh-CN' : '双重处理人脸',
@ -878,7 +907,7 @@ class Localization:
'ja-JP' : '2パス処理', 'ja-JP' : '2パス処理',
'de-DE' : 'Zwei Durchgänge'}, 'de-DE' : 'Zwei Durchgänge'},
'QFaceSwapper.help.two_pass':{ 'QFaceSwapDFM.help.two_pass':{
'en-US' : 'Process the face twice. Reduces the fps by a factor of 2.', 'en-US' : 'Process the face twice. Reduces the fps by a factor of 2.',
'ru-RU' : 'Обработать лицо дважды. Снижает кадр/сек в 2 раза.', 'ru-RU' : 'Обработать лицо дважды. Снижает кадр/сек в 2 раза.',
'zh-CN' : '处理面部两次。 fps随之减半', 'zh-CN' : '处理面部两次。 fps随之减半',
@ -1292,7 +1321,7 @@ class Localization:
'ja-JP' : 'From static rect', 'ja-JP' : 'From static rect',
'de-DE' : 'Vom statischen Rechteck'}, 'de-DE' : 'Vom statischen Rechteck'},
'FaceSwapper.model_information':{ 'FaceSwapDFM.model_information':{
'en-US' : 'Model information', 'en-US' : 'Model information',
'ru-RU' : 'Информация о модели', 'ru-RU' : 'Информация о модели',
'zh-CN' : '模型信息', 'zh-CN' : '模型信息',
@ -1301,7 +1330,7 @@ class Localization:
'ja-JP' : 'モデル情報', 'ja-JP' : 'モデル情報',
'de-DE' : 'Modellinformationen'}, 'de-DE' : 'Modellinformationen'},
'FaceSwapper.filename':{ 'FaceSwapDFM.filename':{
'en-US' : 'Filename:', 'en-US' : 'Filename:',
'ru-RU' : 'Имя файла:', 'ru-RU' : 'Имя файла:',
'zh-CN' : '文件名', 'zh-CN' : '文件名',
@ -1310,7 +1339,7 @@ class Localization:
'ja-JP' : 'ファイル名', 'ja-JP' : 'ファイル名',
'de-DE' : 'Dateiname:'}, 'de-DE' : 'Dateiname:'},
'FaceSwapper.resolution':{ 'FaceSwapDFM.resolution':{
'en-US' : 'Resolution:', 'en-US' : 'Resolution:',
'ru-RU' : 'Разрешение:', 'ru-RU' : 'Разрешение:',
'zh-CN' : '分辨率', 'zh-CN' : '分辨率',
@ -1319,7 +1348,7 @@ class Localization:
'ja-JP' : '解像度', 'ja-JP' : '解像度',
'de-DE' : 'Auflösung:'}, 'de-DE' : 'Auflösung:'},
'FaceSwapper.downloading_model':{ 'FaceSwapDFM.downloading_model':{
'en-US' : 'Downloading model...', 'en-US' : 'Downloading model...',
'ru-RU' : 'Загрузка модели...', 'ru-RU' : 'Загрузка модели...',
'zh-CN' : '下载模型中...', 'zh-CN' : '下载模型中...',

View file

@ -15,6 +15,15 @@ from xlib import os as lib_os
# # set environ for onnxruntime # # set environ for onnxruntime
# # os.environ['CUDA_PATH_V11_2'] = os.environ['CUDA_PATH'] # # os.environ['CUDA_PATH_V11_2'] = os.environ['CUDA_PATH']
# from modelhub.onnx import InsightFaceSwap
# x = InsightFaceSwap(InsightFaceSwap.get_available_devices()[0])
# import code
# code.interact(local=dict(globals(), **locals()))
def main(): def main():
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers() subparsers = parser.add_subparsers()

View file

@ -0,0 +1,93 @@
from pathlib import Path
from typing import List
import numpy as np
from xlib.file import SplittedFile
from xlib.image import ImageProcessor
from xlib.onnxruntime import (InferenceSession_with_device, ORTDeviceInfo,
get_available_devices_info)
import cv2
import onnx
from onnx import numpy_helper
class InsightFaceSwap:
    """
    Face swapper built on the InsightFace 'inswapper_128' and 'w600k_r50' ONNX models.

     arguments

      device_info    ORTDeviceInfo

        use InsightFaceSwap.get_available_devices()
        to determine a list of available devices accepted by model

     raises

      Exception
    """

    @staticmethod
    def get_available_devices() -> List[ORTDeviceInfo]:
        # Any onnxruntime device reported by xlib is acceptable for this model.
        return get_available_devices_info()

    def __init__(self, device_info : ORTDeviceInfo):
        if device_info not in InsightFaceSwap.get_available_devices():
            raise Exception(f'device_info {device_info} is not in available devices for InsightFaceSwap')

        model_dir = Path(__file__).parent

        # Model weights are committed split into parts; merge them on first use.
        swap_path = model_dir / 'inswapper_128.onnx'
        SplittedFile.merge(swap_path, delete_parts=False)
        if not swap_path.exists():
            raise FileNotFoundError(f'{swap_path} not found')

        rec_path = model_dir / 'w600k_r50.onnx'
        SplittedFile.merge(rec_path, delete_parts=False)
        if not rec_path.exists():
            raise FileNotFoundError(f'{rec_path} not found')

        self._sess_swap = InferenceSession_with_device(str(swap_path), device_info)
        self._sess_rec = InferenceSession_with_device(str(rec_path), device_info)

        # The swap graph carries its latent projection matrix ('emap') as the
        # last initializer; extract it once so face vectors can be projected on CPU.
        swap_graph = onnx.load(str(swap_path)).graph
        self._emap = numpy_helper.to_array(swap_graph.initializer[-1])

    def get_input_size(self):
        """
        returns optimal Width/Height for input images, thus you can resize source image to avoid extra load
        """
        return 128

    def get_face_vector_input_size(self):
        # Width/Height of the aligned face fed to the recognition network.
        return 112

    def get_face_vector(self, img : np.ndarray) -> np.ndarray:
        """
        Compute the identity latent vector for a face image.

         img    np.ndarray   face image; letterboxed to 112x112 internally
        """
        proc = ImageProcessor(img)
        proc.fit_in(TW=112, TH=112, pad_to_target=True, allow_upscale=True)
        feed = proc.ch(3).to_ufloat32().get_image('NCHW')

        rec_out = self._sess_rec.get_outputs()[0].name
        rec_in = self._sess_rec.get_inputs()[0].name
        vec = self._sess_rec.run([rec_out], {rec_in: feed})[0]

        # Project the raw embedding through 'emap' and L2-normalize it.
        vec = np.dot(vec.reshape(1, -1), self._emap)
        vec /= np.linalg.norm(vec)
        return vec

    def generate(self, img : np.ndarray, face_vector : np.ndarray):
        """
        Swap the face in img to the identity described by face_vector.

         arguments

          img            np.ndarray      HW HWC 1HWC uint8/float32

          face_vector    np.ndarray      result of get_face_vector()
        """
        target = ImageProcessor(img)
        src_dtype = target.get_dtype()
        _, H, W, _ = target.get_dims()

        feed = target.resize((128, 128)).ch(3).to_ufloat32().get_image('NCHW')
        raw = self._sess_swap.run(['output'], {'target': feed,
                                               'source': face_vector})[0]
        swapped = raw.transpose(0, 2, 3, 1)[0]

        # Restore the caller's dtype and the original resolution.
        return ImageProcessor(swapped).to_dtype(src_dtype).resize((W, H)).get_image('HWC')

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -1,6 +1,7 @@
from .CenterFace.CenterFace import CenterFace from .CenterFace.CenterFace import CenterFace
from .FaceMesh.FaceMesh import FaceMesh from .FaceMesh.FaceMesh import FaceMesh
from .InsightFace2d106.InsightFace2D106 import InsightFace2D106
from .InsightFaceSwap.InsightFaceSwap import InsightFaceSwap
from .LIA.LIA import LIA
from .S3FD.S3FD import S3FD from .S3FD.S3FD import S3FD
from .YoloV5Face.YoloV5Face import YoloV5Face from .YoloV5Face.YoloV5Face import YoloV5Face
from .InsightFace2d106.InsightFace2D106 import InsightFace2D106
from .LIA.LIA import LIA

View file

@ -10,6 +10,8 @@ from xlib import cv as lib_cv
repo_root = Path(__file__).parent.parent repo_root = Path(__file__).parent.parent
large_files_list = [ (repo_root / 'modelhub' / 'onnx' / 'S3FD' / 'S3FD.onnx', 48*1024*1024), large_files_list = [ (repo_root / 'modelhub' / 'onnx' / 'S3FD' / 'S3FD.onnx', 48*1024*1024),
(repo_root / 'modelhub' / 'onnx' / 'LIA' / 'generator.onnx', 48*1024*1024), (repo_root / 'modelhub' / 'onnx' / 'LIA' / 'generator.onnx', 48*1024*1024),
(repo_root / 'modelhub' / 'onnx' / 'InsightFaceSwap' / 'inswapper_128.onnx', 48*1024*1024),
(repo_root / 'modelhub' / 'onnx' / 'InsightFaceSwap' / 'w600k_r50.onnx', 48*1024*1024),
(repo_root / 'modelhub' / 'torch' / 'S3FD' / 'S3FD.pth', 48*1024*1024), (repo_root / 'modelhub' / 'torch' / 'S3FD' / 'S3FD.pth', 48*1024*1024),
(repo_root / 'modelhub' / 'cv' / 'FaceMarkerLBF' / 'lbfmodel.yaml', 34*1024*1024), (repo_root / 'modelhub' / 'cv' / 'FaceMarkerLBF' / 'lbfmodel.yaml', 34*1024*1024),
] ]