added Face Animator module
README.md

@@ -12,9 +12,13 @@
<tr><td colspan=2 align="center">
-## Available ready-to-use public face models.
-## These persons do not exists. Similarities with real people are accidental.
+## Face Swapper
+
+You can swap your face from a webcam, or swap the face in a video, using trained face models.
+
+Here is a list of available ready-to-use public face models.
+
+These persons do not exist. Similarities with real people are accidental.
</td></tr>
@@ -117,6 +121,21 @@ Here is an <a href="https://www.tiktok.com/@arnoldschwarzneggar/video/6995538782
+<tr><td colspan=2 align="center">
+
+## Face Animator
+
+There is also a Face Animator module in the DeepFaceLive app. You can control a static face picture using video or your own face from the camera. The quality is not the best, and it requires careful face matching and parameter tuning for every face pair, but it is good enough for funny videos and memes, or for real-time streaming at 25 fps on a 35 TFLOPS GPU.
+
+<img src="doc/face_animator_example.gif"></img>
+
+Here is a [mini video](doc/FaceAnimator_tutor.mp4) showing how to set up the Face Animator, in which an Obama video drives Kim Chen's face.
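For readers who want to script the same idea outside the app, here is a minimal sketch of the animation loop, assuming the `TPSMM` ONNX wrapper from `modelhub.onnx` that the new `FaceAnimator` backend uses (see `FaceAnimator.py` below). The image path and the webcam loop are illustrative; in the app the driving frame is the aligned face crop produced by FaceAligner, not the raw camera frame.

```python
import cv2
from modelhub.onnx import TPSMM
from xlib.image.ImageProcessor import ImageProcessor

device = TPSMM.get_available_devices()[0]      # pick any available device
tpsmm  = TPSMM(device)
W, H   = tpsmm.get_input_size()

# Still picture to animate ("animatable"), fitted/padded to the model input size.
ip = ImageProcessor(cv2.imread('animatables/Putin.png'))   # illustrative path
ip.fit_in(TW=W, TH=H, pad_to_target=True, allow_upscale=True)
src_img = ip.get_image('HWC')
src_kp  = tpsmm.extract_kp(src_img)

ref_kp = None                                  # reference pose for relative mode
cap = cv2.VideoCapture(0)                      # driving video: webcam
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    drv_kp = tpsmm.extract_kp(frame)           # the app feeds an aligned face crop here
    if ref_kp is None:
        ref_kp = drv_kp                        # first frame becomes the reference pose
    out = tpsmm.generate(src_img, src_kp, drv_kp, ref_kp, relative_power=1.0)
    cv2.imshow('Face Animator sketch', out)    # output dtype/scale may need conversion
    if cv2.waitKey(1) == 27:                   # Esc to quit
        break
```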
+</td></tr>
</table>
<table align="center" border="0">
<tr><td colspan=2 align="center">

## Minimum system requirements

any DirectX12 compatible graphics card
@@ -150,7 +169,7 @@ Windows 10

## Releases

</td></tr>
<tr><td align="right">

<a href="https://disk.yandex.ru/d/7i5XTKIKVg5UUg">Windows 10 x64 (yandex.ru)</a>
@@ -187,7 +206,7 @@ NVIDIA build : NVIDIA cards only, GT730 and higher. Works faster than DX12. Face

</td></tr>
<tr><td colspan=2 align="center">
I need the computing power to train models.
<br>
If you have a free computer with a 2080 Ti or better card with 12 GB+ VRAM, you can give me remote access to it. I will train one model per month. Contact me (iperov#6528) on the Discord channel.
</td></tr>
@@ -209,7 +228,7 @@ bitcoin:bc1qewl062v70rszulml3f0mjdjrys8uxdydw3v6rq

<!--
<a href="https://br-stone.online"><img src="doc/logo_barclay_stone.png"></img></a><a href="https://exmo.com"><img src="doc/logo_exmo.png"></img></a>

presents

<tr><td align="right">

@@ -14,6 +14,7 @@ from .ui.QFaceAligner import QFaceAligner
from .ui.QFaceDetector import QFaceDetector
from .ui.QFaceMarker import QFaceMarker
from .ui.QFaceMerger import QFaceMerger
+from .ui.QFaceAnimator import QFaceAnimator
from .ui.QFaceSwapper import QFaceSwapper
from .ui.QFileSource import QFileSource
from .ui.QFrameAdjuster import QFrameAdjuster
@@ -32,6 +33,9 @@ class QLiveSwap(qtx.QXWidget):
        dfm_models_path = userdata_path / 'dfm_models'
        dfm_models_path.mkdir(parents=True, exist_ok=True)

+       animatables_path = userdata_path / 'animatables'
+       animatables_path.mkdir(parents=True, exist_ok=True)
+
        output_sequence_path = userdata_path / 'output_sequence'
        output_sequence_path.mkdir(parents=True, exist_ok=True)
@@ -53,18 +57,21 @@ class QLiveSwap(qtx.QXWidget):
        face_detector  = self.face_detector  = backend.FaceDetector (weak_heap=backed_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=multi_sources_bc_out, bc_out=face_detector_bc_out, backend_db=backend_db )
        face_marker    = self.face_marker    = backend.FaceMarker   (weak_heap=backed_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_detector_bc_out, bc_out=face_marker_bc_out, backend_db=backend_db)
        face_aligner   = self.face_aligner   = backend.FaceAligner  (weak_heap=backed_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_marker_bc_out, bc_out=face_aligner_bc_out, backend_db=backend_db )
+       face_animator  = self.face_animator  = backend.FaceAnimator (weak_heap=backed_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_aligner_bc_out, bc_out=face_merger_bc_out, animatables_path=animatables_path, backend_db=backend_db )
        face_swapper   = self.face_swapper   = backend.FaceSwapper  (weak_heap=backed_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_aligner_bc_out, bc_out=face_swapper_bc_out, dfm_models_path=dfm_models_path, backend_db=backend_db )
        frame_adjuster = self.frame_adjuster = backend.FrameAdjuster(weak_heap=backed_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_swapper_bc_out, bc_out=frame_adjuster_bc_out, backend_db=backend_db )
        face_merger    = self.face_merger    = backend.FaceMerger   (weak_heap=backed_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=frame_adjuster_bc_out, bc_out=face_merger_bc_out, backend_db=backend_db )
        stream_output  = self.stream_output  = backend.StreamOutput (weak_heap=backed_weak_heap, reemit_frame_signal=reemit_frame_signal, bc_in=face_merger_bc_out, save_default_path=userdata_path, backend_db=backend_db)

-       self.all_backends : List[backend.BackendHost] = [file_source, camera_source, face_detector, face_marker, face_aligner, face_swapper, frame_adjuster, face_merger, stream_output]
+       self.all_backends : List[backend.BackendHost] = [file_source, camera_source, face_detector, face_marker, face_aligner, face_animator, face_swapper, frame_adjuster, face_merger, stream_output]
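Read off the `bc_in`/`bc_out` arguments above, the backend graph now looks like this:

```python
# FileSource/CameraSource -> multi_sources_bc_out
#   -> FaceDetector -> face_detector_bc_out
#   -> FaceMarker   -> face_marker_bc_out
#   -> FaceAligner  -> face_aligner_bc_out
#        -> FaceSwapper  -> face_swapper_bc_out -> FrameAdjuster
#             -> frame_adjuster_bc_out -> FaceMerger -> face_merger_bc_out
#        -> FaceAnimator ------------------------------> face_merger_bc_out
#   face_merger_bc_out -> StreamOutput
# FaceAnimator consumes aligned faces and writes directly to the merger's
# output connection, bypassing the swap/adjust/merge stages.
```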
        self.q_file_source    = QFileSource(self.file_source)
        self.q_camera_source  = QCameraSource(self.camera_source)
        self.q_face_detector  = QFaceDetector(self.face_detector)
        self.q_face_marker    = QFaceMarker(self.face_marker)
        self.q_face_aligner   = QFaceAligner(self.face_aligner)
+       self.q_face_animator  = QFaceAnimator(self.face_animator, animatables_path=animatables_path)
        self.q_face_swapper   = QFaceSwapper(self.face_swapper, dfm_models_path=dfm_models_path)
        self.q_frame_adjuster = QFrameAdjuster(self.frame_adjuster)
        self.q_face_merger    = QFaceMerger(self.face_merger)
@@ -72,12 +79,12 @@ class QLiveSwap(qtx.QXWidget):

        self.q_ds_frame_viewer = QBCFrameViewer(backed_weak_heap, multi_sources_bc_out)
        self.q_ds_fa_viewer    = QBCFaceAlignViewer(backed_weak_heap, face_aligner_bc_out, preview_width=256)
-       self.q_ds_fc_viewer    = QBCFaceSwapViewer(backed_weak_heap, face_swapper_bc_out, preview_width=256)
+       self.q_ds_fc_viewer    = QBCFaceSwapViewer(backed_weak_heap, face_merger_bc_out, preview_width=256)
        self.q_ds_merged_frame_viewer = QBCMergedFrameViewer(backed_weak_heap, face_merger_bc_out)

        q_nodes = qtx.QXWidgetHBox([ qtx.QXWidgetVBox([self.q_file_source, self.q_camera_source], spacing=5, fixed_width=256),
                                     qtx.QXWidgetVBox([self.q_face_detector, self.q_face_aligner,], spacing=5, fixed_width=256),
-                                    qtx.QXWidgetVBox([self.q_face_marker, self.q_face_swapper], spacing=5, fixed_width=256),
+                                    qtx.QXWidgetVBox([self.q_face_marker, self.q_face_animator, self.q_face_swapper], spacing=5, fixed_width=256),
                                     qtx.QXWidgetVBox([self.q_frame_adjuster, self.q_face_merger, self.q_stream_output], spacing=5, fixed_width=256),
                                   ], spacing=5, size_policy=('fixed', 'fixed') )
@@ -88,7 +95,7 @@ class QLiveSwap(qtx.QXWidget):
                                   ], spacing=5, size_policy=('fixed', 'fixed') )

        self.setLayout(qtx.QXVBoxLayout( [ (qtx.QXWidgetVBox([q_nodes, q_view_nodes], spacing=5), qtx.AlignCenter) ]))

        self._timer = qtx.QXTimer(interval=5, timeout=self._on_timer_5ms, start=True)

    def _process_messages(self):
@@ -103,8 +110,11 @@ class QLiveSwap(qtx.QXWidget):
        self.backend_db.clear()

    def initialize(self):
-       for backend in self.all_backends:
-           backend.restore_on_off_state()
+       for bcknd in self.all_backends:
+           default_state = True
+           if isinstance(bcknd, (backend.CameraSource, backend.FaceAnimator) ):
+               default_state = False
+           bcknd.restore_on_off_state(default_state=default_state)
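The practical effect of the new default-state logic, for readers skimming the diff:

```python
# On first run every backend restores to 'on', except CameraSource and
# FaceAnimator, which stay off until the user enables them explicitly.
```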
    def finalize(self):
        # Gracefully stop the backend

@@ -146,10 +156,10 @@ class QDFLAppWindow(qtx.QXWindow):

        menu_language_action_english = menu_language.addAction('English')
        menu_language_action_english.triggered.connect(lambda: (qtx.QXMainApplication.inst.set_language('en-US'), qtx.QXMainApplication.inst.reinitialize()) )

        menu_language_action_spanish = menu_language.addAction('Español')
        menu_language_action_spanish.triggered.connect(lambda: (qtx.QXMainApplication.inst.set_language('es-ES'), qtx.QXMainApplication.inst.reinitialize()) )

        menu_language_action_italian = menu_language.addAction('Italiano')
        menu_language_action_italian.triggered.connect(lambda: (qtx.QXMainApplication.inst.set_language('it-IT'), qtx.QXMainApplication.inst.reinitialize()) )

@@ -21,6 +21,7 @@ class BackendFaceSwapInfo:
        self.face_align_image_name : str = None
        self.face_align_mask_name : str = None
        self.face_align_lmrks_mask_name : str = None
+       self.face_anim_image_name : str = None
        self.face_swap_image_name : str = None
        self.face_swap_mask_name : str = None

@@ -102,7 +102,7 @@ class CameraSourceWorker(BackendWorker):

        cs.driver.enable()
        cs.driver.set_choices(_DriverType, _DriverType_names, none_choice_name='@misc.menu_select')
-       cs.driver.select(state.driver)
+       cs.driver.select(state.driver if state.driver is not None else _DriverType.DSHOW if platform.system() == 'Windows' else _DriverType.COMPATIBLE)

        cs.resolution.enable()
        cs.resolution.set_choices(_ResolutionType, _ResolutionType_names, none_choice_name=None)
@@ -1,4 +1,5 @@
import time
+from enum import IntEnum

import numpy as np
from xlib import os as lib_os

@@ -10,6 +11,14 @@ from .BackendBase import (BackendConnection, BackendDB, BackendHost,
                          BackendWorkerState)


+class AlignMode(IntEnum):
+    FROM_RECT = 0
+    FROM_POINTS = 1
+
+AlignModeNames = ['@FaceAligner.AlignMode.FROM_RECT',
+                  '@FaceAligner.AlignMode.FROM_POINTS',
+                  ]
+
class FaceAligner(BackendHost):
    def __init__(self, weak_heap : BackendWeakHeap, reemit_frame_signal : BackendSignal, bc_in : BackendConnection, bc_out : BackendConnection, backend_db : BackendDB = None):
        super().__init__(backend_db=backend_db,
@@ -33,13 +42,20 @@ class FaceAlignerWorker(BackendWorker):
        lib_os.set_timer_resolution(1)

        state, cs = self.get_state(), self.get_control_sheet()
+       cs.align_mode.call_on_selected(self.on_cs_align_mode)
        cs.face_coverage.call_on_number(self.on_cs_face_coverage)
        cs.resolution.call_on_number(self.on_cs_resolution)
        cs.exclude_moving_parts.call_on_flag(self.on_cs_exclude_moving_parts)
        cs.head_mode.call_on_flag(self.on_cs_head_mode)
+       cs.freeze_z_rotation.call_on_flag(self.on_cs_freeze_z_rotation)

        cs.x_offset.call_on_number(self.on_cs_x_offset)
        cs.y_offset.call_on_number(self.on_cs_y_offset)

+       cs.align_mode.enable()
+       cs.align_mode.set_choices(AlignMode, AlignModeNames)
+       cs.align_mode.select(state.align_mode if state.align_mode is not None else AlignMode.FROM_POINTS)
+
        cs.face_coverage.enable()
        cs.face_coverage.set_config(lib_csw.Number.Config(min=0.1, max=4.0, step=0.1, decimals=1, allow_instant_update=True))
        cs.face_coverage.set_number(state.face_coverage if state.face_coverage is not None else 2.2)
@@ -54,6 +70,9 @@ class FaceAlignerWorker(BackendWorker):
        cs.head_mode.enable()
        cs.head_mode.set_flag(state.head_mode if state.head_mode is not None else False)

+       cs.freeze_z_rotation.enable()
+       cs.freeze_z_rotation.set_flag(state.freeze_z_rotation if state.freeze_z_rotation is not None else False)
+
        cs.x_offset.enable()
        cs.x_offset.set_config(lib_csw.Number.Config(min=-1, max=1, step=0.01, decimals=2, allow_instant_update=True))
        cs.x_offset.set_number(state.x_offset if state.x_offset is not None else 0)
@@ -62,6 +81,12 @@ class FaceAlignerWorker(BackendWorker):
        cs.y_offset.set_config(lib_csw.Number.Config(min=-1, max=1, step=0.01, decimals=2, allow_instant_update=True))
        cs.y_offset.set_number(state.y_offset if state.y_offset is not None else 0)

+   def on_cs_align_mode(self, idx, align_mode):
+       state, cs = self.get_state(), self.get_control_sheet()
+       state.align_mode = align_mode
+       self.save_state()
+       self.reemit_frame_signal.send()
+
    def on_cs_face_coverage(self, face_coverage):
        state, cs = self.get_state(), self.get_control_sheet()
@@ -91,6 +116,12 @@ class FaceAlignerWorker(BackendWorker):
        self.save_state()
        self.reemit_frame_signal.send()

+   def on_cs_freeze_z_rotation(self, freeze_z_rotation):
+       state, cs = self.get_state(), self.get_control_sheet()
+       state.freeze_z_rotation = freeze_z_rotation
+       self.save_state()
+       self.reemit_frame_signal.send()
+
    def on_cs_x_offset(self, x_offset):
        state, cs = self.get_state(), self.get_control_sheet()
        cfg = cs.x_offset.get_config()
@@ -123,19 +154,28 @@ class FaceAlignerWorker(BackendWorker):
            if all_is_not_None(state.face_coverage, state.resolution, frame_image):
                for face_id, fsi in enumerate( bcd.get_face_swap_info_list() ):
                    head_yaw = None
-                   if state.head_mode:
+                   if state.head_mode or state.freeze_z_rotation:
                        if fsi.face_pose is not None:
                            head_yaw = fsi.face_pose.as_radians()[1]

                    face_ulmrks = fsi.face_ulmrks
                    if face_ulmrks is not None:
                        fsi.face_resolution = state.resolution

-                       face_align_img, uni_mat = face_ulmrks.cut(frame_image, state.face_coverage, state.resolution,
-                                                                 exclude_moving_parts=state.exclude_moving_parts,
-                                                                 head_yaw=head_yaw,
-                                                                 x_offset=state.x_offset,
-                                                                 y_offset=state.y_offset-0.08)
+                       if state.align_mode == AlignMode.FROM_RECT:
+                           face_align_img, uni_mat = fsi.face_urect.cut(frame_image, coverage=state.face_coverage, output_size=state.resolution,
+                                                                        x_offset=state.x_offset, y_offset=state.y_offset)
+
+                       elif state.align_mode == AlignMode.FROM_POINTS:
+                           face_align_img, uni_mat = face_ulmrks.cut(frame_image, state.face_coverage, state.resolution,
+                                                                     exclude_moving_parts=state.exclude_moving_parts,
+                                                                     head_yaw=head_yaw,
+                                                                     x_offset=state.x_offset,
+                                                                     y_offset=state.y_offset-0.08,
+                                                                     freeze_z_rotation=state.freeze_z_rotation)

                        fsi.face_align_image_name = f'{frame_image_name}_{face_id}_aligned'
                        fsi.image_to_align_uni_mat = uni_mat
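A condensed reading of the new branch (the "suits X" notes come from the `@QFaceAligner.help.align_mode` text added later in this diff):

```python
# FROM_RECT   -> fsi.face_urect.cut(...)   loose rectangle-based crop; suits Face Animator
# FROM_POINTS -> face_ulmrks.cut(...)      landmark-based crop with the extra
#                                          y_offset-0.08 shift and the new
#                                          freeze_z_rotation option; suits Face Swapper
```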

@@ -147,7 +187,6 @@ class FaceAlignerWorker(BackendWorker):
                        fsi.face_align_lmrks_mask_name = f'{frame_image_name}_{face_id}_aligned_lmrks_mask'
                        bcd.set_image(fsi.face_align_lmrks_mask_name, face_align_lmrks_mask_img)

            self.stop_profile_timing()
            self.pending_bcd = bcd
@@ -158,32 +197,37 @@ class FaceAlignerWorker(BackendWorker):
            else:
                time.sleep(0.001)


class Sheet:
    class Host(lib_csw.Sheet.Host):
        def __init__(self):
            super().__init__()
+           self.align_mode = lib_csw.DynamicSingleSwitch.Client()
            self.face_coverage = lib_csw.Number.Client()
            self.resolution = lib_csw.Number.Client()
            self.exclude_moving_parts = lib_csw.Flag.Client()
            self.head_mode = lib_csw.Flag.Client()
+           self.freeze_z_rotation = lib_csw.Flag.Client()
            self.x_offset = lib_csw.Number.Client()
            self.y_offset = lib_csw.Number.Client()

    class Worker(lib_csw.Sheet.Worker):
        def __init__(self):
            super().__init__()
+           self.align_mode = lib_csw.DynamicSingleSwitch.Host()
            self.face_coverage = lib_csw.Number.Host()
            self.resolution = lib_csw.Number.Host()
            self.exclude_moving_parts = lib_csw.Flag.Host()
            self.head_mode = lib_csw.Flag.Host()
+           self.freeze_z_rotation = lib_csw.Flag.Host()
            self.x_offset = lib_csw.Number.Host()
            self.y_offset = lib_csw.Number.Host()

class WorkerState(BackendWorkerState):
+   align_mode = None
    face_coverage : float = None
    resolution : int = None
    exclude_moving_parts : bool = None
    head_mode : bool = None
+   freeze_z_rotation : bool = None
    x_offset : float = None
    y_offset : float = None
apps/DeepFaceLive/backend/FaceAnimator.py (new file, 224 lines)

@@ -0,0 +1,224 @@
import re
import time
from pathlib import Path

import cv2
import numpy as np
from modelhub.onnx import TPSMM
from xlib import cv as lib_cv2
from xlib import os as lib_os
from xlib import path as lib_path
from xlib.image.ImageProcessor import ImageProcessor
from xlib.mp import csw as lib_csw

from .BackendBase import (BackendConnection, BackendDB, BackendHost,
                          BackendSignal, BackendWeakHeap, BackendWorker,
                          BackendWorkerState)


class FaceAnimator(BackendHost):
    def __init__(self, weak_heap : BackendWeakHeap, reemit_frame_signal : BackendSignal, bc_in : BackendConnection, bc_out : BackendConnection, animatables_path : Path, backend_db : BackendDB = None,
                 id : int = 0):
        self._id = id
        super().__init__(backend_db=backend_db,
                         sheet_cls=Sheet,
                         worker_cls=FaceAnimatorWorker,
                         worker_state_cls=WorkerState,
                         worker_start_args=[weak_heap, reemit_frame_signal, bc_in, bc_out, animatables_path])

    def get_control_sheet(self) -> 'Sheet.Host': return super().get_control_sheet()

    def _get_name(self):
        return super()._get_name()# + f'{self._id}'

class FaceAnimatorWorker(BackendWorker):
    def get_state(self) -> 'WorkerState': return super().get_state()
    def get_control_sheet(self) -> 'Sheet.Worker': return super().get_control_sheet()

    def on_start(self, weak_heap : BackendWeakHeap, reemit_frame_signal : BackendSignal, bc_in : BackendConnection, bc_out : BackendConnection, animatables_path : Path):
        self.weak_heap = weak_heap
        self.reemit_frame_signal = reemit_frame_signal
        self.bc_in = bc_in
        self.bc_out = bc_out
        self.animatables_path = animatables_path

        self.pending_bcd = None

        self.tpsmm_model = None

        self.animatable_img = None
        self.driving_ref_kp = None
        self.last_driving_kp = None

        lib_os.set_timer_resolution(1)

        state, cs = self.get_state(), self.get_control_sheet()

        cs.device.call_on_selected(self.on_cs_device)
        cs.animatable.call_on_selected(self.on_cs_animatable)
        cs.animator_face_id.call_on_number(self.on_cs_animator_face_id)
        cs.relative_mode.call_on_flag(self.on_cs_relative_mode)
        cs.relative_power.call_on_number(self.on_cs_relative_power)
        cs.update_animatables.call_on_signal(self.update_animatables)
        cs.reset_reference_pose.call_on_signal(self.on_cs_reset_reference_pose)

        cs.device.enable()
        cs.device.set_choices( TPSMM.get_available_devices(), none_choice_name='@misc.menu_select')
        cs.device.select(state.device)

    def update_animatables(self):
        state, cs = self.get_state(), self.get_control_sheet()
        cs.animatable.set_choices([animatable_path.name for animatable_path in lib_path.get_files_paths(self.animatables_path, extensions=['.jpg','.jpeg','.png'])], none_choice_name='@misc.menu_select')

    def on_cs_device(self, idx, device):
        state, cs = self.get_state(), self.get_control_sheet()
        if device is not None and state.device == device:
            self.tpsmm_model = TPSMM(device)

            cs.animatable.enable()
            self.update_animatables()
            cs.animatable.select(state.animatable)

            cs.animator_face_id.enable()
            cs.animator_face_id.set_config(lib_csw.Number.Config(min=0, max=16, step=1, decimals=0, allow_instant_update=True))
            cs.animator_face_id.set_number(state.animator_face_id if state.animator_face_id is not None else 0)

            cs.relative_mode.enable()
            cs.relative_mode.set_flag(state.relative_mode if state.relative_mode is not None else True)

            cs.relative_power.enable()
            cs.relative_power.set_config(lib_csw.Number.Config(min=0.0, max=1.0, step=0.01, decimals=2, allow_instant_update=True))
            cs.relative_power.set_number(state.relative_power if state.relative_power is not None else 1.0)

            cs.update_animatables.enable()
            cs.reset_reference_pose.enable()
        else:
            state.device = device
            self.save_state()
            self.restart()

    def on_cs_animatable(self, idx, animatable):
        state, cs = self.get_state(), self.get_control_sheet()

        try:
            W,H = self.tpsmm_model.get_input_size()

            ip = ImageProcessor(lib_cv2.imread(self.animatables_path / animatable))
            ip.fit_in(TW=W, TH=H, pad_to_target=True, allow_upscale=True)

            self.animatable_img = ip.get_image('HWC')
            self.animatable_kp = self.tpsmm_model.extract_kp(self.animatable_img)
            self.driving_ref_kp = None

            state.animatable = animatable
        except Exception as e:
            print(e)
            self.animatable_img = None
            cs.animatable.unselect()

        self.save_state()
        self.reemit_frame_signal.send()

    def on_cs_animator_face_id(self, animator_face_id):
        state, cs = self.get_state(), self.get_control_sheet()
        cfg = cs.animator_face_id.get_config()
        animator_face_id = state.animator_face_id = int(np.clip(animator_face_id, cfg.min, cfg.max))
        cs.animator_face_id.set_number(animator_face_id)
        self.save_state()
        self.reemit_frame_signal.send()

    def on_cs_relative_mode(self, relative_mode):
        state, cs = self.get_state(), self.get_control_sheet()
        state.relative_mode = relative_mode
        self.save_state()
        self.reemit_frame_signal.send()

    def on_cs_relative_power(self, relative_power):
        state, cs = self.get_state(), self.get_control_sheet()
        cfg = cs.relative_power.get_config()
        relative_power = state.relative_power = float(np.clip(relative_power, cfg.min, cfg.max))
        cs.relative_power.set_number(relative_power)
        self.save_state()
        self.reemit_frame_signal.send()

    def on_cs_reset_reference_pose(self):
        self.driving_ref_kp = self.last_driving_kp
        self.reemit_frame_signal.send()

    def on_tick(self):
        state, cs = self.get_state(), self.get_control_sheet()

        if self.pending_bcd is None:
            self.start_profile_timing()

            bcd = self.bc_in.read(timeout=0.005)
            if bcd is not None:
                bcd.assign_weak_heap(self.weak_heap)

                tpsmm_model = self.tpsmm_model
                if tpsmm_model is not None and self.animatable_img is not None:

                    for i, fsi in enumerate(bcd.get_face_swap_info_list()):
                        if state.animator_face_id == i:
                            face_align_image = bcd.get_image(fsi.face_align_image_name)
                            if face_align_image is not None:

                                _,H,W,_ = ImageProcessor(face_align_image).get_dims()

                                driving_kp = self.last_driving_kp = tpsmm_model.extract_kp(face_align_image)

                                if self.driving_ref_kp is None:
                                    self.driving_ref_kp = driving_kp

                                anim_image = tpsmm_model.generate(self.animatable_img, self.animatable_kp, driving_kp,
                                                                  self.driving_ref_kp if state.relative_mode else None,
                                                                  relative_power=state.relative_power)
                                anim_image = ImageProcessor(anim_image).resize((W,H)).get_image('HWC')

                                fsi.face_swap_image_name = f'{fsi.face_align_image_name}_swapped'
                                bcd.set_image(fsi.face_swap_image_name, anim_image)
                            break

            self.stop_profile_timing()
            self.pending_bcd = bcd

        if self.pending_bcd is not None:
            if self.bc_out.is_full_read(1):
                self.bc_out.write(self.pending_bcd)
                self.pending_bcd = None
            else:
                time.sleep(0.001)

class Sheet:
    class Host(lib_csw.Sheet.Host):
        def __init__(self):
            super().__init__()
            self.device = lib_csw.DynamicSingleSwitch.Client()
            self.animatable = lib_csw.DynamicSingleSwitch.Client()
            self.animator_face_id = lib_csw.Number.Client()
            self.relative_mode = lib_csw.Flag.Client()
            self.update_animatables = lib_csw.Signal.Client()
            self.reset_reference_pose = lib_csw.Signal.Client()
            self.relative_power = lib_csw.Number.Client()

    class Worker(lib_csw.Sheet.Worker):
        def __init__(self):
            super().__init__()
            self.device = lib_csw.DynamicSingleSwitch.Host()
            self.animatable = lib_csw.DynamicSingleSwitch.Host()
            self.animator_face_id = lib_csw.Number.Host()
            self.relative_mode = lib_csw.Flag.Host()
            self.update_animatables = lib_csw.Signal.Host()
            self.reset_reference_pose = lib_csw.Signal.Host()
            self.relative_power = lib_csw.Number.Host()

class WorkerState(BackendWorkerState):
    device = None
    animatable : str = None
    animator_face_id : int = None
    relative_mode : bool = None
    relative_power : float = None
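A note on `relative_mode` / `relative_power` above: `TPSMM.generate()` is an opaque ONNX call, so the exact blending is not visible in this diff. In first-order-motion-style models such as TPSMM (Thin-Plate Spline Motion Model), relative mode typically transfers the driving motion as a delta from the reference pose captured by `reset_reference_pose`, rather than copying the driving pose absolutely, roughly:

```python
# Hypothetical sketch of what relative mode usually computes; the real math
# lives inside the TPSMM ONNX graph, not in this repository's Python code.
kp_used = animatable_kp + relative_power * (driving_kp - driving_ref_kp)
# With relative_mode off, generate() receives None for the reference and the
# driving keypoints are used absolutely, so the still image copies the
# driver's pose exactly instead of its motion.
```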

@@ -139,7 +139,7 @@ class FaceDetectorWorker(BackendWorker):
        cs.sort_by.select(detector_state.sort_by if detector_state.sort_by is not None else FaceSortBy.LARGEST)

        cs.temporal_smoothing.enable()
-       cs.temporal_smoothing.set_config(lib_csw.Number.Config(min=1, max=10, step=1, allow_instant_update=True))
+       cs.temporal_smoothing.set_config(lib_csw.Number.Config(min=1, max=50, step=1, allow_instant_update=True))
        cs.temporal_smoothing.set_number(detector_state.temporal_smoothing if detector_state.temporal_smoothing is not None else 1)

        if detector_type == DetectorType.CENTER_FACE:
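For context on what the raised cap means: the help text elsewhere in the localization file describes temporal smoothing as stabilizing face landmarks by averaging frames. A minimal sketch of such a smoother under that assumption follows; the class name and exact averaging are illustrative, not the repository's implementation. The same `max=10` to `max=50` change is applied to FaceMarker below.

```python
from collections import deque
import numpy as np

class TemporalSmoother:
    """Average the last n observations (landmarks, rects, ...)."""
    def __init__(self, n: int):
        self.buf = deque(maxlen=n)        # max=50 now allows windows up to 50 frames

    def __call__(self, values: np.ndarray) -> np.ndarray:
        self.buf.append(np.asarray(values, dtype=np.float32))
        return np.mean(self.buf, axis=0)  # larger window = steadier but laggier
```

Larger windows trade latency for stability, which is why the help text recommends smoothing for static scenes and webcams.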

@@ -60,7 +60,7 @@ class FaceMarkerWorker(BackendWorker):

        cs.marker_type.enable()
        cs.marker_type.set_choices(MarkerType, MarkerTypeNames, none_choice_name=None)
-       cs.marker_type.select(state.marker_type if state.marker_type is not None else MarkerType.INSIGHT_2D106)
+       cs.marker_type.select(state.marker_type if state.marker_type is not None else MarkerType.GOOGLE_FACEMESH)

    def on_cs_marker_type(self, idx, marker_type):
        state, cs = self.get_state(), self.get_control_sheet()

@@ -113,7 +113,7 @@ class FaceMarkerWorker(BackendWorker):
        cs.marker_coverage.set_number(marker_coverage)

        cs.temporal_smoothing.enable()
-       cs.temporal_smoothing.set_config(lib_csw.Number.Config(min=1, max=10, step=1, allow_instant_update=True))
+       cs.temporal_smoothing.set_config(lib_csw.Number.Config(min=1, max=50, step=1, allow_instant_update=True))
        cs.temporal_smoothing.set_number(marker_state.temporal_smoothing if marker_state.temporal_smoothing is not None else 1)

        else:
@@ -113,7 +113,6 @@ class FaceMergerWorker(BackendWorker):
            cs.face_opacity.enable()
            cs.face_opacity.set_config(lib_csw.Number.Config(min=0.0, max=1.0, step=0.01, decimals=2, allow_instant_update=True))
            cs.face_opacity.set_number(state.face_opacity if state.face_opacity is not None else 1.0)

        else:
            state.device = device
            self.save_state()

@@ -326,30 +325,38 @@ class FaceMergerWorker(BackendWorker):
        fsi_list = bcd.get_face_swap_info_list()
        fsi_list_len = len(fsi_list)
        has_merged_faces = False

        for fsi_id, fsi in enumerate(fsi_list):
-           image_to_align_uni_mat = fsi.image_to_align_uni_mat
-           face_resolution = fsi.face_resolution
-
-           face_align_img = bcd.get_image(fsi.face_align_image_name)
-           face_align_lmrks_mask_img = bcd.get_image(fsi.face_align_lmrks_mask_name)
-           face_align_mask_img = bcd.get_image(fsi.face_align_mask_name)
-           face_swap_img = bcd.get_image(fsi.face_swap_image_name)
-           face_swap_mask_img = bcd.get_image(fsi.face_swap_mask_name)
-
-           if all_is_not_None(face_resolution, face_align_img, face_align_mask_img, face_swap_img, face_swap_mask_img, image_to_align_uni_mat):
-               has_merged_faces = True
-               face_height, face_width = face_align_img.shape[:2]
-               frame_height, frame_width = merged_frame.shape[:2]
-               aligned_to_source_uni_mat = image_to_align_uni_mat.invert()
-               aligned_to_source_uni_mat = aligned_to_source_uni_mat.source_translated(-state.face_x_offset, -state.face_y_offset)
-               aligned_to_source_uni_mat = aligned_to_source_uni_mat.source_scaled_around_center(state.face_scale,state.face_scale)
-               aligned_to_source_uni_mat = aligned_to_source_uni_mat.to_exact_mat (face_width, face_height, frame_width, frame_height)
-
-               do_color_compression = fsi_id == fsi_list_len-1
-               if state.device == 'CPU':
-                   merged_frame = self._merge_on_cpu(merged_frame, face_resolution, face_align_img, face_align_mask_img, face_align_lmrks_mask_img, face_swap_img, face_swap_mask_img, aligned_to_source_uni_mat, frame_width, frame_height, do_color_compression=do_color_compression )
-               else:
-                   merged_frame = self._merge_on_gpu(merged_frame, face_resolution, face_align_img, face_align_mask_img, face_align_lmrks_mask_img, face_swap_img, face_swap_mask_img, aligned_to_source_uni_mat, frame_width, frame_height, do_color_compression=do_color_compression )
+           face_anim_img = bcd.get_image(fsi.face_anim_image_name)
+           if face_anim_img is not None:
+               has_merged_faces = True
+               merged_frame = face_anim_img
+           else:
+               image_to_align_uni_mat = fsi.image_to_align_uni_mat
+               face_resolution = fsi.face_resolution
+
+               face_align_img = bcd.get_image(fsi.face_align_image_name)
+               face_align_lmrks_mask_img = bcd.get_image(fsi.face_align_lmrks_mask_name)
+               face_align_mask_img = bcd.get_image(fsi.face_align_mask_name)
+               face_swap_img = bcd.get_image(fsi.face_swap_image_name)
+               face_swap_mask_img = bcd.get_image(fsi.face_swap_mask_name)
+
+               if all_is_not_None(face_resolution, face_align_img, face_align_mask_img, face_swap_img, face_swap_mask_img, image_to_align_uni_mat):
+                   has_merged_faces = True
+                   face_height, face_width = face_align_img.shape[:2]
+                   frame_height, frame_width = merged_frame.shape[:2]
+                   aligned_to_source_uni_mat = image_to_align_uni_mat.invert()
+                   aligned_to_source_uni_mat = aligned_to_source_uni_mat.source_translated(-state.face_x_offset, -state.face_y_offset)
+                   aligned_to_source_uni_mat = aligned_to_source_uni_mat.source_scaled_around_center(state.face_scale,state.face_scale)
+                   aligned_to_source_uni_mat = aligned_to_source_uni_mat.to_exact_mat (face_width, face_height, frame_width, frame_height)
+
+                   do_color_compression = fsi_id == fsi_list_len-1
+                   if state.device == 'CPU':
+                       merged_frame = self._merge_on_cpu(merged_frame, face_resolution, face_align_img, face_align_mask_img, face_align_lmrks_mask_img, face_swap_img, face_swap_mask_img, aligned_to_source_uni_mat, frame_width, frame_height, do_color_compression=do_color_compression )
+                   else:
+                       merged_frame = self._merge_on_gpu(merged_frame, face_resolution, face_align_img, face_align_mask_img, face_align_lmrks_mask_img, face_swap_img, face_swap_mask_img, aligned_to_source_uni_mat, frame_width, frame_height, do_color_compression=do_color_compression )

        if has_merged_faces:
            # keep image in float32 in order not to put extra load on FaceMerger
@@ -43,6 +43,7 @@ class SourceType(IntEnum):
    MERGED_FRAME_OR_SOURCE_FRAME = 4
    SOURCE_N_MERGED_FRAME = 5
    SOURCE_N_MERGED_FRAME_OR_SOURCE_FRAME = 6
+   ALIGNED_N_SWAPPED_FACE = 7

ViewModeNames = ['@StreamOutput.SourceType.SOURCE_FRAME',
                 '@StreamOutput.SourceType.ALIGNED_FACE',

@@ -51,6 +52,7 @@ ViewModeNames = ['@StreamOutput.SourceType.SOURCE_FRAME',
                 '@StreamOutput.SourceType.MERGED_FRAME_OR_SOURCE_FRAME',
                 '@StreamOutput.SourceType.SOURCE_N_MERGED_FRAME',
                 '@StreamOutput.SourceType.SOURCE_N_MERGED_FRAME_OR_SOURCE_FRAME',
+                '@StreamOutput.SourceType.ALIGNED_N_SWAPPED_FACE',
                 ]
@@ -75,9 +77,9 @@ class StreamOutputWorker(BackendWorker):

        self._wnd_name = 'DeepFaceLive output'
        self._wnd_showing = False

        self._streamer = FFMPEGStreamer()

        lib_os.set_timer_resolution(1)

        state, cs = self.get_state(), self.get_control_sheet()

@@ -91,7 +93,7 @@ class StreamOutputWorker(BackendWorker):
        cs.is_streaming.call_on_flag(self.on_cs_is_streaming)
        cs.stream_addr.call_on_text(self.on_cs_stream_addr)
        cs.stream_port.call_on_number(self.on_cs_stream_port)

        cs.source_type.enable()
        cs.source_type.set_choices(SourceType, ViewModeNames, none_choice_name='@misc.menu_select')
        cs.source_type.select(state.source_type)
@@ -120,23 +122,23 @@ class StreamOutputWorker(BackendWorker):

        cs.save_fill_frame_gap.enable()
        cs.save_fill_frame_gap.set_flag(state.save_fill_frame_gap if state.save_fill_frame_gap is not None else True)

        cs.is_streaming.enable()
        cs.is_streaming.set_flag(state.is_streaming if state.is_streaming is not None else False)

        cs.stream_addr.enable()
        cs.stream_addr.set_text(state.stream_addr if state.stream_addr is not None else '127.0.0.1')

        cs.stream_port.enable()
        cs.stream_port.set_config(lib_csw.Number.Config(min=1, max=9999, decimals=0, allow_instant_update=True))
        cs.stream_port.set_number(state.stream_port if state.stream_port is not None else 1234)

    def on_stop(self):
        self._streamer.stop()

    def on_cs_source_type(self, idx, source_type):
        state, cs = self.get_state(), self.get_control_sheet()
-       if source_type == SourceType.ALIGNED_FACE:
+       if source_type in [SourceType.ALIGNED_FACE, SourceType.ALIGNED_N_SWAPPED_FACE]:
            cs.aligned_face_id.enable()
            cs.aligned_face_id.set_config(lib_csw.Number.Config(min=0, max=16, step=1, allow_instant_update=True))
            cs.aligned_face_id.set_number(state.aligned_face_id or 0)
@@ -210,19 +212,19 @@ class StreamOutputWorker(BackendWorker):
        state, cs = self.get_state(), self.get_control_sheet()
        state.is_streaming = is_streaming
        self.save_state()

    def on_cs_stream_addr(self, stream_addr):
        state, cs = self.get_state(), self.get_control_sheet()
        state.stream_addr = stream_addr
        self.save_state()
        self._streamer.set_addr_port(state.stream_addr, state.stream_port)

    def on_cs_stream_port(self, stream_port):
        state, cs = self.get_state(), self.get_control_sheet()
        state.stream_port = stream_port
        self.save_state()
        self._streamer.set_addr_port(state.stream_addr, state.stream_port)

    def on_tick(self):
        cs, state = self.get_control_sheet(), self.get_state()
@@ -278,6 +280,24 @@ class StreamOutputWorker(BackendWorker):
                    if source_frame is not None and merged_frame is not None:
                        view_image = np.concatenate( (source_frame, merged_frame), 1 )

+               elif source_type == SourceType.ALIGNED_N_SWAPPED_FACE:
+                   aligned_face_id = state.aligned_face_id
+                   aligned_face = None
+                   swapped_face = None
+                   for i, fsi in enumerate(bcd.get_face_swap_info_list()):
+                       if aligned_face_id == i:
+                           aligned_face = bcd.get_image(fsi.face_align_image_name)
+                           break
+
+                   for fsi in bcd.get_face_swap_info_list():
+                       swapped_face = bcd.get_image(fsi.face_swap_image_name)
+                       if swapped_face is not None:
+                           break
+
+                   if aligned_face is not None and swapped_face is not None:
+                       view_image = np.concatenate( (aligned_face, swapped_face), 1 )

                if view_image is not None:
                    buffered_frames.add_buffer( bcd.get_frame_timestamp(), view_image )
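One detail worth noting about the view composition above:

```python
# np.concatenate(..., 1) stacks the two crops side by side and requires equal
# heights. That holds here because FaceAnimator resizes its output back to the
# aligned crop's (W, H) before publishing it as the swapped face.
```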

@@ -321,7 +341,7 @@ class Sheet:
            self.is_streaming = lib_csw.Flag.Client()
            self.stream_addr = lib_csw.Text.Client()
            self.stream_port = lib_csw.Number.Client()

    class Worker(lib_csw.Sheet.Worker):
        def __init__(self):
            super().__init__()

@@ -336,7 +356,7 @@ class Sheet:
            self.is_streaming = lib_csw.Flag.Host()
            self.stream_addr = lib_csw.Text.Host()
            self.stream_port = lib_csw.Number.Host()

class WorkerState(BackendWorkerState):
    source_type : SourceType = None
    is_showing_window : bool = None

@@ -1,7 +1,9 @@
from .BackendBase import (BackendConnection, BackendConnectionData, BackendDB,
-                         BackendSignal, BackendWeakHeap, BackendHost, BackendWorker)
+                         BackendHost, BackendSignal, BackendWeakHeap,
+                         BackendWorker)
from .CameraSource import CameraSource
from .FaceAligner import FaceAligner
+from .FaceAnimator import FaceAnimator
from .FaceDetector import FaceDetector
from .FaceMarker import FaceMarker
from .FaceMerger import FaceMerger
@@ -6,12 +6,15 @@ from .widgets.QBackendPanel import QBackendPanel
from .widgets.QCheckBoxCSWFlag import QCheckBoxCSWFlag
from .widgets.QLabelPopupInfo import QLabelPopupInfo
from .widgets.QSpinBoxCSWNumber import QSpinBoxCSWNumber
+from .widgets.QComboBoxCSWDynamicSingleSwitch import QComboBoxCSWDynamicSingleSwitch

class QFaceAligner(QBackendPanel):
    def __init__(self, backend : FaceAligner):
        cs = backend.get_control_sheet()

+       q_align_mode_label = QLabelPopupInfo(label=L('@QFaceAligner.align_mode'), popup_info_text=L('@QFaceAligner.help.align_mode'))
+       q_align_mode = QComboBoxCSWDynamicSingleSwitch(cs.align_mode, reflect_state_widgets=[q_align_mode_label])
+
        q_face_coverage_label = QLabelPopupInfo(label=L('@QFaceAligner.face_coverage'), popup_info_text=L('@QFaceAligner.help.face_coverage') )
        q_face_coverage = QSpinBoxCSWNumber(cs.face_coverage, reflect_state_widgets=[q_face_coverage_label])
@@ -24,6 +27,9 @@ class QFaceAligner(QBackendPanel):
        q_head_mode_label = QLabelPopupInfo(label=L('@QFaceAligner.head_mode'), popup_info_text=L('@QFaceAligner.help.head_mode') )
        q_head_mode = QCheckBoxCSWFlag(cs.head_mode, reflect_state_widgets=[q_head_mode_label])

+       q_freeze_z_rotation_label = QLabelPopupInfo(label=L('@QFaceAligner.freeze_z_rotation') )
+       q_freeze_z_rotation = QCheckBoxCSWFlag(cs.freeze_z_rotation, reflect_state_widgets=[q_freeze_z_rotation_label])
+
        q_x_offset_label = QLabelPopupInfo(label=L('@QFaceAligner.x_offset'))
        q_x_offset = QSpinBoxCSWNumber(cs.x_offset, reflect_state_widgets=[q_x_offset_label])
@@ -32,6 +38,9 @@ class QFaceAligner(QBackendPanel):

        grid_l = qtx.QXGridLayout(spacing=5)
        row = 0
+       grid_l.addWidget(q_align_mode_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
+       grid_l.addWidget(q_align_mode, row, 1, alignment=qtx.AlignLeft )
+       row += 1
        grid_l.addWidget(q_face_coverage_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
        grid_l.addWidget(q_face_coverage, row, 1, alignment=qtx.AlignLeft )
        row += 1
@@ -44,6 +53,9 @@ class QFaceAligner(QBackendPanel):
        grid_l.addWidget(q_head_mode_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
        grid_l.addWidget(q_head_mode, row, 1, alignment=qtx.AlignLeft )
        row += 1
+       grid_l.addWidget(q_freeze_z_rotation_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
+       grid_l.addWidget(q_freeze_z_rotation, row, 1, alignment=qtx.AlignLeft )
+       row += 1
        grid_l.addLayout( qtx.QXVBoxLayout([q_x_offset_label, q_y_offset_label]), row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
        grid_l.addLayout( qtx.QXHBoxLayout([q_x_offset, q_y_offset]), row, 1, alignment=qtx.AlignLeft )
        row += 1

apps/DeepFaceLive/ui/QFaceAnimator.py (new file, 67 lines)

@@ -0,0 +1,67 @@
from pathlib import Path

from localization import L
from resources.gfx import QXImageDB
from xlib import qt as qtx

from ..backend import FaceAnimator
from .widgets.QBackendPanel import QBackendPanel
from .widgets.QCheckBoxCSWFlag import QCheckBoxCSWFlag
from .widgets.QComboBoxCSWDynamicSingleSwitch import \
    QComboBoxCSWDynamicSingleSwitch
from .widgets.QLabelPopupInfo import QLabelPopupInfo
from .widgets.QSpinBoxCSWNumber import QSpinBoxCSWNumber
from .widgets.QXPushButtonCSWSignal import QXPushButtonCSWSignal
from .widgets.QSliderCSWNumber import QSliderCSWNumber

class QFaceAnimator(QBackendPanel):
    def __init__(self, backend : FaceAnimator, animatables_path : Path):
        self._animatables_path = animatables_path

        cs = backend.get_control_sheet()

        btn_open_folder = self.btn_open_folder = qtx.QXPushButton(image=QXImageDB.eye_outline('light gray'), tooltip_text='Reveal in Explorer', released=self._btn_open_folder_released, fixed_size=(24,22) )

        q_device_label = QLabelPopupInfo(label=L('@common.device'), popup_info_text=L('@common.help.device') )
        q_device = QComboBoxCSWDynamicSingleSwitch(cs.device, reflect_state_widgets=[q_device_label])

        q_animatable_label = QLabelPopupInfo(label=L('@QFaceAnimator.animatable') )
        q_animatable = QComboBoxCSWDynamicSingleSwitch(cs.animatable, reflect_state_widgets=[q_animatable_label, btn_open_folder])

        q_animator_face_id_label = QLabelPopupInfo(label=L('@QFaceAnimator.animator_face_id') )
        q_animator_face_id = QSpinBoxCSWNumber(cs.animator_face_id, reflect_state_widgets=[q_animator_face_id_label])

        q_relative_mode_label = QLabelPopupInfo(label=L('@QFaceAnimator.relative_mode') )
        q_relative_mode = QCheckBoxCSWFlag(cs.relative_mode, reflect_state_widgets=[q_relative_mode_label])

        q_relative_power = QSliderCSWNumber(cs.relative_power)

        q_update_animatables = QXPushButtonCSWSignal(cs.update_animatables, image=QXImageDB.reload_outline('light gray'), button_size=(24,22) )

        q_reset_reference_pose = QXPushButtonCSWSignal(cs.reset_reference_pose, text=L('@QFaceAnimator.reset_reference_pose') )

        grid_l = qtx.QXGridLayout( spacing=5)
        row = 0
        grid_l.addWidget(q_device_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
        grid_l.addWidget(q_device, row, 1, alignment=qtx.AlignLeft )
        row += 1
        grid_l.addWidget(q_animatable_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
        grid_l.addLayout(qtx.QXHBoxLayout([q_animatable, 2, btn_open_folder, 2, q_update_animatables]), row, 1 )
        row += 1
        grid_l.addWidget(q_animator_face_id_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
        grid_l.addWidget(q_animator_face_id, row, 1, alignment=qtx.AlignLeft )
        row += 1
        grid_l.addWidget(q_relative_mode_label, row, 0, alignment=qtx.AlignRight | qtx.AlignVCenter )
        grid_l.addLayout(qtx.QXHBoxLayout([q_relative_mode, 2, q_relative_power]), row, 1, alignment=qtx.AlignLeft )
        row += 1
        grid_l.addWidget(q_reset_reference_pose, row, 0, 1, 2 )
        row += 1

        super().__init__(backend, L('@QFaceAnimator.module_title'),
                         layout=qtx.QXVBoxLayout([grid_l]) )

    def _btn_open_folder_released(self):
        qtx.QDesktopServices.openUrl(qtx.QUrl.fromLocalFile( str(self._animatables_path) ))
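The widget-to-control binding pattern this file uses, condensed (all names from the code above): each `lib_csw` control on the backend's sheet gets one matching Qt wrapper widget.

```python
# cs.device            -> QComboBoxCSWDynamicSingleSwitch   (single choice)
# cs.animatable        -> QComboBoxCSWDynamicSingleSwitch   (file list from userdata/animatables)
# cs.animator_face_id  -> QSpinBoxCSWNumber                 (bounded int, 0..16)
# cs.relative_mode     -> QCheckBoxCSWFlag                  (bool)
# cs.relative_power    -> QSliderCSWNumber                  (0.0..1.0 float)
# cs.update_animatables / cs.reset_reference_pose -> QXPushButtonCSWSignal
```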

@@ -31,7 +31,7 @@ class QFaceDetector(QBackendPanel):
        q_detector_type_label = QLabelPopupInfo(label=L('@QFaceDetector.detector_type'), popup_info_text=L('@QFaceDetector.help.detector_type') )
        q_detector_type = QComboBoxCSWDynamicSingleSwitch(cs.detector_type, reflect_state_widgets=[q_detector_type_label])

-       q_device_label = QLabelPopupInfo(label=L('@QFaceDetector.device'), popup_info_text=L('@QFaceDetector.help.device') )
+       q_device_label = QLabelPopupInfo(label=L('@common.device'), popup_info_text=L('@common.help.device') )
        q_device = QComboBoxCSWDynamicSingleSwitch(cs.device, reflect_state_widgets=[q_device_label])

        q_fixed_window_size_label = QLabelPopupInfo(label=L('@QFaceDetector.window_size'), popup_info_text=L('@QFaceDetector.help.window_size') )
@@ -15,7 +15,7 @@ class QFaceMarker(QBackendPanel):
        q_marker_type_label = QLabelPopupInfo(label=L('@QFaceMarker.marker_type'), popup_info_text=L('@QFaceMarker.help.marker_type') )
        q_marker_type = QComboBoxCSWDynamicSingleSwitch(cs.marker_type, reflect_state_widgets=[q_marker_type_label])

-       q_device_label = QLabelPopupInfo(label=L('@QFaceMarker.device'), popup_info_text=L('@QFaceMarker.help.device') )
+       q_device_label = QLabelPopupInfo(label=L('@common.device'), popup_info_text=L('@common.help.device') )
        q_device = QComboBoxCSWDynamicSingleSwitch(cs.device, reflect_state_widgets=[q_device_label])

        q_marker_coverage_label = QLabelPopupInfo(label=L('@QFaceMarker.marker_coverage'), popup_info_text=L('@QFaceMarker.help.marker_coverage') )
@@ -14,7 +14,7 @@ class QFaceMerger(QBackendPanel):
    def __init__(self, backend):
        cs = backend.get_control_sheet()

-       q_device_label = QLabelPopupInfo(label=L('@QFaceMerger.device'), popup_info_text=L('@QFaceMerger.help.device'))
+       q_device_label = QLabelPopupInfo(label=L('@common.device'), popup_info_text=L('@common.help.device'))
        q_device = QComboBoxCSWDynamicSingleSwitch(cs.device, reflect_state_widgets=[q_device_label])

        q_face_x_offset_label = QLabelPopupInfo(label=L('@QFaceMerger.face_x_offset'))
@@ -33,7 +33,7 @@ class QFaceSwapper(QBackendPanel):

        q_model_info_label = self._q_model_info_label = QLabelPopupInfoCSWInfoLabel(cs.model_info_label)

-       q_device_label = QLabelPopupInfo(label=L('@QFaceSwapper.device'), popup_info_text=L('@QFaceSwapper.help.device') )
+       q_device_label = QLabelPopupInfo(label=L('@common.device'), popup_info_text=L('@common.help.device') )
        q_device = QComboBoxCSWDynamicSingleSwitch(cs.device, reflect_state_widgets=[q_device_label])

        q_swap_all_faces_label = QLabelPopupInfo(label=L('@QFaceSwapper.swap_all_faces') )
@@ -26,7 +26,9 @@ class QComboBoxCSWDynamicSingleSwitch(QCSWControl):
        super().__init__(csw_control=csw_switch, reflect_state_widgets=reflect_state_widgets, layout=self._main_l)

    def _on_csw_choices(self, choices, choices_names, none_choice_name : Union[str,None]):
+       idx = 0
        if self._combobox is not None:
+           idx = self._combobox.currentIndex()
            self._main_l.removeWidget(self._combobox)
            self._combobox.deleteLater()

@@ -40,7 +42,9 @@ class QComboBoxCSWDynamicSingleSwitch(QCSWControl):

        for choice_name in choices_names:
            combobox.addItem( qtx.QIcon(), L(choice_name) )

+       combobox.setCurrentIndex(idx)

        self._main_l.addWidget(combobox)

    def on_combobox_index_changed(self, idx):
New binary files:
build/animatables/Biden.png (1.8 MiB)
build/animatables/Elon_Musk.png (244 KiB)
build/animatables/Elon_Musk_blue_bg.png (242 KiB)
build/animatables/Kim Chen Yin.png (596 KiB)
build/animatables/Lukashenko.png (288 KiB)
build/animatables/Putin.png (230 KiB)
build/animatables/Putin2.png (461 KiB)
@@ -9,7 +9,7 @@ RUN ln -s /usr/bin/python3 /usr/bin/python
RUN git clone https://github.com/iperov/DeepFaceLive.git

RUN python -m pip install --upgrade pip
-RUN python -m pip install onnxruntime-gpu==1.11.1 numpy==1.21.5 h5py numexpr opencv-python==4.5.5.64 opencv-contrib-python==4.5.5.64 pyqt6==6.3.0 onnx==1.11.0 torch==1.8.1 torchvision==0.9.1
+RUN python -m pip install onnxruntime-gpu==1.11.1 numpy==1.21.6 h5py numexpr opencv-python==4.5.5.64 opencv-contrib-python==4.5.5.64 pyqt6==6.3.0 onnx==1.11.0 torch==1.8.1 torchvision==0.9.1

WORKDIR /app/DeepFaceLive
COPY example.sh example.sh
New binary file:
build/samples/Obama_speaking.mp4
@@ -470,8 +470,8 @@ def build_deepfacelive_windows(release_dir, cache_dir, python_ver='3.7.9', backe
                               clear_release_path=True)

    # PIP INSTALLATIONS
-   builder.install_pip_package('numpy==1.21.5')
+   builder.install_pip_package('numpy==1.21.6')
    builder.install_pip_package('h5py')
    builder.install_pip_package('numexpr')
    builder.install_pip_package('opencv-python==4.5.5.64')

@@ -522,6 +522,9 @@ def build_deepfacelive_windows(release_dir, cache_dir, python_ver='3.7.9', backe
    print('Copying samples.')
    shutil.copytree( str(Path(__file__).parent.parent / 'samples'), str(userdata_path / 'samples') )

+   print('Copying animatables.')
+   shutil.copytree( str(Path(__file__).parent.parent / 'animatables'), str(userdata_path / 'animatables') )

    if backend == 'cuda':
        builder.create_run_python_script('DeepFaceLive.bat', 'DeepFaceLive\\main.py', 'run DeepFaceLive --userdata-dir="%~dp0userdata"')
    elif backend == 'directml':
New binary files:
doc/FaceAnimator_tutor.mp4
doc/face_animator_example.gif (11 MiB)
@@ -44,6 +44,20 @@ class Localization:
                     'es-ES' : '--seleccionar--',
                     'it-IT' : '--selezionare--'},

+       'common.device':{
+                    'en-US' : 'Device',
+                    'ru-RU' : 'Устройство',
+                    'zh-CN' : '设备',
+                    'es-ES' : 'Dispositivo',
+                    'it-IT' : 'Dispositivo'},
+
+       'common.help.device':{
+                    'en-US' : 'Adjust the combination of module devices to achieve higher fps or lower CPU usage.',
+                    'ru-RU' : 'Настройте комбинации устройств модулей для достижения высоких кадр/сек либо снижения нагрузки на процессор.',
+                    'zh-CN' : '调整模块设备的组合以实现更高的fps或更低的CPU使用率',
+                    'es-ES' : 'Ajuste la combinación de dispositivos del módulo para lograr más FPS o una menor utilización de CPU.',
+                    'it-IT' : 'Regola la combinazione dei dispositivi del modulo per ottenere fps più alti o un minore utilizzo della CPU.'},

        'QBackendPanel.start':{
                     'en-US' : 'Start',
                     'ru-RU' : 'Запустить',
@@ -319,20 +333,6 @@ class Localization:
                     'es-ES' : 'Diferentes tipos de detecciones funcionan de manera distinta.',
                     'it-IT' : 'Diversi tipi di rilevatori funzionano in maniera diversa'},

-       'QFaceDetector.device':{
-                    'en-US' : 'Device',
-                    'ru-RU' : 'Устройство',
-                    'zh-CN' : '设备',
-                    'es-ES' : 'Dispositivo',
-                    'it-IT' : 'Dispositivo'},
-
-       'QFaceDetector.help.device':{
-                    'en-US' : 'Adjust the combination of module devices to achieve higher fps or lower CPU usage.',
-                    'ru-RU' : 'Настройте комбинации устройств модулей для достижения высоких кадр/сек либо снижения нагрузки на процессор.',
-                    'zh-CN' : '调整模块设备的组合以实现更高的fps或更低的CPU使用率',
-                    'es-ES' : 'Ajuste la combinación de dispositivos del módulo para lograr más FPS o una menor utilización de CPU.',
-                    'it-IT' : 'Regola la combinazione dei dispositivi del modulo per ottenere fps più alti o un minore utilizzo della CPU.'},

        'QFaceDetector.window_size':{
                     'en-US' : 'Window size',
                     'ru-RU' : 'Размер окна',
@@ -345,7 +345,7 @@ class Localization:
                     'ru-RU' : 'Меньший размер окна быстрее, но менее точен.',
                     'zh-CN' : '检测窗口越小越快,但越不精准',
                     'es-ES' : 'Menor tamaño de ventana es más rápido, pero menos preciso.',
                     'it-IT' : 'Una dimensione della finestra minore è più veloce, ma meno accurata.'},

        'QFaceDetector.threshold':{
                     'en-US' : 'Threshold',
@@ -417,6 +417,20 @@ class Localization:
                     'es-ES' : 'Alineador de caras',
                     'it-IT' : 'Allineatore facciale'},

+       'QFaceAligner.align_mode':{
+                    'en-US' : 'Align mode',
+                    'ru-RU' : 'Режим выравнивания',
+                    'zh-CN' : '对齐模式',
+                    'es-ES' : 'Modo de alineación',
+                    'it-IT' : 'Modalità di allineamento'},
+
+       'QFaceAligner.help.align_mode':{
+                    'en-US' : 'From rectangle is good for Face Animator. From points is good for face swapper.',
+                    'ru-RU' : 'Из прямоугольника подходит для Face Animator. Из точек подходит для программы смены лиц.',
+                    'zh-CN' : '从矩形来看,适合于脸部动画师。从点上看,适合换脸的人。',
+                    'es-ES' : 'Desde el rectángulo es bueno para el animador de la cara. Desde los puntos es adecuado para los cambiadores de cara.',
+                    'it-IT' : "Il rettangolo è adatto all'animatore di volti. Dai punti è adatto a chi cambia il volto."},

        'QFaceAligner.face_coverage':{
                     'en-US' : 'Face coverage',
                     'ru-RU' : 'Покрытие лица',
@@ -473,6 +487,13 @@ class Localization:
                     'es-ES' : 'Modo HEAD. Usado con el modelo HEAD.',
                     'it-IT' : 'Modalità testa. Usala con un modello HEAD.'},

+       'QFaceAligner.freeze_z_rotation':{
+                    'en-US' : 'Freeze Z rotation',
+                    'ru-RU' : 'Заморозить Z поворот',
+                    'zh-CN' : '冻结Z旋转',
+                    'es-ES' : 'Congelar la rotación Z',
+                    'it-IT' : 'Congelare la rotazione Z'},

        'QFaceAligner.x_offset':{
                     'en-US' : 'X offset',
                     'ru-RU' : 'Смещение по X',
@@ -508,20 +529,6 @@ class Localization:
                     'es-ES' : 'Tipo de marcador de caras.',
                     'it-IT' : 'Tipo di marcatore facciale.'},

-       'QFaceMarker.device':{
-                    'en-US' : 'Device',
-                    'ru-RU' : 'Устройство',
-                    'zh-CN' : '设备',
-                    'es-ES' : 'Dispositivo',
-                    'it-IT' : 'Dispositivo'},
-
-       'QFaceMarker.help.device':{
-                    'en-US' : 'Adjust the combination of module devices to achieve higher fps or lower CPU usage.',
-                    'ru-RU' : 'Настройте комбинации устройств модулей для достижения высоких кадр/сек либо снижения нагрузки на процессор.',
-                    'zh-CN' : '调整模块设备的组合以实现更高的fps或更低的CPU使用率',
-                    'es-ES' : 'Ajuste la combinación de dispositivos del módulo para alcanzar un mayor número de FPS o una menor carga de CPU.',
-                    'it-IT' : 'Regola la combinazione dei dispositivi del modulo per ottenere FPS più alti o un utilizzo più basso della CPU'},

        'QFaceMarker.marker_coverage':{
                     'en-US' : 'Marker coverage',
                     'ru-RU' : 'Покрытие маркера',
@@ -550,6 +557,41 @@ class Localization:
                     'es-ES' : 'Estabiliza los puntos de referencia de la cara haciendo un promedio de los fotogramas.\nBueno para usar en escenas estáticas o con cámaras web.',
                     'it-IT' : 'Stabilizza i punti di riferimento della faccia facendo la media dei fotogrammi.\nBuono da usare in scene statiche o con una webcam.'},

+       'QFaceAnimator.module_title':{
+                    'en-US' : 'Face animator',
+                    'ru-RU' : 'Аниматор лица',
+                    'zh-CN' : '脸部动画师',
+                    'es-ES' : 'Animador de caras',
+                    'it-IT' : 'Animatore di volti'},
+
+       'QFaceAnimator.animatable':{
+                    'en-US' : 'Animatable',
+                    'ru-RU' : 'Анимируемый',
+                    'zh-CN' : '可动画化',
+                    'es-ES' : 'Animable',
+                    'it-IT' : 'Animabile'},
+
+       'QFaceAnimator.animator_face_id':{
+                    'en-US' : 'Animator Face ID',
+                    'ru-RU' : 'Номер лица аниматора',
+                    'zh-CN' : '动画师脸部ID',
+                    'es-ES' : 'Animator Face ID',
+                    'it-IT' : 'Animatore Face ID'},
+
+       'QFaceAnimator.relative_mode':{
+                    'en-US' : 'Relative mode',
+                    'ru-RU' : 'Относительный режим',
+                    'zh-CN' : '相对模式',
+                    'es-ES' : 'Modo relativo',
+                    'it-IT' : 'Modalità relativa'},
+
+       'QFaceAnimator.reset_reference_pose':{
+                    'en-US' : 'Reset reference pose',
+                    'ru-RU' : 'Сбросить относительную позу',
+                    'zh-CN' : '重置参考姿态',
+                    'es-ES' : 'Restablecer la pose de referencia',
+                    'it-IT' : 'Azzeramento della posa di riferimento'},

        'QFaceSwapper.module_title':{
                     'en-US' : 'Face swapper',
                     'ru-RU' : 'Замена лица',
@@ -571,20 +613,6 @@ class Localization:
             'es-ES' : 'Archivo de modelo desde una carpeta o disponible para descargar desde Internet.\nPuede entrenar su propio modelo en DeepFaceLab.',
             'it-IT' : 'File del modello da una cartella or disponibile per il download da internet.\nPuoi addestrare il tuo modello su DeepFaceLab.'},

-    'QFaceSwapper.device':{
-            'en-US' : 'Device',
-            'ru-RU' : 'Устройство',
-            'zh-CN' : '设备',
-            'es-ES' : 'Dispositivo',
-            'it-IT' : 'Dispositivo'},
-
-    'QFaceSwapper.help.device':{
-            'en-US' : 'Adjust the combination of module devices to achieve higher fps or lower CPU usage.',
-            'ru-RU' : 'Настройте комбинации устройств модулей для достижения высоких кадр/сек либо снижения нагрузки на процессор.',
-            'zh-CN' : '调整模块设备的组合以实现更高的fps或更低的CPU使用率',
-            'es-ES' : 'Ajuste la combinación de dispositivos del módulo para alcanzar un mayor número de FPS o una menor carga de CPU.',
-            'it-IT' : 'Regola la combinazione dei dispositivi del modulo per ottenere FPS più alti o un utilizzo più basso della CPU'},
-
     'QFaceSwapper.swap_all_faces':{
             'en-US' : 'Swap all faces',
             'ru-RU' : 'Заменить все лица',
@@ -704,20 +732,6 @@ class Localization:
             'es-ES' : 'Fusionador de caras',
             'it-IT' : 'Unitore di facce (Merger)'},

-    'QFaceMerger.device':{
-            'en-US' : 'Device',
-            'ru-RU' : 'Устройство',
-            'zh-CN' : '设备',
-            'es-ES' : 'Dispositivo',
-            'it-IT' : 'Dispositivo'},
-
-    'QFaceMerger.help.device':{
-            'en-US' : 'Adjust the combination of module devices to achieve higher fps or lower CPU usage.',
-            'ru-RU' : 'Настройте комбинации устройств модулей для достижения высоких кадр/сек либо снижения нагрузки на процессор.',
-            'zh-CN' : '调整模块设备的组合以实现更高的fps或更低的CPU使用率。',
-            'es-ES' : 'Ajuste la combinación de dispositivos de los módulos para alcanzar un mayor número de FPS o una menor carga de CPU.',
-            'it-IT' : 'Regola la combinazione dei dispositivi del modulo per ottenere fps più alti o un minore utilizzo della CPU.'},
-
     'QFaceMerger.face_x_offset':{
             'en-US' : 'Face X offset',
             'ru-RU' : 'Смещение лица X',
@@ -970,6 +984,20 @@ class Localization:
             'es-ES' : 'De abajo a arriba',
             'it-IT' : "Dal basso verso l'alto"},

+    'FaceAligner.AlignMode.FROM_RECT':{
+            'en-US' : 'From rectangle',
+            'ru-RU' : 'Из прямоугольника',
+            'zh-CN' : '从长方形',
+            'es-ES' : 'Desde el rectángulo',
+            'it-IT' : 'Da rettangolo'},
+
+    'FaceAligner.AlignMode.FROM_POINTS':{
+            'en-US' : 'From points',
+            'ru-RU' : 'Из точек',
+            'zh-CN' : '从点',
+            'es-ES' : 'De los puntos',
+            'it-IT' : 'Da punti'},
+
     'FaceSwapper.model_information':{
             'en-US' : 'Model information',
             'ru-RU' : 'Информация о модели',
@@ -1046,4 +1074,13 @@ class Localization:
             'zh-CN' : '源和融合后的帧则源帧',
             'es-ES' : 'Frame de origen y fusionado o frame de origen',
             'it-IT' : 'Fotogramma sorgente e fuso o fotogramma sorgente'},

+    'StreamOutput.SourceType.ALIGNED_N_SWAPPED_FACE':{
+            'en-US' : 'Aligned and swapped face',
+            'ru-RU' : 'Выровненное и заменённое лицо',
+            'zh-CN' : '对齐和调换面孔',
+            'es-ES' : 'Cara alineada e intercambiada',
+            'it-IT' : 'Faccia allineata e scambiata'},
+
     }
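For orientation: entries in this dict are keyed by a dotted widget.attribute name and an IETF language tag. A minimal lookup sketch follows; the helper name and the `Localization.localization` attribute are illustrative assumptions, not the app's actual localization API.

```python
# Illustrative only: dotted-key, per-language lookup over a dict shaped like the one above.
# DeepFaceLive's real localization helper may have a different name and signature.
def localize(strings: dict, key: str, lang: str = 'en-US') -> str:
    entry = strings.get(key, {})
    # fall back to English, then to the raw key, if a translation is missing
    return entry.get(lang, entry.get('en-US', key))

# localize(Localization.localization, 'QFaceAnimator.module_title', 'it-IT')
# -> 'Animatore di volti'
```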
modelhub/onnx/TPSMM/TPSMM.py (new file, 131 lines)
@@ -0,0 +1,131 @@
from pathlib import Path
from typing import List

import cv2
import numpy as np
from xlib.file import SplittedFile
from xlib.image import ImageProcessor
from xlib.onnxruntime import (InferenceSession_with_device, ORTDeviceInfo,
                              get_available_devices_info)


class TPSMM:
    """
    [CVPR2022] Thin-Plate Spline Motion Model for Image Animation
    https://github.com/yoyo-nb/Thin-Plate-Spline-Motion-Model

    arguments

     device_info    ORTDeviceInfo

        use TPSMM.get_available_devices()
        to determine a list of available devices accepted by the model

    raises
     Exception
    """

    @staticmethod
    def get_available_devices() -> List[ORTDeviceInfo]:
        return get_available_devices_info()

    def __init__(self, device_info : ORTDeviceInfo):
        if device_info not in TPSMM.get_available_devices():
            raise Exception(f'device_info {device_info} is not in available devices for TPSMM')

        generator_path = Path(__file__).parent / 'generator.onnx'
        SplittedFile.merge(generator_path, delete_parts=False)
        if not generator_path.exists():
            raise FileNotFoundError(f'{generator_path} not found')

        kp_detector_path = Path(__file__).parent / 'kp_detector.onnx'
        if not kp_detector_path.exists():
            raise FileNotFoundError(f'{kp_detector_path} not found')

        self._generator = InferenceSession_with_device(str(generator_path), device_info)
        self._kp_detector = InferenceSession_with_device(str(kp_detector_path), device_info)

    def get_input_size(self):
        """
        returns optimal (Width, Height) for input images; resize the source image beforehand to avoid extra load
        """
        return (256,256)

    def extract_kp(self, img : np.ndarray):
        """
        Extract keypoints from image

        arguments

         img    np.ndarray      HW HWC 1HWC uint8/float32
        """
        feed_img = ImageProcessor(img).resize(self.get_input_size()).swap_ch().to_ufloat32().ch(3).get_image('NCHW')
        return self._kp_detector.run(None, {'in': feed_img})[0]

    def generate(self, img_source : np.ndarray, kp_source : np.ndarray, kp_driver : np.ndarray, kp_driver_ref : np.ndarray = None, relative_power : float = 1.0):
        """
        arguments

         img_source     np.ndarray      HW HWC 1HWC uint8/float32

         kp_driver_ref  specify to work in kp relative mode
        """
        if kp_driver_ref is not None:
            kp_driver = self.calc_relative_kp(kp_source=kp_source, kp_driver=kp_driver, kp_driver_ref=kp_driver_ref, power=relative_power)

        theta, control_points, control_params = self.create_transformations_params(kp_source, kp_driver)

        ip = ImageProcessor(img_source)
        dtype = ip.get_dtype()
        _,H,W,_ = ip.get_dims()

        feed_img = ip.resize(self.get_input_size()).to_ufloat32().ch(3).get_image('NCHW')

        out = self._generator.run(None, {'in' : feed_img,
                                         'theta' : theta,
                                         'control_points' : control_points,
                                         'control_params' : control_params,
                                         'kp_driver' : kp_driver,
                                         'kp_source' : kp_source,
                                         })[0].transpose(0,2,3,1)[0]

        out = ImageProcessor(out).resize( (W,H) ).to_dtype(dtype).get_image('HWC')
        return out

    def calc_relative_kp(self, kp_source, kp_driver, kp_driver_ref, power = 1.0):
        source_area  = np.array([ cv2.contourArea(cv2.convexHull(pts)) for pts in kp_source ], dtype=kp_source.dtype)
        driving_area = np.array([ cv2.contourArea(cv2.convexHull(pts)) for pts in kp_driver_ref ], dtype=kp_driver_ref.dtype)
        movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)
        return kp_source + (kp_driver - kp_driver_ref) * movement_scale[:,None,None] * power

    def create_transformations_params(self, kp_source, kp_driver):
        kp_num = 10
        kp_sub_num = 5

        kp_d = kp_driver.reshape(-1, kp_num, kp_sub_num, 2)
        kp_s = kp_source.reshape(-1, kp_num, kp_sub_num, 2)

        K = np.linalg.norm(kp_d[:,:,:,None] - kp_d[:,:,None,:], ord=2, axis=4) ** 2
        K = K * np.log(K + 1e-9)

        kp_1d = np.concatenate([kp_d, np.ones(kp_d.shape[:-1], dtype=kp_d.dtype)[...,None] ], -1)

        P = np.concatenate([kp_1d, np.zeros(kp_d.shape[:2] + (3, 3), dtype=kp_d.dtype)], 2)
        L = np.concatenate([K, kp_1d.transpose(0,1,3,2)], 2)
        L = np.concatenate([L, P], 3)

        Y = np.concatenate([kp_s, np.zeros(kp_d.shape[:2] + (3, 2), dtype=kp_d.dtype)], 2)

        one = np.broadcast_to( np.eye(Y.shape[2], dtype=kp_d.dtype), L.shape ) * 0.01
        L = L + one

        param = np.matmul(np.linalg.inv(L), Y)

        theta = param[:,:,kp_sub_num:,:].transpose(0,1,3,2)
        control_points = kp_d
        control_params = param[:,:,:kp_sub_num,:]
        return theta, control_points, control_params
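For orientation, here is a minimal sketch of how this class is typically driven: extract reference keypoints once from the static source picture and from a neutral driving frame, then generate each animated frame in relative mode. The file name and the webcam loop are illustrative assumptions, not part of this commit; the `TPSMM` import path is the one added to `modelhub/onnx/__init__.py` below.

```python
# Minimal usage sketch (assumes the TPSMM ONNX files above are present).
# 'animatable.jpg' and the frame loop are illustrative, not part of this commit.
import cv2
from modelhub.onnx import TPSMM

device = TPSMM.get_available_devices()[0]      # pick the first available ORT device
tpsmm = TPSMM(device)

img_source = cv2.imread('animatable.jpg')      # static face picture to animate
kp_source = tpsmm.extract_kp(img_source)

cap = cv2.VideoCapture(0)                      # driving video / webcam
_, ref_frame = cap.read()
kp_driver_ref = tpsmm.extract_kp(ref_frame)    # reference pose for relative mode

while True:
    ok, frame = cap.read()
    if not ok:
        break
    kp_driver = tpsmm.extract_kp(frame)
    out = tpsmm.generate(img_source, kp_source, kp_driver,
                         kp_driver_ref=kp_driver_ref, relative_power=1.0)
    cv2.imshow('animated', out)                # output is at the source resolution
    if cv2.waitKey(1) == 27:                   # Esc to quit
        break
```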
modelhub/onnx/TPSMM/generator.onnx.part0 (new binary file)
modelhub/onnx/TPSMM/generator.onnx.part1 (new binary file)
modelhub/onnx/TPSMM/generator.onnx.part2 (new binary file)
modelhub/onnx/TPSMM/generator.onnx.part3 (new binary file)
modelhub/onnx/TPSMM/generator.onnx.part4 (new binary file)
modelhub/onnx/TPSMM/kp_detector.onnx (new binary file)
@@ -2,4 +2,5 @@ from .CenterFace.CenterFace import CenterFace
 from .FaceMesh.FaceMesh import FaceMesh
 from .S3FD.S3FD import S3FD
 from .YoloV5Face.YoloV5Face import YoloV5Face
 from .InsightFace2d106.InsightFace2D106 import InsightFace2D106
+from .TPSMM.TPSMM import TPSMM
@@ -6,10 +6,10 @@ from xlib import face as lib_face
 from xlib import path as lib_path
 from xlib.file import SplittedFile
 from xlib import cv as lib_cv
 import cv2

 repo_root = Path(__file__).parent.parent
 large_files_list = [ (repo_root / 'modelhub' / 'onnx' / 'S3FD' / 'S3FD.onnx', 48*1024*1024),
+                     (repo_root / 'modelhub' / 'onnx' / 'TPSMM' / 'generator.onnx', 50*1024*1024),
                      (repo_root / 'modelhub' / 'torch' / 'S3FD' / 'S3FD.pth', 48*1024*1024),
                      (repo_root / 'modelhub' / 'cv' / 'FaceMarkerLBF' / 'lbfmodel.yaml', 34*1024*1024),
                    ]

@@ -23,7 +23,11 @@ def merge_large_files(delete_parts=False):
 def split_large_files(delete_original=False):
     for filepath, part_size in large_files_list:
         print(f'Splitting {filepath}...')
-        SplittedFile.split(filepath, part_size=part_size, delete_original=delete_original)
+        if filepath.exists():
+            SplittedFile.split(filepath, part_size=part_size, delete_original=delete_original)
+        else:
+            print(f'{filepath} not found. Skipping.')

     print('Done')

 def extract_FaceSynthetics(inputdir_path : Path, faceset_path : Path):
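The new generator.onnx is registered here with a 50 MiB part size, which is why five `.part` files are committed above and why `TPSMM.__init__` calls `SplittedFile.merge` before loading. A round-trip sketch using only the `SplittedFile` calls that appear in this diff; the path is illustrative:

```python
# Round-trip sketch using only SplittedFile calls appearing in this commit.
from pathlib import Path
from xlib.file import SplittedFile

model_path = Path('modelhub/onnx/TPSMM/generator.onnx')

# split into <=50 MiB parts (generator.onnx.part0, .part1, ...)
SplittedFile.split(model_path, part_size=50*1024*1024, delete_original=False)

# later (e.g. at first run), reassemble the original file from its parts
SplittedFile.merge(model_path, delete_parts=False)
assert model_path.exists()
```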
@@ -53,7 +57,7 @@ def extract_FaceSynthetics(inputdir_path : Path, faceset_path : Path):
     """
     if faceset_path.suffix != '.dfs':
         raise ValueError('faceset_path must have .dfs extension.')

     filepaths = lib_path.get_files_paths(inputdir_path)
     fs = lib_face.Faceset(faceset_path, write_access=True, recreate=True)
     for filepath in lib_con.progress_bar_iterator(filepaths, desc='Processing'):
@@ -89,10 +93,10 @@ def extract_FaceSynthetics(inputdir_path : Path, faceset_path : Path):
         ufm.set_UImage_uuid(uimg.get_uuid())
         ufm.set_FRect(flmrks.get_FRect())
         ufm.add_FLandmarks2D(flmrks)

-        fs.add_UFaceMark(ufm)

+        fs.add_UFaceMark(ufm)
         fs.add_UImage(uimg, format='png')

     fs.optimize()
     fs.close()
@@ -2,13 +2,14 @@ from typing import Tuple

 import cv2
 import numpy as np
 import numpy.linalg as npla

+from .. import math as lib_math
 from ..math import Affine2DMat, Affine2DUniMat
 from .ELandmarks2D import ELandmarks2D
 from .FRect import FRect
 from .IState import IState


 class FLandmarks2D(IState):
     def __init__(self):
         """
@@ -105,13 +106,14 @@ class FLandmarks2D(IState):
         r = max(xrt[0], xrb[0])
         b = max(xlb[1], xrb[1])
         return FRect.from_ltrb( (l,t,r,b) )


     def calc_cut(self, h_w, coverage : float, output_size : int,
                        exclude_moving_parts : bool = False,
                        head_yaw : float = None,
-                       x_offset : float = 0, y_offset : float = 0):
+                       x_offset : float = 0, y_offset : float = 0,
+                       freeze_z_rotation = False):
         """
         Calculates affine mat for face cut.
@@ -129,10 +131,10 @@ class FLandmarks2D(IState):
             type = ELandmarks2D.L68
             lmrks = lmrks[ lmrks_106_to_68_mean_pairs ]
             lmrks = lmrks.reshape( (68,2,2)).mean(1)

         if type == ELandmarks2D.L68:
             mat = Affine2DMat.umeyama( np.concatenate ([ lmrks[17:36], lmrks[36:37], lmrks[39:40], lmrks[42:43], lmrks[45:46], lmrks[48:49], lmrks[54:55] ]), uni_landmarks_68)

         elif type == ELandmarks2D.L468:
             src_lmrks = lmrks
             dst_lmrks = uni_landmarks_468
|
@ -149,13 +151,10 @@ class FLandmarks2D(IState):
|
|||
g_c = g_p[4]
|
||||
|
||||
# calc diagonal vectors between corners in global space
|
||||
tb_diag_vec = (g_p[2]-g_p[0]).astype(np.float32)
|
||||
tb_diag_vec /= npla.norm(tb_diag_vec)
|
||||
bt_diag_vec = (g_p[1]-g_p[3]).astype(np.float32)
|
||||
bt_diag_vec /= npla.norm(bt_diag_vec)
|
||||
tb_diag_vec = lib_math.segment_to_vector(g_p[0], g_p[2]).astype(np.float32)
|
||||
bt_diag_vec = lib_math.segment_to_vector(g_p[3], g_p[1]).astype(np.float32)
|
||||
|
||||
# calc modifier of diagonal vectors for coverage value
|
||||
mod = npla.norm(g_p[0]-g_p[2])*(coverage*0.5)
|
||||
mod = lib_math.segment_length(g_p[0],g_p[4])*coverage
|
||||
|
||||
if head_yaw is not None:
|
||||
# Damp near zero
|
||||
|
@ -164,12 +163,32 @@ class FLandmarks2D(IState):
|
|||
# adjust vertical offset to cover more forehead
|
||||
h_vec = (g_p[1]-g_p[0]).astype(np.float32)
|
||||
v_vec = (g_p[3]-g_p[0]).astype(np.float32)
|
||||
|
||||
|
||||
g_c += h_vec*x_offset + v_vec*y_offset
|
||||
|
||||
l_t = np.array( [ g_c - tb_diag_vec*mod,
|
||||
g_c + bt_diag_vec*mod,
|
||||
g_c + tb_diag_vec*mod ], np.float32 )
|
||||
if not freeze_z_rotation:
|
||||
l_t = np.array([g_c - tb_diag_vec*mod,
|
||||
g_c + bt_diag_vec*mod,
|
||||
g_c + tb_diag_vec*mod], np.float32 )
|
||||
else:
|
||||
# remove_align - face will be centered in the frame but not aligned
|
||||
l_t = np.array([g_c - tb_diag_vec*mod,
|
||||
g_c + bt_diag_vec*mod,
|
||||
g_c + tb_diag_vec*mod,
|
||||
g_c - bt_diag_vec*mod], np.float32 )
|
||||
|
||||
# get area of face square in global space
|
||||
area = 0.5*np.abs(np.dot(l_t[:,0],np.roll(l_t[:,1],1))-np.dot(l_t[:,1],np.roll(l_t[:,0],1)))
|
||||
|
||||
# calc side of square
|
||||
side = np.float32(np.sqrt(area) / 2)
|
||||
|
||||
# calc 3 points with unrotated square
|
||||
l_t = np.array([g_c + [-side,-side],
|
||||
g_c + [ side,-side],
|
||||
g_c + [ side, side]], np.float32 )
|
||||
|
||||
|
||||
|
||||
# calc affine transform from 3 global space points to 3 local space points size of 'output_size'
|
||||
mat = Affine2DMat.from_3_pairs ( l_t, np.float32(( (0,0),(output_size,0),(output_size,output_size) )))
|
||||
|
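A self-contained illustration of the freeze_z_rotation branch above (toy numbers, numpy only): the rotated coverage square's area is measured with the shoelace formula, then an axis-aligned square of equal area is rebuilt around the same center, so scale and position are preserved while the in-plane (Z/roll) rotation is discarded.

```python
# Standalone sketch of the freeze_z_rotation geometry (numbers are made up).
import numpy as np

g_c = np.float32([100, 100])                       # square center
ang = np.deg2rad(30)                               # some in-plane (Z) rotation
r = 40.0                                           # center-to-corner distance
square = np.float32([ g_c + r*np.float32([np.cos(ang+a), np.sin(ang+a)])
                      for a in np.deg2rad([225, 315, 45, 135]) ])

# shoelace formula: area of the rotated square (same expression as in calc_cut)
area = 0.5*np.abs(np.dot(square[:,0], np.roll(square[:,1],1))
                - np.dot(square[:,1], np.roll(square[:,0],1)))
side = np.sqrt(area) / 2                           # half-side, as in calc_cut

# axis-aligned square of the same area, centered at g_c; rotation is gone
l_t = np.float32([g_c + [-side,-side],
                  g_c + [ side,-side],
                  g_c + [ side, side]])
```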
@@ -184,7 +203,8 @@ class FLandmarks2D(IState):
                exclude_moving_parts : bool = False,
                head_yaw : float = None,
                x_offset : float = 0,
-               y_offset : float = 0) -> Tuple[np.ndarray, Affine2DUniMat]:
+               y_offset : float = 0,
+               freeze_z_rotation : bool = False) -> Tuple[np.ndarray, Affine2DUniMat]:
         """
         Cut the face to square of output_size from img using landmarks with given parameters
@@ -208,7 +228,7 @@ class FLandmarks2D(IState):
         """
         h,w = img.shape[0:2]

-        mat, uni_mat = self.calc_cut( (h,w), coverage, output_size, exclude_moving_parts, head_yaw=head_yaw, x_offset=x_offset, y_offset=y_offset)
+        mat, uni_mat = self.calc_cut( (h,w), coverage, output_size, exclude_moving_parts, head_yaw=head_yaw, x_offset=x_offset, y_offset=y_offset, freeze_z_rotation=freeze_z_rotation)

         face_image = cv2.warpAffine(img, mat, (output_size, output_size), cv2.INTER_CUBIC )
         return face_image, uni_mat
@@ -243,7 +263,7 @@ lmrks_106_to_68_mean_pairs = [1,9, 10,11, 12,13, 14,15, 16,2, 3,4, 5,6, 7,8, 0,0,
                               35,35, 41,40, 40,42, 39,39, 37,33, 33,36,
                               89,89, 95,94, 94,96, 93,93, 91,87, 87,90,
                               52,52, 64,64, 63,63, 71,71, 67,67, 68,68, 61,61, 58,58, 59,59, 53,53, 56,56, 55,55, 65,65, 66,66, 62,62, 70,70, 69,69, 57,57, 60,60, 54,54]


 uni_landmarks_68 = np.float32([
     [ 0.000213256, 0.106454 ], #17
     [ 0.0752622, 0.038915 ], #18
@@ -49,4 +49,4 @@ class FPose(IState):
         mat[2,:] = np.cross(mat[0, :], mat[1, :])
         pitch, yaw, roll = lib_math.rotation_matrix_to_euler(mat)

-        return FPose.from_radians(pitch, yaw, roll)
+        return FPose.from_radians(pitch, yaw*2, roll)
@@ -174,7 +174,8 @@ class FRect(IState):

         return FRect.from_4pts(pts)

-    def cut(self, img : np.ndarray, coverage : float, output_size : int) -> Tuple[Affine2DMat, Affine2DUniMat]:
+    def cut(self, img : np.ndarray, coverage : float, output_size : int,
+            x_offset : float = 0, y_offset : float = 0,) -> Tuple[Affine2DMat, Affine2DUniMat]:
         """
         Cut the face to square of output_size from img with given coverage using this rect
@@ -193,15 +194,21 @@ class FRect(IState):
         mat = Affine2DMat.umeyama(pts, uni_rect, True)

         # get corner points in global space
-        g_p = mat.invert().transform_points ( [(0,0),(0,1),(1,1),(1,0),(0.5,0.5)] )
+        g_p = mat.invert().transform_points ( [(0,0),(1,0),(1,1),(0,1),(0.5,0.5)] )
         g_c = g_p[4]

+        h_vec = (g_p[1]-g_p[0]).astype(np.float32)
+        v_vec = (g_p[3]-g_p[0]).astype(np.float32)
+
         # calc diagonal vectors between corners in global space
         tb_diag_vec = lib_math.segment_to_vector(g_p[0], g_p[2]).astype(np.float32)
-        bt_diag_vec = lib_math.segment_to_vector(g_p[1], g_p[3]).astype(np.float32)
+        bt_diag_vec = lib_math.segment_to_vector(g_p[3], g_p[1]).astype(np.float32)

         mod = lib_math.segment_length(g_p[0],g_p[4])*coverage

+        g_c += h_vec*x_offset + v_vec*y_offset
+
         l_t = np.array( [ g_c - tb_diag_vec*mod,
                           g_c + bt_diag_vec*mod,
                           g_c + tb_diag_vec*mod ], np.float32 )
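The corner-order change above matters for the new offsets: with [(0,0),(1,0),(1,1),(0,1)], g_p[1]-g_p[0] runs along the top edge and g_p[3]-g_p[0] down the left edge, so x_offset/y_offset shift the crop center in face-aligned directions. A toy check with the identity transform (illustrative numbers):

```python
# Toy check of the offset math above (identity transform, numpy only).
import numpy as np

g_p = np.float32([(0,0), (1,0), (1,1), (0,1), (0.5,0.5)])  # new corner order + center
h_vec = (g_p[1]-g_p[0]).astype(np.float32)   # horizontal edge vector -> [1, 0]
v_vec = (g_p[3]-g_p[0]).astype(np.float32)   # vertical edge vector   -> [0, 1]

g_c = g_p[4] + h_vec*0.1 + v_vec*(-0.2)      # x_offset=0.1, y_offset=-0.2
print(g_c)                                   # [0.6, 0.3]: crop center moved right and up
```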
@@ -284,11 +284,11 @@ class Host(Base):
             # Save only when the process is fully started / stopped
             self._db.set_value(self._db_key_host_onoff, self._process_status == Host._ProcessStatus.STARTED )

-    def restore_on_off_state(self):
+    def restore_on_off_state(self, default_state=True):
         """
         restore saved on_off state from db. Default is on.
         """
-        is_on = self._db.get_value(self._db_key_host_onoff, True)
+        is_on = self._db.get_value(self._db_key_host_onoff, default_state)
         if is_on:
             self.start()
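The new default_state parameter lets a caller decide what happens on first run, before any on/off state has been saved to the backend db; presumably this is how a newly added module such as the Face Animator can start disabled. A hypothetical caller:

```python
# Hypothetical caller: keep a module stopped until the user enables it once.
# With no saved value in the db, get_value falls back to default_state.
host.restore_on_off_state(default_state=False)   # first run -> stays stopped
host.restore_on_off_state()                      # first run -> starts (old behavior)
```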
@@ -137,6 +137,8 @@ class DynamicSingleSwitch:
                none_choice_name('')    str/None    if not None, shows None choice with name,
                                                    by default empty string
         """
         self.unselect()

+        # Validate choices
+        if choices is None:
+            raise ValueError('Choices cannot be None.')