Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-08-19 21:13:20 -07:00)
Merge remote-tracking branch 'iperov/master'
Commit 73c21eb335
13 changed files with 77 additions and 88 deletions
@@ -281,6 +281,13 @@ class DFLJPG(object):
     def has_xseg_mask(self):
         return self.dfl_dict.get('xseg_mask',None) is not None

+    def get_xseg_mask_compressed(self):
+        mask_buf = self.dfl_dict.get('xseg_mask',None)
+        if mask_buf is None:
+            return None
+
+        return mask_buf
+
     def get_xseg_mask(self):
         mask_buf = self.dfl_dict.get('xseg_mask',None)
         if mask_buf is None:

@@ -301,7 +308,7 @@ class DFLJPG(object):
         mask_a = imagelib.normalize_channels(mask_a, 1)
         img_data = np.clip( mask_a*255, 0, 255 ).astype(np.uint8)

-        data_max_len = 4096
+        data_max_len = 8192

         ret, buf = cv2.imencode('.png', img_data)
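
The compressed XSeg mask is stored as a PNG payload that must fit inside the (now doubled) 8192-byte slot. A minimal sketch of that encode-and-check step, assuming only cv2 and numpy and using a blank placeholder mask rather than real data:

    import cv2
    import numpy as np

    # Encode a (h, w, 1) float mask in [0, 1] the same way the DFLJPG code above does,
    # then verify it fits the enlarged buffer. The mask here is a placeholder.
    mask = np.zeros((256, 256, 1), dtype=np.float32)
    img_data = np.clip(mask * 255, 0, 255).astype(np.uint8)
    ret, buf = cv2.imencode('.png', img_data)
    data_max_len = 8192
    assert ret and len(buf) <= data_max_len, "compressed mask exceeds the allotted buffer"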
@@ -29,11 +29,11 @@ More than 95% of deepfake videos are created with DeepFaceLab.

 DeepFaceLab is used by such popular youtube channels as

-| [Ctrl Shift Face](https://www.youtube.com/channel/UCKpH0CKltc73e4wh0_pgL3g)| [VFXChris Ume](https://www.youtube.com/channel/UCGf4OlX_aTt8DlrgiH3jN3g/videos)|
+| [VFXChris Ume](https://www.youtube.com/channel/UCGf4OlX_aTt8DlrgiH3jN3g/videos)| [Sham00k](https://www.youtube.com/channel/UCZXbWcv7fSZFTAZV4beckyw/videos)|
 |---|---|

-| [Sham00k](https://www.youtube.com/channel/UCZXbWcv7fSZFTAZV4beckyw/videos)| [Collider videos](https://www.youtube.com/watch?v=A91P2qtPT54&list=PLayt6616lBclvOprvrC8qKGCO-mAhPRux)| [iFake](https://www.youtube.com/channel/UCC0lK2Zo2BMXX-k1Ks0r7dg/videos)|
-|---|---|---|
+| [Collider videos](https://www.youtube.com/watch?v=A91P2qtPT54&list=PLayt6616lBclvOprvrC8qKGCO-mAhPRux)| [iFake](https://www.youtube.com/channel/UCC0lK2Zo2BMXX-k1Ks0r7dg/videos)|
+|---|---|

 | [NextFace](https://www.youtube.com/channel/UCFh3gL0a8BS21g-DHvXZEeQ/videos)| [Futuring Machine](https://www.youtube.com/channel/UCC5BbFxqLQgfnWPhprmQLVg)| [RepresentUS](https://www.youtube.com/channel/UCRzgK52MmetD9aG8pDOID3g)|
 |---|---|---|
@@ -317,7 +317,7 @@ QQ 951138799
 </td></tr>

 <tr><td colspan=2 align="center">
-<a href="https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=lepersorium@gmail.com&lc=US&no_note=0&item_name=Support+DeepFaceLab&cn=&curency_code=USD&bn=PP-DonationsBF:btn_donateCC_LG.gif:NonHosted">Donate via Paypal</a>
+<a href="https://www.paypal.com/paypalme/DeepFaceLab">Donate via Paypal</a>
 </td></tr>

 <tr><td colspan=2 align="center">
@@ -70,19 +70,23 @@ class nn():
                 first_run = True
         os.environ['CUDA_CACHE_PATH'] = str(compute_cache_path)

-        os.environ['CUDA_CACHE_MAXSIZE'] = '536870912' #512Mb (32mb default)
+        #nvcuda.dll ignores this param : os.environ['CUDA_CACHE_MAXSIZE'] = '536870912' #512Mb (32mb default)
         os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '2'
-        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # tf log errors only
+        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # tf log errors only

         if first_run:
             io.log_info("Caching GPU kernels...")

-        import tensorflow as tf
-        nn.tf = tf
+        #import tensorflow as tf
+        import tensorflow.compat.v1 as tf

         import logging
         # Disable tensorflow warnings
-        logging.getLogger('tensorflow').setLevel(logging.ERROR)
+        tf_logger = logging.getLogger('tensorflow')
+        tf_logger.setLevel(logging.ERROR)
+
+        tf.disable_v2_behavior()
+        nn.tf = tf

         # Initialize framework
         import core.leras.ops
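
This hunk moves the framework onto a TensorFlow 2.x install while keeping its existing TF1 graph-style code. A general-purpose sketch of that compatibility pattern (not DeepFaceLab code; the toy graph is illustrative only):

    import os
    # '3' filters INFO, WARNING and ERROR messages from TF's C++ backend.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    import tensorflow.compat.v1 as tf     # TF1-style API served by a TF2 install
    import logging
    logging.getLogger('tensorflow').setLevel(logging.ERROR)
    tf.disable_v2_behavior()               # graph mode, sessions, placeholders

    x = tf.placeholder(tf.float32, shape=(None, 4))
    y = tf.reduce_sum(x, axis=-1)
    with tf.Session() as sess:
        print(sess.run(y, {x: [[1., 2., 3., 4.]]}))   # -> [10.]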
@@ -333,7 +333,17 @@ def depth_to_space(x, size):
             x = tf.reshape(x, (-1, oh, ow, oc, ))
             return x
         else:
-            return tf.depth_to_space(x, size, data_format=nn.data_format)
+            b,c,h,w = x.shape.as_list()
+            oh, ow = h * size, w * size
+            oc = c // (size * size)
+
+            x = tf.reshape(x, (-1, size, size, oc, h, w, ) )
+            x = tf.transpose(x, (0, 3, 4, 1, 5, 2))
+            x = tf.reshape(x, (-1, oc, oh, ow))
+            return x

 nn.depth_to_space = depth_to_space

 def rgb_to_lab(srgb):
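
The new else-branch replaces the built-in tf.depth_to_space call with a manual reshape/transpose for NCHW tensors. A NumPy sketch of the same index shuffling, plus a round-trip check against its inverse; the helper names are mine, not part of the patch:

    import numpy as np

    def depth_to_space_nchw(x, size):
        # Same reshape/transpose chain as the hunk above, in NumPy.
        b, c, h, w = x.shape
        oc = c // (size * size)
        x = x.reshape(b, size, size, oc, h, w)
        x = x.transpose(0, 3, 4, 1, 5, 2)             # -> (b, oc, h, size, w, size)
        return x.reshape(b, oc, h * size, w * size)

    def space_to_depth_nchw(x, size):
        # Inverse transform, used only to sanity-check the round trip.
        b, c, h, w = x.shape
        x = x.reshape(b, c, h // size, size, w // size, size)
        x = x.transpose(0, 3, 5, 1, 2, 4)             # -> (b, size, size, c, h//size, w//size)
        return x.reshape(b, c * size * size, h // size, w // size)

    x = np.random.rand(2, 16, 4, 4).astype(np.float32)
    y = depth_to_space_nchw(x, 2)                     # shape (2, 4, 8, 8)
    assert np.allclose(space_to_depth_nchw(y, 2), x)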
@@ -10,20 +10,22 @@ class RMSprop(nn.OptimizerBase):
             raise ValueError('name must be defined.')

         self.lr_dropout = lr_dropout
+        self.lr = lr
+        self.rho = rho
+        self.epsilon = epsilon
+
         self.clipnorm = clipnorm

         with tf.device('/CPU:0') :
             with tf.variable_scope(self.name):
-                self.lr = tf.Variable (lr, name="lr")
-                self.rho = tf.Variable (rho, name="rho")
-                self.epsilon = tf.Variable (epsilon, name="epsilon")
-
                 self.iterations = tf.Variable(0, dtype=tf.int64, name='iters')

                 self.accumulators_dict = {}
                 self.lr_rnds_dict = {}

     def get_weights(self):
-        return [self.lr, self.rho, self.epsilon, self.iterations] + list(self.accumulators_dict.values())
+        return [self.iterations] + list(self.accumulators_dict.values())

     def initialize_variables(self, trainable_weights, vars_on_cpu=True, lr_dropout_on_cpu=False):
         # Initialize here all trainable variables used in training

@@ -53,13 +55,11 @@ class RMSprop(nn.OptimizerBase):
                 a = self.accumulators_dict[ v.name ]

-                rho = tf.cast(self.rho, a.dtype)
-                new_a = rho * a + (1. - rho) * tf.square(g)
+                new_a = self.rho * a + (1. - self.rho) * tf.square(g)

-                lr = tf.cast(self.lr, a.dtype)
-                epsilon = tf.cast(self.epsilon, a.dtype)
+                lr = tf.constant(self.lr, g.dtype)

-                v_diff = - lr * g / (tf.sqrt(new_a) + epsilon)
+                v_diff = - lr * g / (tf.sqrt(new_a) + self.epsilon)
                 if self.lr_dropout != 1.0:
                     lr_rnd = self.lr_rnds_dict[v.name]
                     v_diff *= lr_rnd
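
With lr, rho and epsilon now held as plain Python numbers instead of tf.Variables, they drop out of get_weights() and enter the graph as constants. A plain-NumPy sketch of the update rule the hunk computes, on a toy quadratic loss (the hyperparameter values here are hypothetical, not taken from the class):

    import numpy as np

    lr, rho, epsilon = 5e-5, 0.9, 1e-7       # hyperparameters, plain floats
    w = np.zeros(3, dtype=np.float32)        # a trainable variable
    a = np.zeros_like(w)                     # its RMSprop accumulator

    for _ in range(1000):
        g = 2.0 * (w - 1.0)                  # gradient of sum((w - 1)^2)
        a = rho * a + (1.0 - rho) * np.square(g)
        w += -lr * g / (np.sqrt(a) + epsilon)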
@@ -114,7 +114,7 @@ class ArrayFillerSubprocessor(Subprocessor):
     def __init__(self, sh_b, data_list ):
         self.sh_b = sh_b
         self.data_list = data_list
-        super().__init__('ArrayFillerSubprocessor', ArrayFillerSubprocessor.Cli, 60)
+        super().__init__('ArrayFillerSubprocessor', ArrayFillerSubprocessor.Cli, 60, io_loop_sleep_time=0.001)

     #override
     def process_info_generator(self):

@@ -124,7 +124,7 @@ class ArrayFillerSubprocessor(Subprocessor):
     #override
     def get_data(self, host_dict):
         if len(self.data_list) > 0:
-            return self.data_list.pop(0)
+            return self.data_list.pop(-1)

         return None
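
Two small latency tweaks: the subprocessor is constructed with io_loop_sleep_time=0.001 (a shorter polling interval, judging by the parameter name), and get_data switches from pop(0) to pop(-1). On a Python list, pop(0) is O(n) because every remaining element shifts left, while pop(-1) is O(1); the trade-off is that work items are now handed out in LIFO rather than FIFO order. For comparison only (not what the patch uses), a deque gives O(1) pops from the front when FIFO order matters:

    from collections import deque

    q = deque([1, 2, 3])
    q.popleft()    # -> 1, constant time, preserves FIFO order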
Binary file not shown. Before: 1.2 MiB | After: 1.3 MiB
@@ -1,50 +0,0 @@
-{
-    "folders": [
-        {
-            "path": "."
-        }
-    ],
-    "settings": {
-        "workbench.colorTheme": "Visual Studio Light",
-        "diffEditor.ignoreTrimWhitespace": true,
-        "workbench.sideBar.location": "right",
-        "breadcrumbs.enabled": false,
-        "editor.renderWhitespace": "none",
-        "editor.minimap.enabled": false,
-        "workbench.activityBar.visible": true,
-        "window.menuBarVisibility": "default",
-        "editor.fastScrollSensitivity": 10,
-        "editor.mouseWheelScrollSensitivity": 2,
-        "window.zoomLevel": 0,
-        "extensions.ignoreRecommendations": true,
-
-        "python.linting.pylintEnabled": false,
-        "python.linting.enabled": false,
-        "python.linting.pylamaEnabled": false,
-        "python.linting.pydocstyleEnabled": false,
-        "python.pythonPath": "${env:PYTHON_EXECUTABLE}",
-        "workbench.editor.tabCloseButton": "off",
-        "workbench.editor.tabSizing": "shrink",
-        "workbench.editor.highlightModifiedTabs": true,
-        "editor.mouseWheelScrollSensitivity": 3,
-        "editor.folding": false,
-        "editor.glyphMargin": false,
-        "files.exclude": {
-            "**/__pycache__": true,
-            "**/.github": true,
-            "**/.vscode": true,
-            "**/*.dat": true,
-            "**/*.h5": true,
-            "**/*.npy": true
-        },
-        "editor.quickSuggestions": {
-            "other": false,
-            "comments": false,
-            "strings": false
-        },
-        "editor.trimAutoWhitespace": false,
-        "python.linting.pylintArgs": [
-            "--disable=import-error"
-        ]
-    }
-}
@@ -1,9 +1,9 @@
 tqdm
-numpy==1.17.0
+numpy==1.19.3
 h5py==2.9.0
 opencv-python==4.1.0.25
 ffmpeg-python==0.1.17
 scikit-image==0.14.2
 scipy==1.4.1
 colorama
-tensorflow-gpu==1.13.2
+tensorflow-gpu==2.3.1
@@ -1,11 +1,10 @@
 tqdm
-numpy==1.17.0
+numpy==1.19.3
 h5py==2.9.0
 opencv-python==4.1.0.25
 ffmpeg-python==0.1.17
 scikit-image==0.14.2
 scipy==1.4.1
 colorama
-labelme==4.2.9
-tensorflow-gpu==1.13.2
+tensorflow-gpu==2.4.0rc1
 pyqt5
@@ -79,6 +79,9 @@ class Sample(object):
         self._filename_offset_size = None

+    def has_xseg_mask(self):
+        return self.xseg_mask is not None or self.xseg_mask_compressed is not None
+
     def get_xseg_mask(self):
         if self.xseg_mask_compressed is not None:
             xseg_mask = cv2.imdecode(self.xseg_mask_compressed, cv2.IMREAD_UNCHANGED)
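
get_xseg_mask decodes the stored PNG bytes back into a float mask on demand. A minimal sketch of that decode path, with a synthetic buffer standing in for xseg_mask_compressed:

    import cv2
    import numpy as np

    # Synthetic stand-in for the compressed payload carried by the sample.
    png_buf = cv2.imencode('.png', np.zeros((64, 64), dtype=np.uint8))[1]

    xseg_mask = cv2.imdecode(png_buf, cv2.IMREAD_UNCHANGED)   # uint8 image
    xseg_mask = xseg_mask.astype(np.float32) / 255.0          # back to [0, 1]
    if xseg_mask.ndim == 2:
        xseg_mask = xseg_mask[..., None]                      # restore the channel axis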
@@ -26,11 +26,14 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase):
         samples = sum([ SampleLoader.load (SampleType.FACE, path) for path in paths ] )
         seg_sample_idxs = SegmentedSampleFilterSubprocessor(samples).run()

-        seg_samples_len = len(seg_sample_idxs)
-        if seg_samples_len == 0:
-            raise Exception(f"No segmented faces found.")
+        if len(seg_sample_idxs) == 0:
+            seg_sample_idxs = SegmentedSampleFilterSubprocessor(samples, count_xseg_mask=True).run()
+            if len(seg_sample_idxs) == 0:
+                raise Exception(f"No segmented faces found.")
+            else:
+                io.log_info(f"Using {len(seg_sample_idxs)} xseg labeled samples.")
         else:
-            io.log_info(f"Using {seg_samples_len} segmented samples.")
+            io.log_info(f"Using {len(seg_sample_idxs)} segmented samples.")

         if self.debug:
             self.generators_count = 1

@@ -80,8 +83,16 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase):
         def gen_img_mask(sample):
             img = sample.load_bgr()
             h,w,c = img.shape
-            mask = np.zeros ((h,w,1), dtype=np.float32)
-            sample.seg_ie_polys.overlay_mask(mask)
+
+            if sample.seg_ie_polys.has_polys():
+                mask = np.zeros ((h,w,1), dtype=np.float32)
+                sample.seg_ie_polys.overlay_mask(mask)
+            elif sample.has_xseg_mask():
+                mask = sample.get_xseg_mask()
+                mask[mask < 0.5] = 0.0
+                mask[mask >= 0.5] = 1.0
+            else:
+                raise Exception(f'no mask in sample {sample.filename}')

             if face_type == sample.face_type:
                 if w != resolution:

@@ -158,9 +169,10 @@ class SampleGeneratorFaceXSeg(SampleGeneratorBase):

 class SegmentedSampleFilterSubprocessor(Subprocessor):
     #override
-    def __init__(self, samples ):
+    def __init__(self, samples, count_xseg_mask=False ):
         self.samples = samples
         self.samples_len = len(self.samples)
+        self.count_xseg_mask = count_xseg_mask

         self.idxs = [*range(self.samples_len)]
         self.result = []

@@ -169,7 +181,7 @@ class SegmentedSampleFilterSubprocessor(Subprocessor):
     #override
     def process_info_generator(self):
         for i in range(multiprocessing.cpu_count()):
-            yield 'CPU%d' % (i), {}, {'samples':self.samples}
+            yield 'CPU%d' % (i), {}, {'samples':self.samples, 'count_xseg_mask':self.count_xseg_mask}

     #override
     def on_clients_initialized(self):

@@ -203,6 +215,10 @@ class SegmentedSampleFilterSubprocessor(Subprocessor):
     #overridable optional
     def on_initialize(self, client_dict):
         self.samples = client_dict['samples']
+        self.count_xseg_mask = client_dict['count_xseg_mask']

     def process_data(self, idx):
-        return idx, self.samples[idx].seg_ie_polys.get_pts_count() != 0
+        if self.count_xseg_mask:
+            return idx, self.samples[idx].has_xseg_mask()
+        else:
+            return idx, self.samples[idx].seg_ie_polys.get_pts_count() != 0
@@ -81,7 +81,7 @@ class SampleLoader:
                           shape,
                           landmarks,
                           seg_ie_polys,
-                          xseg_mask,
+                          xseg_mask_compressed,
                           eyebrows_expand_mod,
                           source_filename ) = data

@@ -91,7 +91,7 @@ class SampleLoader:
                                 shape=shape,
                                 landmarks=landmarks,
                                 seg_ie_polys=seg_ie_polys,
-                                xseg_mask=xseg_mask,
+                                xseg_mask_compressed=xseg_mask_compressed,
                                 eyebrows_expand_mod=eyebrows_expand_mod,
                                 source_filename=source_filename,
                               ))

@@ -163,7 +163,7 @@ class FaceSamplesLoaderSubprocessor(Subprocessor):
                        dflimg.get_shape(),
                        dflimg.get_landmarks(),
                        dflimg.get_seg_ie_polys(),
-                       dflimg.get_xseg_mask(),
+                       dflimg.get_xseg_mask_compressed(),
                        dflimg.get_eyebrows_expand_mod(),
                        dflimg.get_source_filename() )
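
The loader now carries the compressed PNG buffer through to the Sample instead of the decoded mask, so decoding happens lazily in get_xseg_mask(), presumably to keep per-sample memory low while the sample list sits in RAM. A rough back-of-the-envelope sketch of the difference (sizes are illustrative, not measured):

    decoded_bytes = 256 * 256 * 4            # a 256x256 float32 mask: 262144 bytes, ~256 KiB
    compressed_cap = 8192                    # PNG payload cap from DFLJPG's data_max_len above
    print(decoded_bytes // compressed_cap)   # -> 32x smaller upper bound per sample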