Mirror of https://github.com/iperov/DeepFaceLab.git (synced 2025-07-06 21:12:07 -07:00)
Commit e0907423a9 (parent 847622ee3a): "nothing interesting"

1 changed file with 178 additions and 8 deletions: __dev/test.py
@@ -1,5 +1,5 @@
 import os
-os.environ['force_plaidML'] = '1'
+#os.environ['force_plaidML'] = '1'

 import sys
 import argparse
@@ -11,6 +11,7 @@ import numpy as np
 import cv2
 import time
 import multiprocessing
+import threading
 import traceback
 from tqdm import tqdm
 from utils.DFLPNG import DFLPNG
@@ -303,10 +304,41 @@ def get_transform_mat (image_landmarks, output_size, scale=1.0):
 #
 # #alignments[ source_filename_stem ].append (dflimg.get_source_landmarks())
 # alignments.append (dflimg.get_source_landmarks())
-import mathlib
+import string

 def main():
+    import ffmpeg
+
+    path = Path('D:/deepfacelab/test')
+    input_path = str(path / 'input.mp4')
+    #stream = ffmpeg.input(str(path / 'input.mp4') )
+    #stream = ffmpeg.hflip(stream)
+    #stream = ffmpeg.output(stream, str(path / 'output.mp4') )
+    #ffmpeg.run(stream)
+    (
+        ffmpeg
+        .input( str(path / 'input.mp4'))
+        .hflip()
+        .output( str(path / 'output.mp4'), r="23000/1001" )
+        .run()
+    )
+
+    #probe = ffmpeg.probe(str(path / 'input.mp4'))
+
+    #out, _ = (
+    #    ffmpeg
+    #    .input( input_path )
+    #    .output('pipe:', format='rawvideo', pix_fmt='rgb24')
+    #    .run(capture_stdout=True)
+    #)
+    #video = (
+    #    np
+    #    .frombuffer(out, np.uint8)
+    #    .reshape([-1, height, width, 3])
+    #)
+
+    import code
+    code.interact(local=dict(globals(), **locals()))
+
     from nnlib import nnlib
     exec( nnlib.import_all( device_config=nnlib.device.Config() ), locals(), globals() )
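Note on the ffmpeg block added above: it switches from the commented-out stream-variable style to the fluent chaining style of the ffmpeg-python package. A minimal standalone sketch of the same pipeline, assuming ffmpeg-python is installed and an ffmpeg binary is on PATH (the D:/deepfacelab/test paths and the r="23000/1001" output rate are copied from the diff, not verified here):

    # Hedged sketch: the same input -> hflip -> output chain as the new code in main().
    from pathlib import Path
    import ffmpeg

    path = Path('D:/deepfacelab/test')

    (
        ffmpeg
        .input(str(path / 'input.mp4'))                      # source clip
        .hflip()                                             # horizontal-flip filter
        .output(str(path / 'output.mp4'), r="23000/1001")    # force the output frame rate
        .run()                                               # spawn ffmpeg and wait for it
    )

The commented-out rawvideo branch ('pipe:' output with pix_fmt='rgb24') is the usual ffmpeg-python pattern for reading decoded frames into NumPy; it needs the frame width and height (e.g. from ffmpeg.probe) before the frombuffer/reshape step can work, which is presumably why it is left disabled.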
@@ -1000,7 +1032,7 @@ O[i0, i1, i2, i3: (1 + 1 - 1)/1, (64 + 1 - 1)/1, (64 + 2 - 1)/2, (1 + 1 - 1)/1]
 for fs in filter_sizes:
     h = w = 128
     h_f = w_f = fs
-    str = 2
+    stri = 2
     #print "Testing for", imsize, fs, stri, pad

     #tf.reset_default_graph()
@@ -1008,9 +1040,9 @@ O[i0, i1, i2, i3: (1 + 1 - 1)/1, (64 + 1 - 1)/1, (64 + 2 - 1)/2, (1 + 1 - 1)/1]
     W = tf.constant(np.ones([h_f, w_f, c_in, h_f*w_f*c_in]), tf.float32)


-    Z = tf.nn.conv2d(X, W, strides=[1, str, str, 1], padding="VALID")
-    Z_manual = manual_conv(X, W, strides=[1, str, str, 1], padding="VALID")
-    Z_2 = extract_image_patches (X, (fs,fs), (str,str), padding="VALID")
+    Z = tf.nn.conv2d(X, W, strides=[1, stri, stri, 1], padding="VALID")
+    Z_manual = manual_conv(X, W, strides=[1, stri, stri, 1], padding="VALID")
+    Z_2 = extract_image_patches (X, (fs,fs), (stri,stri), padding="VALID")
     import code
     code.interact(local=dict(globals(), **locals()))
     #
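Note on the two hunks above: the only functional change is renaming the stride variable from str to stri, which stops the code from shadowing Python's built-in str. A minimal sketch of the failure mode that shadowing causes (the values are illustrative, not taken from the file):

    # Hedged sketch: rebinding the name `str` makes later calls to the builtin fail.
    def with_shadowing():
        str = 2                 # stride stored under the builtin's name
        try:
            return str(4)       # the local int is not callable
        except TypeError as e:
            return "fails: %s" % e

    def without_shadowing():
        stri = 2                # stride under a non-clashing name, as in the diff
        return str(4), stri     # the builtin str() keeps working

    print(with_shadowing())     # fails: 'int' object is not callable
    print(without_shadowing())  # ('4', 2)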
@@ -1442,3 +1474,141 @@ if __name__ == "__main__":


     main()
+
+    sys.exit()
+
+
+
+import sys
+import numpy as np
+
+def main():
+    import tensorflow
+    ####import keras
+    keras = tensorflow.keras
+    K = keras.backend
+    KL = keras.layers
+
+    bgr_shape = (64, 64, 3)
+    inp = KL.Input(bgr_shape)
+    outp = K.mean(inp)
+
+    flow = K.function([inp],[outp])
+    flow ( [np.zeros ( (1,64,64,3), dtype=np.float32 )] )
+
+if __name__ == "__main__":
+    main()
+
+    sys.exit()
+
+
+
+import numpy as np
+import tensorflow as tf
+keras = tf.keras
+KL = keras.layers
+K = keras.backend
+
+bgr_shape = (128, 128, 3)
+#batch_size = 132 #max -TF.keras-tf.1.11.0-cuda 9
+batch_size = 90 #max -TF.keras-tf.1.13.1-cuda 10
+
+class PixelShuffler(keras.layers.Layer):
+    def __init__(self, size=(2, 2), data_format=None, **kwargs):
+        super(PixelShuffler, self).__init__(**kwargs)
+        self.size = size
+
+    def call(self, inputs):
+
+        input_shape = K.int_shape(inputs)
+        if len(input_shape) != 4:
+            raise ValueError('Inputs should have rank ' +
+                             str(4) +
+                             '; Received input shape:', str(input_shape))
+
+
+        batch_size, h, w, c = input_shape
+        if batch_size is None:
+            batch_size = -1
+        rh, rw = self.size
+        oh, ow = h * rh, w * rw
+        oc = c // (rh * rw)
+
+        out = K.reshape(inputs, (batch_size, h, w, rh, rw, oc))
+        out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5))
+        out = K.reshape(out, (batch_size, oh, ow, oc))
+        return out
+
+    def compute_output_shape(self, input_shape):
+
+        if len(input_shape) != 4:
+            raise ValueError('Inputs should have rank ' +
+                             str(4) +
+                             '; Received input shape:', str(input_shape))
+
+
+        height = input_shape[1] * self.size[0] if input_shape[1] is not None else None
+        width = input_shape[2] * self.size[1] if input_shape[2] is not None else None
+        channels = input_shape[3] // self.size[0] // self.size[1]
+
+        if channels * self.size[0] * self.size[1] != input_shape[3]:
+            raise ValueError('channels of input and size are incompatible')
+
+        return (input_shape[0],
+                height,
+                width,
+                channels)
+
+    def get_config(self):
+        config = {'size': self.size}
+        base_config = super(PixelShuffler, self).get_config()
+
+        return dict(list(base_config.items()) + list(config.items()))
+
+def upscale (dim):
+    def func(x):
+        return PixelShuffler()((KL.Conv2D(dim * 4, kernel_size=3, strides=1, padding='same')(x)))
+    return func
+
+inp = KL.Input(bgr_shape)
+x = inp
+x = KL.Conv2D(128, 5, strides=2, padding='same')(x)
+x = KL.Conv2D(256, 5, strides=2, padding='same')(x)
+x = KL.Conv2D(512, 5, strides=2, padding='same')(x)
+x = KL.Conv2D(1024, 5, strides=2, padding='same')(x)
+x = KL.Dense(1024)(KL.Flatten()(x))
+x = KL.Dense(8 * 8 * 1024)(x)
+x = KL.Reshape((8, 8, 1024))(x)
+x = upscale(512)(x)
+x = upscale(256)(x)
+x = upscale(128)(x)
+x = upscale(64)(x)
+x = KL.Conv2D(3, 5, strides=1, padding='same')(x)
+
+model = keras.models.Model ([inp], [x])
+model.compile(optimizer=keras.optimizers.Adam(lr=5e-5, beta_1=0.5, beta_2=0.999), loss='mae')
+
+training_data = np.zeros ( (batch_size,128,128,3) )
+loss = model.train_on_batch( [training_data], [training_data] )
+print ("FINE")
+
+import code
+code.interact(local=dict(globals(), **locals()))
+
+#import keras
+#batch_size = 84 #max -keras-tf.1.11.0-cuda 9
+#batch_size = 80 #max -keras-tf.1.13.1-cuda 10
+
+#inp = KL.Input(bgr_shape)
+#inp2 = KL.Input(bgr_shape)
+#outp = model(inp)
+#loss = K.mean(K.abs(outp-inp2))
+#train_func = K.function ([inp,inp2],[loss], keras.optimizers.Adam(lr=5e-5, beta_1=0.5, beta_2=0.999).get_updates(loss, model.trainable_weights) )
+#
+#loss = train_func( [training_data, training_data] )