mirror of https://github.com/iperov/DeepFaceLab.git

refactorings, improved sort by hist-dissim
This commit is contained in:
parent 4ff67ad26b
commit 9926dc626a
6 changed files with 128 additions and 127 deletions

main.py | 2 +-

@@ -58,7 +58,7 @@ if __name__ == "__main__":
     sort_parser = subparsers.add_parser( "sort", help="Sort faces in a directory.")
     sort_parser.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory. A directory containing the files you wish to process.")
-    sort_parser.add_argument('--by', required=True, dest="sort_by_method", choices=("blur", "face", "face-dissim", "face-yaw", "hist", "hist-dissim", "hist-blur", "ssim", "brightness", "hue", "black", "origname"), help="Method of sorting. 'origname' sort by original filename to recover original sequence." )
+    sort_parser.add_argument('--by', required=True, dest="sort_by_method", choices=("blur", "face", "face-dissim", "face-yaw", "hist", "hist-dissim", "hist-blur", "brightness", "hue", "black", "origname"), help="Method of sorting. 'origname' sort by original filename to recover original sequence." )
     sort_parser.set_defaults (func=process_sort)

     def process_train(arguments):

@@ -3,7 +3,7 @@ from pathlib import Path
 from utils import Path_utils
 import cv2
 from tqdm import tqdm
-from utils.AlignedPNG import AlignedPNG
+from utils.DFLPNG import DFLPNG
 from utils import image_utils
 import shutil
 import numpy as np

@@ -156,12 +156,7 @@ class ConvertSubprocessor(SubprocessorBase):
         image = (cv2.imread(str(filename_path)) / 255.0).astype(np.float32)

         if self.converter.get_mode() == ConverterBase.MODE_IMAGE:
-            image_landmarks = None
-            a_png = AlignedPNG.load( str(filename_path) )
-            if a_png is not None:
-                d = a_png.getFaceswapDictData()
-                if d is not None and 'landmarks' in d.keys():
-                    image_landmarks = np.array(d['landmarks'])
+            image_landmarks = DFLPNG.load( str(filename_path), throw_on_no_embedded_data=True ).get_landmarks()

             image = self.converter.convert_image(image, image_landmarks, self.debug)
             if self.debug:

@@ -258,20 +253,15 @@ def main (input_dir, output_dir, model_dir, model_name, aligned_dir=None, **in_o

         aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)
         for filename in tqdm(aligned_path_image_paths, desc= "Collecting alignments" ):
-            a_png = AlignedPNG.load( str(filename) )
-            if a_png is None:
-                print ( "%s - no embedded data found." % (filename) )
-                continue
-            d = a_png.getFaceswapDictData()
-            if d is None or d['source_filename'] is None or d['source_rect'] is None or d['source_landmarks'] is None:
-                print ( "%s - no embedded data found." % (filename) )
+            dflpng = DFLPNG.load( str(filename), print_on_no_embedded_data=True )
+            if dflpng is None:
                 continue

-            source_filename_stem = Path(d['source_filename']).stem
+            source_filename_stem = Path( dflpng.get_source_filename() ).stem
             if source_filename_stem not in alignments.keys():
                 alignments[ source_filename_stem ] = []

-            alignments[ source_filename_stem ].append ( np.array(d['source_landmarks']) )
+            alignments[ source_filename_stem ].append (dflpng.get_source_landmarks())


     files_processed, faces_processed = ConvertSubprocessor (
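
For orientation, a condensed sketch of what the converter's "Collecting alignments" loop does after this change. This is not the repository's exact code: the defaultdict replaces the explicit membership check, and aligned_image_paths is a placeholder for Path_utils.get_image_paths(aligned_path).

from collections import defaultdict
from pathlib import Path

from utils.DFLPNG import DFLPNG  # assumes the DeepFaceLab repo root is on sys.path

def collect_alignments(aligned_image_paths):
    # Group embedded source landmarks by the stem of the original frame filename.
    alignments = defaultdict(list)
    for filename in aligned_image_paths:
        dflpng = DFLPNG.load(str(filename), print_on_no_embedded_data=True)
        if dflpng is None:
            continue  # faces without embedded DFL data are skipped rather than fatal
        stem = Path(dflpng.get_source_filename()).stem
        alignments[stem].append(dflpng.get_source_landmarks())
    return alignments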

@@ -8,7 +8,7 @@ from pathlib import Path
 import numpy as np
 import cv2
 from utils import Path_utils
-from utils.AlignedPNG import AlignedPNG
+from utils.DFLPNG import DFLPNG
 from utils import image_utils
 from facelib import FaceType
 import facelib

@@ -314,19 +314,14 @@ class ExtractSubprocessor(SubprocessorBase):

                 cv2.imwrite(output_file, face_image)

-                a_png = AlignedPNG.load (output_file)
-
-                d = {
-                      'face_type': FaceType.toString(self.face_type),
-                      'landmarks': face_image_landmarks.tolist(),
-                      'yaw_value': facelib.LandmarksProcessor.calc_face_yaw (face_image_landmarks),
-                      'pitch_value': facelib.LandmarksProcessor.calc_face_pitch (face_image_landmarks),
-                      'source_filename': filename_path.name,
-                      'source_rect': rect,
-                      'source_landmarks': image_landmarks.tolist()
-                    }
-                a_png.setFaceswapDictData (d)
-                a_png.save(output_file)
+                DFLPNG.embed_data(output_file, face_type = FaceType.toString(self.face_type),
+                                               landmarks = face_image_landmarks.tolist(),
+                                               yaw_value = facelib.LandmarksProcessor.calc_face_yaw (face_image_landmarks),
+                                               pitch_value = facelib.LandmarksProcessor.calc_face_pitch (face_image_landmarks),
+                                               source_filename = filename_path.name,
+                                               source_rect= rect,
+                                               source_landmarks = image_landmarks.tolist()
+                                  )

                 result.append (output_file)


@@ -8,7 +8,8 @@ from shutil import copyfile

 from pathlib import Path
 from utils import Path_utils
-from utils.AlignedPNG import AlignedPNG
+from utils import image_utils
+from utils.DFLPNG import DFLPNG
 from facelib import LandmarksProcessor
 from utils.SubprocessorBase import SubprocessorBase
 import multiprocessing

@@ -86,22 +87,16 @@ class BlurEstimatorSubprocessor(SubprocessorBase):
     #override
     def onClientProcessData(self, data):
         filename_path = Path( data[0] )

-        image = cv2.imread( str(filename_path) )
-        face_mask = None
-        a_png = AlignedPNG.load( str(filename_path) )
-        if a_png is not None:
-            d = a_png.getFaceswapDictData()
-            if (d is not None) and (d['landmarks'] is not None):
-                face_mask = LandmarksProcessor.get_image_hull_mask (image, np.array(d['landmarks']))
-
-        if face_mask is not None:
-            image = (image*face_mask).astype(np.uint8)
-        else:
-            print ( "%s - no embedded data found." % (str(filename_path)) )
-            return [ str(filename_path), 0 ]
-
-        return [ str(filename_path), estimate_sharpness( image ) ]
+        dflpng = DFLPNG.load( str(filename_path), print_on_no_embedded_data=True )
+        if dflpng is not None:
+            image = cv2.imread( str(filename_path) )
+            image = ( image * \
+                      LandmarksProcessor.get_image_hull_mask (image, dflpng.get_landmarks()) \
+                    ).astype(np.uint8)
+
+            return [ str(filename_path), estimate_sharpness( image ) ]
+        else:
+            return [ str(filename_path), 0 ]

     #override
     def onClientGetDataName (self, data):
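
The blur estimator now scores only the face region: the frame is multiplied by the landmark hull mask before sharpness estimation, so background detail no longer inflates the score. A minimal sketch of that masking step, with the hull-mask and sharpness helpers passed in as parameters rather than imported (they stand in for facelib.LandmarksProcessor.get_image_hull_mask and this module's estimate_sharpness):

import cv2
import numpy as np

def masked_sharpness(filename, dflpng, get_image_hull_mask, estimate_sharpness):
    # dflpng: an already-loaded DFLPNG instance with embedded landmarks.
    image = cv2.imread(filename)
    mask = get_image_hull_mask(image, dflpng.get_landmarks())  # convex-hull mask of the face
    face_only = (image * mask).astype(np.uint8)                # zero out everything outside the hull
    return estimate_sharpness(face_only)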

@@ -164,18 +159,11 @@ def sort_by_face(input_path):
             print ("%s is not a png file required for sort_by_face" % (filepath.name) )
             continue

-        a_png = AlignedPNG.load (str(filepath))
-        if a_png is None:
-            print ("%s failed to load" % (filepath.name) )
+        dflpng = DFLPNG.load (str(filepath), print_on_no_embedded_data=True)
+        if dflpng is None:
             continue

-        d = a_png.getFaceswapDictData()
-
-        if d is None or d['landmarks'] is None:
-            print ("%s - no embedded data found required for sort_by_face" % (filepath.name) )
-            continue
-
-        img_list.append( [str(filepath), np.array(d['landmarks']) ] )
+        img_list.append( [str(filepath), dflpng.get_landmarks()] )

     img_list_len = len(img_list)

@@ -207,18 +195,11 @@ def sort_by_face_dissim(input_path):
             print ("%s is not a png file required for sort_by_face_dissim" % (filepath.name) )
             continue

-        a_png = AlignedPNG.load (str(filepath))
-        if a_png is None:
-            print ("%s failed to load" % (filepath.name) )
+        dflpng = DFLPNG.load (str(filepath), print_on_no_embedded_data=True)
+        if dflpng is None:
             continue

-        d = a_png.getFaceswapDictData()
-
-        if d is None or d['landmarks'] is None:
-            print ("%s - no embedded data found required for sort_by_face_dissim" % (filepath.name) )
-            continue
-
-        img_list.append( [str(filepath), np.array(d['landmarks']), 0 ] )
+        img_list.append( [str(filepath), dflpng.get_landmarks(), 0 ] )

     img_list_len = len(img_list)
     for i in tqdm( range(0, img_list_len-1), desc="Sorting"):

@@ -247,18 +228,11 @@ def sort_by_face_yaw(input_path):
             print ("%s is not a png file required for sort_by_face_dissim" % (filepath.name) )
             continue

-        a_png = AlignedPNG.load (str(filepath))
-        if a_png is None:
-            print ("%s failed to load" % (filepath.name) )
+        dflpng = DFLPNG.load (str(filepath), print_on_no_embedded_data=True)
+        if dflpng is None:
             continue

-        d = a_png.getFaceswapDictData()
-
-        if d is None or d['yaw_value'] is None:
-            print ("%s - no embedded data found required for sort_by_face_dissim" % (filepath.name) )
-            continue
-
-        img_list.append( [str(filepath), np.array(d['yaw_value']) ] )
+        img_list.append( [str(filepath), np.array( dflpng.get_yaw_value() ) ] )

     print ("Sorting...")
     img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

@@ -423,9 +397,7 @@ class HistDissimSubprocessor(SubprocessorBase):
             for j in range( 0, self.img_list_len):
                 if i == j:
                     continue
-                score_total += cv2.compareHist(self.img_list[i][1], self.img_list[j][1], cv2.HISTCMP_BHATTACHARYYA) + \
-                               cv2.compareHist(self.img_list[i][2], self.img_list[j][2], cv2.HISTCMP_BHATTACHARYYA) + \
-                               cv2.compareHist(self.img_list[i][3], self.img_list[j][3], cv2.HISTCMP_BHATTACHARYYA)
+                score_total += cv2.compareHist(self.img_list[i][1], self.img_list[j][1], cv2.HISTCMP_BHATTACHARYYA)

             return score_total

@@ -436,7 +408,7 @@ class HistDissimSubprocessor(SubprocessorBase):

     #override
     def onHostResult (self, data, result):
-        self.img_list[data[0]][4] = result
+        self.img_list[data[0]][2] = result
         return 1

     #override

@@ -451,17 +423,20 @@ def sort_by_hist_dissim(input_path):
     print ("Sorting by histogram dissimilarity...")

     img_list = []
-    for x in tqdm( Path_utils.get_image_paths(input_path), desc="Loading"):
-        img = cv2.imread(x)
-        img_list.append ([x, cv2.calcHist([img], [0], None, [256], [0, 256]),
-                             cv2.calcHist([img], [1], None, [256], [0, 256]),
-                             cv2.calcHist([img], [2], None, [256], [0, 256]), 0
-                         ])
+    for filename_path in tqdm( Path_utils.get_image_paths(input_path), desc="Loading"):
+        image = cv2.imread(filename_path)
+
+        dflpng = DFLPNG.load( str(filename_path), print_on_no_embedded_data=True )
+        if dflpng is not None:
+            face_mask = LandmarksProcessor.get_image_hull_mask (image, dflpng.get_landmarks())
+            image = (image*face_mask).astype(np.uint8)
+
+        img_list.append ([filename_path, cv2.calcHist([cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)], [0], None, [256], [0, 256]), 0 ])

     img_list = HistDissimSubprocessor(img_list).process()

     print ("Sorting...")
-    img_list = sorted(img_list, key=operator.itemgetter(4), reverse=True)
+    img_list = sorted(img_list, key=operator.itemgetter(2), reverse=True)

     return img_list
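
This is the "improved sort by hist-dissim" part of the commit: instead of summing Bhattacharyya distances over three full-frame BGR channel histograms, each image is first masked to the face hull and reduced to a single grayscale histogram, so the dissimilarity score reflects the face rather than the background. A standalone sketch of the new scoring idea, assuming a float hull mask with the same height and width as the image (not the repository's subprocessor code):

import cv2
import numpy as np

def face_gray_hist(image, hull_mask):
    # Keep only the face region, then build one 256-bin grayscale histogram.
    masked = (image * hull_mask).astype(np.uint8)
    gray = cv2.cvtColor(masked, cv2.COLOR_BGR2GRAY)
    return cv2.calcHist([gray], [0], None, [256], [0, 256])

def dissim_scores(hists):
    # For each histogram, sum its Bhattacharyya distance to every other one;
    # the faces most unlike the rest get the highest totals and sort first.
    scores = []
    for i, hi in enumerate(hists):
        total = 0.0
        for j, hj in enumerate(hists):
            if i != j:
                total += cv2.compareHist(hi, hj, cv2.HISTCMP_BHATTACHARYYA)
        scores.append(total)
    return scores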

@@ -508,18 +483,11 @@ def sort_by_origname(input_path):
             print ("%s is not a png file required for sort_by_origname" % (filepath.name) )
             continue

-        a_png = AlignedPNG.load (str(filepath))
-        if a_png is None:
-            print ("%s failed to load" % (filepath.name) )
+        dflpng = DFLPNG.load (str(filepath), print_on_no_embedded_data=True)
+        if dflpng is None:
             continue

-        d = a_png.getFaceswapDictData()
-
-        if d is None or d['source_filename'] is None:
-            print ("%s - no embedded data found required for sort_by_origname" % (filepath.name) )
-            continue
-
-        img_list.append( [str(filepath), d['source_filename']] )
+        img_list.append( [str(filepath), dflpng.get_source_filename()] )

     print ("Sorting...")
     img_list = sorted(img_list, key=operator.itemgetter(1))

@@ -4,7 +4,7 @@ from pathlib import Path
 from tqdm import tqdm
 import numpy as np
 import cv2
-from utils.AlignedPNG import AlignedPNG
+from utils.DFLPNG import DFLPNG
 from utils import iter_utils
 from utils import Path_utils
 from .BaseTypes import TrainingDataType

@@ -177,19 +177,14 @@ def X_LOAD ( RAWS ):
             print ("%s is not a png file required for training" % (s_filename_path.name) )
             continue

-        a_png = AlignedPNG.load ( str(s_filename_path) )
-        if a_png is None:
-            print ("%s failed to load" % (s_filename_path.name) )
+        dflpng = DFLPNG.load ( str(s_filename_path), print_on_no_embedded_data=True )
+        if dflpng is None:
             continue

-        d = a_png.getFaceswapDictData()
-
-        if d is None or d['landmarks'] is None or d['yaw_value'] is None:
-            print ("%s - no embedded faceswap info found required for training" % (s_filename_path.name) )
-            continue
-
-        face_type = d['face_type'] if 'face_type' in d.keys() else 'full_face'
-        face_type = FaceType.fromString (face_type)
-        sample_list.append( s.copy_and_set(face_type=face_type, shape=a_png.get_shape(), landmarks=d['landmarks'], yaw=d['yaw_value']) )
+        sample_list.append( s.copy_and_set(face_type=FaceType.fromString (dflpng.get_face_type()),
+                                           shape=dflpng.get_shape(),
+                                           landmarks=dflpng.get_landmarks(),
+                                           yaw=dflpng.get_yaw_value()) )

     return sample_list

@@ -4,6 +4,7 @@ import string
 import struct
 import zlib
 import pickle
+import numpy as np

 class Chunk(object):
     def __init__(self, name=None, data=None):

@@ -184,7 +185,7 @@ class IEND(Chunk):
     def __str__(self):
         return "<Chunk:IEND>".format(**self.__dict__)

-class FaceswapChunk(Chunk):
+class DFLChunk(Chunk):
     def __init__(self, dict_data=None):
         super().__init__("fcWp")
         self.dict_data = dict_data

@@ -207,26 +208,26 @@ class FaceswapChunk(Chunk):

 chunk_map = {
     b"IHDR": IHDR,
-    b"fcWp": FaceswapChunk,
+    b"fcWp": DFLChunk,
     b"IEND": IEND
 }

-class AlignedPNG(object):
+class DFLPNG(object):
     def __init__(self):
         self.data = b""
         self.length = 0
         self.chunks = []
+        self.fcwp_dict = None

     @staticmethod
-    def load(data):
+    def load_raw(filename):
         try:
-            with open(data, "rb") as f:
+            with open(filename, "rb") as f:
                 data = f.read()
         except:
             raise FileNotFoundError(data)

-        inst = AlignedPNG()
+        inst = DFLPNG()
         inst.data = data
         inst.length = len(data)

@@ -245,11 +246,44 @@ class AlignedPNG(object):

         return inst

+    @staticmethod
+    def load(filename, print_on_no_embedded_data=False, throw_on_no_embedded_data=False):
+        inst = DFLPNG.load_raw (filename)
+        inst.fcwp_dict = inst.getDFLDictData()
+
+        if inst.fcwp_dict == None:
+            if print_on_no_embedded_data:
+                print ( "No DFL data found in %s" % (filename) )
+            if throw_on_no_embedded_data:
+                raise ValueError("No DFL data found in %s" % (filename) )
+            return None
+
+        return inst
+
+    @staticmethod
+    def embed_data(filename, face_type=None,
+                             landmarks=None,
+                             yaw_value=None,
+                             pitch_value=None,
+                             source_filename=None,
+                             source_rect=None,
+                             source_landmarks=None
+                  ):
+
+        inst = DFLPNG.load_raw (filename)
+        inst.setDFLDictData ({
+                             'face_type': face_type,
+                             'landmarks': landmarks,
+                             'yaw_value': yaw_value,
+                             'pitch_value': pitch_value,
+                             'source_filename': source_filename,
+                             'source_rect': source_rect,
+                             'source_landmarks': source_landmarks
+                             })
+
-    def save(self, filename):
         try:
             with open(filename, "wb") as f:
-                f.write ( self.dump() )
+                f.write ( inst.dump() )
         except:
             raise Exception( 'cannot save %s' % (filename) )

@@ -274,23 +308,42 @@ class AlignedPNG(object):
                 return chunk.height
         return 0

-    def getFaceswapDictData(self):
+    def getDFLDictData(self):
         for chunk in self.chunks:
-            if type(chunk) == FaceswapChunk:
+            if type(chunk) == DFLChunk:
                 return chunk.getDictData()
         return None

-    def setFaceswapDictData (self, dict_data=None):
+    def setDFLDictData (self, dict_data=None):
         for chunk in self.chunks:
-            if type(chunk) == FaceswapChunk:
+            if type(chunk) == DFLChunk:
                 self.chunks.remove(chunk)
                 break

         if not dict_data is None:
-            chunk = FaceswapChunk(dict_data)
+            chunk = DFLChunk(dict_data)
             self.chunks.insert(-1, chunk)

+    def get_face_type(self):
+        return self.fcwp_dict['face_type']
+
+    def get_landmarks(self):
+        return np.array ( self.fcwp_dict['landmarks'] )
+
+    def get_yaw_value(self):
+        return self.fcwp_dict['yaw_value']
+
+    def get_pitch_value(self):
+        return self.fcwp_dict['pitch_value']
+
+    def get_source_filename(self):
+        return self.fcwp_dict['source_filename']
+
+    def get_source_rect(self):
+        return self.fcwp_dict['source_rect']
+
+    def get_source_landmarks(self):
+        return np.array ( self.fcwp_dict['source_landmarks'] )
+
     def __str__(self):
         return "<PNG length={length} chunks={}>".format(len(self.chunks), **self.__dict__)
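
Taken together, the renamed class gives aligned PNGs a small write-once / read-many metadata API: embed_data() is called by the extractor right after the face image is written, and load() plus the get_* accessors replace the old getFaceswapDictData() dictionary handling everywhere else. A hedged usage sketch; the path and the values are placeholders, not data from the commit:

import numpy as np
from utils.DFLPNG import DFLPNG  # assumes the DeepFaceLab repo root is on sys.path

face_png = "aligned/00001_0.png"              # placeholder path
dummy_landmarks = np.zeros((68, 2)).tolist()  # placeholder landmark list

# Writing: embeds a 'fcWp' chunk into the PNG in place.
DFLPNG.embed_data(face_png,
                  face_type="full_face",
                  landmarks=dummy_landmarks,
                  yaw_value=0.0,
                  pitch_value=0.0,
                  source_filename="00001.png",
                  source_rect=[0, 0, 256, 256],
                  source_landmarks=dummy_landmarks)

# Reading: returns None (optionally printing or raising) when no chunk is embedded.
dflpng = DFLPNG.load(face_png, print_on_no_embedded_data=True)
if dflpng is not None:
    landmarks = dflpng.get_landmarks()            # np.array of the embedded landmarks
    source_stem = dflpng.get_source_filename()    # original frame filename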