DFL now works with JPG files by default. Old PNG files are also supported. Added a util converter from PNG to JPG.
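An existing PNG faceset can be migrated in place via the new util entry point added to main.py, e.g. python main.py util --input-dir <aligned_dir> --convert-png-to-jpg (the directory path here is only a placeholder).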

iperov 2019-02-04 19:47:24 +04:00
parent f0a5f97995
commit 6d95dd4a99
9 changed files with 400 additions and 92 deletions

main.py (13 lines changed)

@@ -53,8 +53,6 @@ if __name__ == "__main__":
extract_parser.add_argument('--manual-output-debug-fix', action="store_true", dest="manual_output_debug_fix", default=False, help="Performs manual reextract input-dir frames which were deleted from [output_dir]_debug\ dir.")
extract_parser.add_argument('--manual-window-size', type=int, dest="manual_window_size", default=1368, help="Manual fix window size. Default: 1368.")
extract_parser.add_argument('--cpu-only', action="store_true", dest="cpu_only", default=False, help="Extract on CPU. Forces to use MT extractor.")
extract_parser.set_defaults (func=process_extract)
def process_sort(arguments):
@@ -66,6 +64,17 @@ if __name__ == "__main__":
sort_parser.add_argument('--by', required=True, dest="sort_by_method", choices=("blur", "face", "face-dissim", "face-yaw", "hist", "hist-dissim", "brightness", "hue", "black", "origname", "final", "test"), help="Method of sorting. 'origname' sort by original filename to recover original sequence." )
sort_parser.set_defaults (func=process_sort)
def process_util(arguments):
from mainscripts import Util
if arguments.convert_png_to_jpg:
Util.convert_png_to_jpg_folder (input_path=arguments.input_dir)
util_parser = subparsers.add_parser( "util", help="Utilities.")
util_parser.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory. A directory containing the files you wish to process.")
util_parser.add_argument('--convert-png-to-jpg', action="store_true", dest="convert_png_to_jpg", default=False, help="Convert DeepFaceLAB PNG files to JPEG.")
util_parser.set_defaults (func=process_util)
def process_train(arguments):
from mainscripts import Trainer
Trainer.main (

mainscripts/Converter.py

@@ -6,6 +6,7 @@ from utils import Path_utils
import cv2
from tqdm import tqdm
from utils.DFLPNG import DFLPNG
from utils.DFLJPG import DFLJPG
from utils import image_utils
import shutil
import numpy as np
@@ -163,7 +164,14 @@ class ConvertSubprocessor(SubprocessorBase):
cv2.waitKey(0)
faces_processed = 1
elif self.converter.get_mode() == ConverterBase.MODE_IMAGE_WITH_LANDMARKS:
image_landmarks = DFLPNG.load( str(filename_path), throw_on_no_embedded_data=True ).get_landmarks()
if filename_path.suffix == '.png':
dflimg = DFLPNG.load( str(filename_path), throw_on_no_embedded_data=True )
elif filename_path.suffix == '.jpg':
dflimg = DFLJPG.load ( str(filename_path), throw_on_no_embedded_data=True )
else:
raise Exception ("%s is not a dfl image file" % (filename_path.name) )
image_landmarks = dflimg.get_landmarks()
image = self.converter.convert_image(image, image_landmarks, self.debug)
if self.debug:
@@ -259,16 +267,21 @@ def main (input_dir, output_dir, model_dir, model_name, aligned_dir=None, **in_o
alignments = {}
aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)
for filename in tqdm(aligned_path_image_paths, desc="Collecting alignments", ascii=True ):
dflpng = DFLPNG.load( str(filename), print_on_no_embedded_data=True )
if dflpng is None:
continue
for filepath in tqdm(aligned_path_image_paths, desc="Collecting alignments", ascii=True ):
filepath = Path(filepath)
source_filename_stem = Path( dflpng.get_source_filename() ).stem
if filepath.suffix == '.png':
dflimg = DFLPNG.load( str(filepath), print_on_no_embedded_data=True )
elif filepath.suffix == '.jpg':
dflimg = DFLJPG.load ( str(filepath), print_on_no_embedded_data=True )
else:
print ("%s is not a dfl image file" % (filepath.name) )
source_filename_stem = Path( dflimg.get_source_filename() ).stem
if source_filename_stem not in alignments.keys():
alignments[ source_filename_stem ] = []
alignments[ source_filename_stem ].append (dflpng.get_source_landmarks())
alignments[ source_filename_stem ].append (dflimg.get_source_landmarks())
#interpolate landmarks

mainscripts/Extractor.py

@@ -7,7 +7,7 @@ from pathlib import Path
import numpy as np
import cv2
from utils import Path_utils
from utils.DFLPNG import DFLPNG
from utils.DFLJPG import DFLJPG
from utils import image_utils
from facelib import FaceType
import facelib
@@ -311,7 +311,7 @@ class ExtractSubprocessor(SubprocessorBase):
debug_image = image.copy()
for (face_idx, face) in enumerate(faces):
output_file = '{}_{}{}'.format(str(self.output_path / filename_path.stem), str(face_idx), '.png')
output_file = '{}_{}{}'.format(str(self.output_path / filename_path.stem), str(face_idx), '.jpg')
rect = face[0]
image_landmarks = np.array(face[1])
@@ -329,7 +329,7 @@ class ExtractSubprocessor(SubprocessorBase):
cv2.imwrite(output_file, face_image)
DFLPNG.embed_data(output_file, face_type = FaceType.toString(self.face_type),
DFLJPG.embed_data(output_file, face_type = FaceType.toString(self.face_type),
landmarks = face_image_landmarks.tolist(),
yaw_value = facelib.LandmarksProcessor.calc_face_yaw (face_image_landmarks),
pitch_value = facelib.LandmarksProcessor.calc_face_pitch (face_image_landmarks),

mainscripts/Sorter.py

@@ -10,6 +10,7 @@ from pathlib import Path
from utils import Path_utils
from utils import image_utils
from utils.DFLPNG import DFLPNG
from utils.DFLJPG import DFLJPG
from facelib import LandmarksProcessor
from utils.SubprocessorBase import SubprocessorBase
import multiprocessing
@@ -86,17 +87,24 @@ class BlurEstimatorSubprocessor(SubprocessorBase):
#override
def onClientProcessData(self, data):
filename_path = Path( data[0] )
filepath = Path( data[0] )
dflpng = DFLPNG.load( str(filename_path), print_on_no_embedded_data=True )
if dflpng is not None:
image = cv2.imread( str(filename_path) )
image = ( image * \
LandmarksProcessor.get_image_hull_mask (image.shape, dflpng.get_landmarks()) \
).astype(np.uint8)
return [ str(filename_path), estimate_sharpness( image ) ]
if filepath.suffix == '.png':
dflimg = DFLPNG.load( str(filepath), print_on_no_embedded_data=True )
elif filepath.suffix == '.jpg':
dflimg = DFLJPG.load ( str(filepath), print_on_no_embedded_data=True )
else:
return [ str(filename_path), 0 ]
print ("%s is not a dfl image file" % (filepath.name) )
dflimg = None
if dflimg is not None:
image = cv2.imread( str(filepath) )
image = ( image * \
LandmarksProcessor.get_image_hull_mask (image.shape, dflimg.get_landmarks()) \
).astype(np.uint8)
return [ str(filepath), estimate_sharpness( image ) ]
else:
return [ str(filepath), 0 ]
#override
def onClientGetDataName (self, data):
@@ -151,15 +159,15 @@ def sort_by_face(input_path):
for filepath in tqdm( Path_utils.get_image_paths(input_path), desc="Loading", ascii=True):
filepath = Path(filepath)
if filepath.suffix != '.png':
print ("%s is not a png file required for sort_by_face" % (filepath.name) )
if filepath.suffix == '.png':
dflimg = DFLPNG.load( str(filepath), print_on_no_embedded_data=True )
elif filepath.suffix == '.jpg':
dflimg = DFLJPG.load ( str(filepath), print_on_no_embedded_data=True )
else:
print ("%s is not a dfl image file" % (filepath.name) )
continue
dflpng = DFLPNG.load (str(filepath), print_on_no_embedded_data=True)
if dflpng is None:
continue
img_list.append( [str(filepath), dflpng.get_landmarks()] )
img_list.append( [str(filepath), dflimg.get_landmarks()] )
img_list_len = len(img_list)
@@ -187,15 +195,15 @@ def sort_by_face_dissim(input_path):
for filepath in tqdm( Path_utils.get_image_paths(input_path), desc="Loading", ascii=True):
filepath = Path(filepath)
if filepath.suffix != '.png':
print ("%s is not a png file required for sort_by_face_dissim" % (filepath.name) )
if filepath.suffix == '.png':
dflimg = DFLPNG.load( str(filepath), print_on_no_embedded_data=True )
elif filepath.suffix == '.jpg':
dflimg = DFLJPG.load ( str(filepath), print_on_no_embedded_data=True )
else:
print ("%s is not a dfl image file" % (filepath.name) )
continue
dflpng = DFLPNG.load (str(filepath), print_on_no_embedded_data=True)
if dflpng is None:
continue
img_list.append( [str(filepath), dflpng.get_landmarks(), 0 ] )
img_list.append( [str(filepath), dflimg.get_landmarks(), 0 ] )
img_list_len = len(img_list)
for i in tqdm( range(0, img_list_len-1), desc="Sorting", ascii=True):
@@ -220,15 +228,15 @@ def sort_by_face_yaw(input_path):
for filepath in tqdm( Path_utils.get_image_paths(input_path), desc="Loading", ascii=True):
filepath = Path(filepath)
if filepath.suffix != '.png':
print ("%s is not a png file required for sort_by_face_dissim" % (filepath.name) )
if filepath.suffix == '.png':
dflimg = DFLPNG.load( str(filepath), print_on_no_embedded_data=True )
elif filepath.suffix == '.jpg':
dflimg = DFLJPG.load ( str(filepath), print_on_no_embedded_data=True )
else:
print ("%s is not a dfl image file" % (filepath.name) )
continue
dflpng = DFLPNG.load (str(filepath), print_on_no_embedded_data=True)
if dflpng is None:
continue
img_list.append( [str(filepath), np.array( dflpng.get_yaw_value() ) ] )
img_list.append( [str(filepath), np.array( dflimg.get_yaw_value() ) ] )
print ("Sorting...")
img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)
@@ -436,15 +444,22 @@ def sort_by_hist_dissim(input_path):
print ("Sorting by histogram dissimilarity...")
img_list = []
for filename_path in tqdm( Path_utils.get_image_paths(input_path), desc="Loading", ascii=True):
image = cv2.imread(filename_path)
for filepath in tqdm( Path_utils.get_image_paths(input_path), desc="Loading", ascii=True):
filepath = Path(filepath)
dflpng = DFLPNG.load( str(filename_path) )
if dflpng is not None:
face_mask = LandmarksProcessor.get_image_hull_mask (image.shape, dflpng.get_landmarks())
if filepath.suffix == '.png':
dflimg = DFLPNG.load( str(filepath), print_on_no_embedded_data=True )
elif filepath.suffix == '.jpg':
dflimg = DFLJPG.load ( str(filepath), print_on_no_embedded_data=True )
else:
print ("%s is not a dfl image file" % (filepath.name) )
continue
image = cv2.imread(str(filepath))
face_mask = LandmarksProcessor.get_image_hull_mask (image.shape, dflimg.get_landmarks())
image = (image*face_mask).astype(np.uint8)
img_list.append ([filename_path, cv2.calcHist([cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)], [0], None, [256], [0, 256]), 0 ])
img_list.append ([str(filepath), cv2.calcHist([cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)], [0], None, [256], [0, 256]), 0 ])
img_list = HistDissimSubprocessor(img_list).process()
@@ -512,11 +527,12 @@ class FinalLoaderSubprocessor(SubprocessorBase):
filepath = Path(data[0])
try:
if filepath.suffix != '.png':
raise Exception ("%s is not a png file required for sort_final" % (filepath.name) )
dflpng = DFLPNG.load (str(filepath), print_on_no_embedded_data=True)
if dflpng is None:
if filepath.suffix == '.png':
dflimg = DFLPNG.load( str(filepath), print_on_no_embedded_data=True )
elif filepath.suffix == '.jpg':
dflimg = DFLJPG.load( str(filepath), print_on_no_embedded_data=True )
else:
print ("%s is not a dfl image file" % (filepath.name) )
raise Exception("")
bgr = cv2.imread(str(filepath))
@@ -524,14 +540,14 @@ class FinalLoaderSubprocessor(SubprocessorBase):
raise Exception ("Unable to load %s" % (filepath.name) )
gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
gray_masked = ( gray * LandmarksProcessor.get_image_hull_mask (bgr.shape, dflpng.get_landmarks() )[:,:,0] ).astype(np.uint8)
gray_masked = ( gray * LandmarksProcessor.get_image_hull_mask (bgr.shape, dflimg.get_landmarks() )[:,:,0] ).astype(np.uint8)
sharpness = estimate_sharpness(gray_masked)
hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
except Exception as e:
print (e)
return [ 1, [str(filepath)] ]
return [ 0, [str(filepath), sharpness, hist, dflpng.get_yaw_value() ] ]
return [ 0, [str(filepath), sharpness, hist, dflimg.get_yaw_value() ] ]
#override
@@ -689,15 +705,15 @@ def sort_by_origname(input_path):
for filepath in tqdm( Path_utils.get_image_paths(input_path), desc="Loading", ascii=True):
filepath = Path(filepath)
if filepath.suffix != '.png':
print ("%s is not a png file required for sort_by_origname" % (filepath.name) )
if filepath.suffix == '.png':
dflimg = DFLPNG.load( str(filepath), print_on_no_embedded_data=True )
elif filepath.suffix == '.jpg':
dflimg = DFLJPG.load( str(filepath), print_on_no_embedded_data=True )
else:
print ("%s is not a dfl image file" % (filepath.name) )
continue
dflpng = DFLPNG.load (str(filepath), print_on_no_embedded_data=True)
if dflpng is None:
continue
img_list.append( [str(filepath), dflpng.get_source_filename()] )
img_list.append( [str(filepath), dflimg.get_source_filename()] )
print ("Sorting...")
img_list = sorted(img_list, key=operator.itemgetter(1))

mainscripts/Util.py (new file, 46 lines)

@@ -0,0 +1,46 @@
import os
import sys
import operator
import numpy as np
import cv2
from tqdm import tqdm
from shutil import copyfile
from pathlib import Path
from utils import Path_utils
from utils import image_utils
from utils.DFLPNG import DFLPNG
from utils.DFLJPG import DFLJPG
from facelib import LandmarksProcessor
from utils.SubprocessorBase import SubprocessorBase
import multiprocessing
def convert_png_to_jpg_file (filepath):
filepath = Path(filepath)
if filepath.suffix != '.png':
return
dflpng = DFLPNG.load (str(filepath), print_on_no_embedded_data=True)
if dflpng is None:
return
dfl_dict = dflpng.getDFLDictData()
img = cv2.imread (str(filepath))
new_filepath = str(filepath.parent / (filepath.stem + '.jpg'))
cv2.imwrite ( new_filepath, img, [int(cv2.IMWRITE_JPEG_QUALITY), 85])
DFLJPG.embed_data( new_filepath, **dfl_dict )
filepath.unlink()
def convert_png_to_jpg_folder (input_path):
if not all(ord(c) < 128 for c in input_path):
print ("Path to directory must contain only non unicode characters.")
return
input_path = Path(input_path)
print ("Converting PNG to JPG...\r\n")
for filepath in tqdm( Path_utils.get_image_paths(input_path), desc="Converting", ascii=True):
filepath = Path(filepath)
convert_png_to_jpg_file(filepath)

nnlib/nnlib.py

@@ -111,6 +111,7 @@ keras_contrib = nnlib.keras_contrib
GroupNormalization = keras_contrib.layers.GroupNormalization
InstanceNormalization = keras_contrib.layers.InstanceNormalization
Padam = keras_contrib.optimizers.Padam
PELU = keras_contrib.layers.advanced_activations.PELU
"""
code_import_dlib_string = \
"""
@@ -449,20 +450,6 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
return (1.0 - tf.image.ssim ((y_true/2+0.5), (y_pred/2+0.5), 1.0)) / 2.0
nnlib.DSSIMLoss = DSSIMLoss
class DSSIMLoss(object):
def __init__(self, is_tanh=False):
self.is_tanh = is_tanh
def __call__(self,y_true, y_pred):
if not self.is_tanh:
loss = (1.0 - tf.image.ssim (y_true, y_pred, 1.0)) / 2.0
else:
loss = (1.0 - tf.image.ssim ( (y_true/2+0.5), (y_pred/2+0.5), 1.0)) / 2.0
return loss
nnlib.DSSIMLoss = DSSIMLoss
class DSSIMMaskLoss(object):
def __init__(self, mask_list, is_tanh=False):
self.mask_list = mask_list

samples/SampleLoader.py

@@ -6,6 +6,7 @@ from pathlib import Path
from utils import Path_utils
from utils.DFLPNG import DFLPNG
from utils.DFLJPG import DFLJPG
from .Sample import Sample
from .Sample import SampleType
@@ -54,21 +55,22 @@ class SampleLoader:
sample_list = []
for s in tqdm( samples, desc="Loading", ascii=True ):
s_filename_path = Path(s.filename)
if s_filename_path.suffix != '.png':
print ("%s is not a png file required for training" % (s_filename_path.name) )
continue
dflpng = DFLPNG.load ( str(s_filename_path), print_on_no_embedded_data=True )
if dflpng is None:
if s_filename_path.suffix == '.png':
dflimg = DFLPNG.load ( str(s_filename_path), print_on_no_embedded_data=True )
if dflimg is None: continue
elif s_filename_path.suffix == '.jpg':
dflimg = DFLJPG.load ( str(s_filename_path), print_on_no_embedded_data=True )
if dflimg is None: continue
else:
print ("%s is not a dfl image file required for training" % (s_filename_path.name) )
continue
sample_list.append( s.copy_and_set(sample_type=SampleType.FACE,
face_type=FaceType.fromString (dflpng.get_face_type()),
shape=dflpng.get_shape(),
landmarks=dflpng.get_landmarks(),
yaw=dflpng.get_yaw_value()) )
face_type=FaceType.fromString (dflimg.get_face_type()),
shape=dflimg.get_shape(),
landmarks=dflimg.get_landmarks(),
yaw=dflimg.get_yaw_value()) )
return sample_list

utils/DFLJPG.py (new file, 229 lines)

@@ -0,0 +1,229 @@
import struct
import pickle
import numpy as np
from facelib import FaceType
from utils.struct_utils import *
class DFLJPG(object):
def __init__(self):
self.data = b""
self.length = 0
self.chunks = []
self.dfl_dict = None
@staticmethod
def load_raw(filename):
try:
with open(filename, "rb") as f:
data = f.read()
except:
raise FileNotFoundError(data)
try:
inst = DFLJPG()
inst.data = data
inst.length = len(data)
inst_length = inst.length
chunks = []
data_counter = 0
while data_counter < inst_length:
chunk_m_l, chunk_m_h = struct.unpack ("BB", data[data_counter:data_counter+2])
data_counter += 2
if chunk_m_l != 0xFF:
raise ValueError("No Valid JPG info")
chunk_name = None
chunk_size = None
chunk_data = None
chunk_ex_data = None
is_unk_chunk = False
if chunk_m_h & 0xF0 == 0xD0:
n = chunk_m_h & 0x0F
if n >= 0 and n <= 7:
chunk_name = "RST%d" % (n)
chunk_size = 0
elif n == 0x8:
chunk_name = "SOI"
chunk_size = 0
if len(chunks) != 0:
raise Exception("")
elif n == 0x9:
chunk_name = "EOI"
chunk_size = 0
elif n == 0xA:
chunk_name = "SOS"
elif n == 0xB:
chunk_name = "DQT"
elif n == 0xD:
chunk_name = "DRI"
chunk_size = 2
else:
is_unk_chunk = True
elif chunk_m_h & 0xF0 == 0xC0:
n = chunk_m_h & 0x0F
if n == 0:
chunk_name = "SOF0"
elif n == 2:
chunk_name = "SOF2"
elif n == 4:
chunk_name = "DHT"
else:
is_unk_chunk = True
elif chunk_m_h & 0xF0 == 0xE0:
n = chunk_m_h & 0x0F
chunk_name = "APP%d" % (n)
else:
is_unk_chunk = True
if is_unk_chunk:
raise ValueError("Unknown chunk %X" % (chunk_m_h) )
if chunk_size == None: #variable size
chunk_size, = struct.unpack (">H", data[data_counter:data_counter+2])
chunk_size -= 2
data_counter += 2
if chunk_size > 0:
chunk_data = data[data_counter:data_counter+chunk_size]
data_counter += chunk_size
if chunk_name == "SOS":
c = data_counter
while c < inst_length and (data[c] != 0xFF or data[c+1] != 0xD9):
c += 1
chunk_ex_data = data[data_counter:c]
data_counter = c
chunks.append ({'name' : chunk_name,
'm_h' : chunk_m_h,
'data' : chunk_data,
'ex_data' : chunk_ex_data,
})
inst.chunks = chunks
return inst
except Exception as e:
raise Exception("Corrupted JPG file: %s" % (str(e)))
return None
@staticmethod
def load(filename, print_on_no_embedded_data=False, throw_on_no_embedded_data=False):
inst = DFLJPG.load_raw (filename)
inst.dfl_dict = None
for chunk in inst.chunks:
if chunk['name'] == 'APP0':
d, c = chunk['data'], 0
c, id, _ = struct_unpack (d, c, "=4sB")
if id == b"JFIF":
c, ver_major, ver_minor, units, Xdensity, Ydensity, Xthumbnail, Ythumbnail = struct_unpack (d, c, "=BBBHHBB")
if units != 0:
raise Exception("JPG must be in pixel units.")
inst.shape = (Ydensity, Xdensity, 3)
else:
raise Exception("Unknown jpeg ID: %s" % (id) )
if chunk['name'] == 'APP15':
if type(chunk['data']) == bytes:
inst.dfl_dict = pickle.loads(chunk['data'])
if (inst.dfl_dict is not None) and ('face_type' not in inst.dfl_dict.keys()):
inst.dfl_dict['face_type'] = FaceType.toString (FaceType.FULL)
if inst.dfl_dict == None:
if print_on_no_embedded_data:
print ( "No DFL data found in %s" % (filename) )
if throw_on_no_embedded_data:
raise ValueError("No DFL data found in %s" % (filename) )
return None
return inst
@staticmethod
def embed_data(filename, face_type=None,
landmarks=None,
yaw_value=None,
pitch_value=None,
source_filename=None,
source_rect=None,
source_landmarks=None
):
inst = DFLJPG.load_raw (filename)
inst.setDFLDictData ({
'face_type': face_type,
'landmarks': landmarks,
'yaw_value': yaw_value,
'pitch_value': pitch_value,
'source_filename': source_filename,
'source_rect': source_rect,
'source_landmarks': source_landmarks
})
try:
with open(filename, "wb") as f:
f.write ( inst.dump() )
except:
raise Exception( 'cannot save %s' % (filename) )
def dump(self):
data = b""
for chunk in self.chunks:
data += struct.pack ("BB", 0xFF, chunk['m_h'] )
chunk_data = chunk['data']
if chunk_data is not None:
data += struct.pack (">H", len(chunk_data)+2 )
data += chunk_data
chunk_ex_data = chunk['ex_data']
if chunk_ex_data is not None:
data += chunk_ex_data
return data
def get_shape(self):
return self.shape
def get_height(self):
for chunk in self.chunks:
if type(chunk) == IHDR:
return chunk.height
return 0
def getDFLDictData(self):
return self.dfl_dict
def setDFLDictData (self, dict_data=None):
self.dfl_dict = dict_data
for chunk in self.chunks:
if chunk['name'] == 'APP15':
self.chunks.remove(chunk)
break
last_app_chunk = 0
for i, chunk in enumerate (self.chunks):
if chunk['m_h'] & 0xF0 == 0xE0:
last_app_chunk = i
dflchunk = {'name' : 'APP15',
'm_h' : 0xEF,
'data' : pickle.dumps(dict_data),
'ex_data' : None,
}
self.chunks.insert (last_app_chunk+1, dflchunk)
def get_face_type(self): return self.dfl_dict['face_type']
def get_landmarks(self): return np.array ( self.dfl_dict['landmarks'] )
def get_yaw_value(self): return self.dfl_dict['yaw_value']
def get_pitch_value(self): return self.dfl_dict['pitch_value']
def get_source_filename(self): return self.dfl_dict['source_filename']
def get_source_rect(self): return self.dfl_dict['source_rect']
def get_source_landmarks(self): return np.array ( self.dfl_dict['source_landmarks'] )
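A minimal read-back sketch for the new DFLJPG class, assuming a face JPG that the updated extractor has already written (the file name below is a placeholder):
from utils.DFLJPG import DFLJPG

# metadata is stored pickled in an APP15 chunk; load() returns None if it is missing
dfljpg = DFLJPG.load("frame0001_0.jpg", print_on_no_embedded_data=True)
if dfljpg is not None:
    landmarks = dfljpg.get_landmarks()        # numpy array of landmark coordinates
    yaw       = dfljpg.get_yaw_value()
    source    = dfljpg.get_source_filename()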

utils/struct_utils.py (new file, 6 lines)

@@ -0,0 +1,6 @@
import struct
def struct_unpack(data, counter, fmt):
fmt_size = struct.calcsize(fmt)
return (counter+fmt_size,) + struct.unpack (fmt, data[counter:counter+fmt_size])
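struct_unpack returns the advanced counter first, so calls can be chained the way DFLJPG.load above walks the JFIF APP0 header. A small standalone sketch (the sample buffer is fabricated for illustration; run from the repository root):
import struct
from utils.struct_utils import struct_unpack

# fabricate the first bytes of a JFIF APP0 payload: 4-byte identifier, terminator, then version/units
data = struct.pack("=4sB", b"JFIF", 0) + struct.pack("=BBB", 1, 2, 0)

c = 0
c, ident, _ = struct_unpack(data, c, "=4sB")                     # ident == b"JFIF", c == 5
c, ver_major, ver_minor, units = struct_unpack(data, c, "=BBB")  # c == 8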