Mirror of https://github.com/iperov/DeepFaceLab.git, synced 2025-08-20 05:23:22 -07:00

Commit a3df04999c ("removing trailing spaces"), parent fa4e579b95.
61 changed files with 2110 additions and 2103 deletions.

utils/DFLJPG.py (104)
@@ -11,7 +11,7 @@ class DFLJPG(object):
        self.chunks = []
        self.dfl_dict = None
        self.shape = (0,0,0)

    @staticmethod
    def load_raw(filename):
        try:
@@ -19,7 +19,7 @@ class DFLJPG(object):
                data = f.read()
        except:
            raise FileNotFoundError(data)

        try:
            inst = DFLJPG()
            inst.data = data
@@ -30,23 +30,23 @@ class DFLJPG(object):
            while data_counter < inst_length:
                chunk_m_l, chunk_m_h = struct.unpack ("BB", data[data_counter:data_counter+2])
                data_counter += 2

                if chunk_m_l != 0xFF:
                    raise ValueError("No Valid JPG info")

                chunk_name = None
                chunk_size = None
                chunk_data = None
                chunk_ex_data = None
                is_unk_chunk = False

                if chunk_m_h & 0xF0 == 0xD0:
                    n = chunk_m_h & 0x0F

                    if n >= 0 and n <= 7:
                        chunk_name = "RST%d" % (n)
                        chunk_size = 0
                    elif n == 0x8:
                        chunk_name = "SOI"
                        chunk_size = 0
                        if len(chunks) != 0:
@@ -54,73 +54,73 @@ class DFLJPG(object):
                    elif n == 0x9:
                        chunk_name = "EOI"
                        chunk_size = 0
                    elif n == 0xA:
                        chunk_name = "SOS"
                    elif n == 0xB:
                        chunk_name = "DQT"
                    elif n == 0xD:
                        chunk_name = "DRI"
                        chunk_size = 2
                    else:
                        is_unk_chunk = True
                elif chunk_m_h & 0xF0 == 0xC0:
                    n = chunk_m_h & 0x0F
                    if n == 0:
                        chunk_name = "SOF0"
                    elif n == 2:
                        chunk_name = "SOF2"
                    elif n == 4:
                        chunk_name = "DHT"
                    else:
                        is_unk_chunk = True
                elif chunk_m_h & 0xF0 == 0xE0:
                    n = chunk_m_h & 0x0F
                    chunk_name = "APP%d" % (n)
                else:
                    is_unk_chunk = True

                if is_unk_chunk:
                    raise ValueError("Unknown chunk %X" % (chunk_m_h) )

                if chunk_size == None: #variable size
                    chunk_size, = struct.unpack (">H", data[data_counter:data_counter+2])
                    chunk_size -= 2
                    data_counter += 2

                if chunk_size > 0:
                    chunk_data = data[data_counter:data_counter+chunk_size]
                    data_counter += chunk_size

                if chunk_name == "SOS":
                    c = data_counter
                    while c < inst_length and (data[c] != 0xFF or data[c+1] != 0xD9):
                        c += 1

                    chunk_ex_data = data[data_counter:c]
                    data_counter = c

                chunks.append ({'name' : chunk_name,
                                'm_h' : chunk_m_h,
                                'data' : chunk_data,
                                'ex_data' : chunk_ex_data,
                                })
            inst.chunks = chunks

            return inst
        except Exception as e:
            raise Exception ("Corrupted JPG file: %s" % (str(e)))

    @staticmethod
    def load(filename):
        try:
            inst = DFLJPG.load_raw (filename)
            inst.dfl_dict = None

            for chunk in inst.chunks:
                if chunk['name'] == 'APP0':
                    d, c = chunk['data'], 0
                    c, id, _ = struct_unpack (d, c, "=4sB")

                    if id == b"JFIF":
                        c, ver_major, ver_minor, units, Xdensity, Ydensity, Xthumbnail, Ythumbnail = struct_unpack (d, c, "=BBBHHBB")
                        #if units == 0:
@@ -131,22 +131,22 @@ class DFLJPG(object):
                    d, c = chunk['data'], 0
                    c, precision, height, width = struct_unpack (d, c, ">BHH")
                    inst.shape = (height, width, 3)

                elif chunk['name'] == 'APP15':
                    if type(chunk['data']) == bytes:
                        inst.dfl_dict = pickle.loads(chunk['data'])

            if (inst.dfl_dict is not None) and ('face_type' not in inst.dfl_dict.keys()):
                inst.dfl_dict['face_type'] = FaceType.toString (FaceType.FULL)

            if inst.dfl_dict == None:
                return None

            return inst
        except Exception as e:
            print (e)
            return None

    @staticmethod
    def embed_data(filename, face_type=None,
                          landmarks=None,
@@ -155,7 +155,7 @@ class DFLJPG(object):
                          source_landmarks=None,
                          image_to_face_mat=None
                   ):

        inst = DFLJPG.load_raw (filename)
        inst.setDFLDictData ({
                                'face_type': face_type,
@@ -165,41 +165,41 @@ class DFLJPG(object):
                                'source_landmarks': source_landmarks,
                                'image_to_face_mat': image_to_face_mat
                             })

        try:
            with open(filename, "wb") as f:
                f.write ( inst.dump() )
        except:
            raise Exception( 'cannot save %s' % (filename) )

    def dump(self):
        data = b""

        for chunk in self.chunks:
            data += struct.pack ("BB", 0xFF, chunk['m_h'] )
            chunk_data = chunk['data']
            if chunk_data is not None:
                data += struct.pack (">H", len(chunk_data)+2 )
                data += chunk_data

            chunk_ex_data = chunk['ex_data']
            if chunk_ex_data is not None:
                data += chunk_ex_data

        return data

    def get_shape(self):
        return self.shape

    def get_height(self):
        for chunk in self.chunks:
            if type(chunk) == IHDR:
                return chunk.height
        return 0

    def getDFLDictData(self):
        return self.dfl_dict

    def setDFLDictData (self, dict_data=None):
        self.dfl_dict = dict_data

@@ -211,17 +211,17 @@ class DFLJPG(object):
        last_app_chunk = 0
        for i, chunk in enumerate (self.chunks):
            if chunk['m_h'] & 0xF0 == 0xE0:
                last_app_chunk = i

        dflchunk = {'name' : 'APP15',
                    'm_h' : 0xEF,
                    'data' : pickle.dumps(dict_data),
                    'ex_data' : None,
                    }
        self.chunks.insert (last_app_chunk+1, dflchunk)

    def get_face_type(self): return self.dfl_dict['face_type']
    def get_landmarks(self): return np.array ( self.dfl_dict['landmarks'] )
    def get_source_filename(self): return self.dfl_dict['source_filename']
    def get_source_rect(self): return self.dfl_dict['source_rect']
    def get_source_landmarks(self): return np.array ( self.dfl_dict['source_landmarks'] )
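For orientation, a minimal usage sketch of the DFLJPG class above. The keyword names source_filename and source_rect are inferred from the getters shown in this hunk rather than read from the visible signature, and the face-type string mirrors the FaceType.toString fallback used in load():

# Hypothetical usage sketch (not part of the commit)
import numpy as np
from utils.DFLJPG import DFLJPG

DFLJPG.embed_data ("aligned_0000.jpg",
                   face_type="full_face",                 # string form, as produced by FaceType.toString
                   landmarks=np.zeros ( (68,2) ),         # placeholder 68-point landmarks
                   source_filename="raw_0000.jpg",        # inferred keyword
                   source_rect=[0,0,256,256],             # inferred keyword
                   source_landmarks=np.zeros ( (68,2) ) )

jpg = DFLJPG.load ("aligned_0000.jpg")                    # returns None if the file carries no DFL dict
if jpg is not None:
    print ( jpg.get_shape(), jpg.get_face_type(), jpg.get_landmarks().shape )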
@@ -110,7 +110,7 @@ class Chunk(object):

    def __str__(self):
        return "<Chunk '{name}' length={length} crc={crc:08X}>".format(**self.__dict__)

class IHDR(Chunk):
    """IHDR Chunk
    width, height, bit_depth, color_type, compression_method,
@@ -189,24 +189,24 @@ class IEND(Chunk):
class DFLChunk(Chunk):
    def __init__(self, dict_data=None):
        super().__init__("fcWp")
        self.dict_data = dict_data

    def setDictData(self, dict_data):
        self.dict_data = dict_data

    def getDictData(self):
        return self.dict_data

    @classmethod
    def load(cls, data):
        inst = super().load(data)
        inst.dict_data = pickle.loads( inst.data )
        return inst

    def dump(self):
        self.data = pickle.dumps (self.dict_data)
        return super().dump()

chunk_map = {
    b"IHDR": IHDR,
    b"fcWp": DFLChunk,
@@ -219,7 +219,7 @@ class DFLPNG(object):
        self.length = 0
        self.chunks = []
        self.fcwp_dict = None

    @staticmethod
    def load_raw(filename):
        try:
@@ -227,11 +227,11 @@ class DFLPNG(object):
                data = f.read()
        except:
            raise FileNotFoundError(data)

        inst = DFLPNG()
        inst.data = data
        inst.length = len(data)

        if data[0:8] != PNG_HEADER:
            msg = "No Valid PNG header"
            raise ValueError(msg)
@@ -244,26 +244,26 @@ class DFLPNG(object):
            chunk = chunk_map.get(chunk_name, Chunk).load(data[chunk_start:chunk_end])
            inst.chunks.append(chunk)
            chunk_start = chunk_end

        return inst

    @staticmethod
    def load(filename):
        try:
            inst = DFLPNG.load_raw (filename)
            inst.fcwp_dict = inst.getDFLDictData()

            if (inst.fcwp_dict is not None) and ('face_type' not in inst.fcwp_dict.keys()):
                inst.fcwp_dict['face_type'] = FaceType.toString (FaceType.FULL)

            if inst.fcwp_dict == None:
                return None

            return inst
        except Exception as e:
            print(e)
            return None

    @staticmethod
    def embed_data(filename, face_type=None,
                          landmarks=None,
@@ -271,7 +271,7 @@ class DFLPNG(object):
                          source_rect=None,
                          source_landmarks=None
                   ):

        inst = DFLPNG.load_raw (filename)
        inst.setDFLDictData ({
                                'face_type': face_type,
@@ -280,7 +280,7 @@ class DFLPNG(object):
                                'source_rect': source_rect,
                                'source_landmarks': source_landmarks
                             })

        try:
            with open(filename, "wb") as f:
                f.write ( inst.dump() )
@@ -292,7 +292,7 @@ class DFLPNG(object):
        for chunk in self.chunks:
            data += chunk.dump()
        return data

    def get_shape(self):
        for chunk in self.chunks:
            if type(chunk) == IHDR:
@@ -301,34 +301,34 @@ class DFLPNG(object):
                h = chunk.height
                return (h,w,c)
        return (0,0,0)

    def get_height(self):
        for chunk in self.chunks:
            if type(chunk) == IHDR:
                return chunk.height
        return 0

    def getDFLDictData(self):
        for chunk in self.chunks:
            if type(chunk) == DFLChunk:
                return chunk.getDictData()
        return None

    def setDFLDictData (self, dict_data=None):
        for chunk in self.chunks:
            if type(chunk) == DFLChunk:
                self.chunks.remove(chunk)
                break

        if not dict_data is None:
            chunk = DFLChunk(dict_data)
            self.chunks.insert(-1, chunk)

    def get_face_type(self): return self.fcwp_dict['face_type']
    def get_landmarks(self): return np.array ( self.fcwp_dict['landmarks'] )
    def get_source_filename(self): return self.fcwp_dict['source_filename']
    def get_source_rect(self): return self.fcwp_dict['source_rect']
    def get_source_landmarks(self): return np.array ( self.fcwp_dict['source_landmarks'] )

    def __str__(self):
        return "<PNG length={length} chunks={}>".format(len(self.chunks), **self.__dict__)
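The DFLPNG.load_raw loop above walks standard PNG chunks (4-byte big-endian length, 4-byte type such as b"IHDR" or the custom b"fcWp", payload, 4-byte CRC) and dispatches them through chunk_map. A small standalone sketch of that framing, independent of the classes in this diff:

# Illustrative sketch of the PNG chunk layout assumed by the parser above
import struct

def iter_png_chunks(data):
    # 'data' is the file contents after the 8-byte PNG signature
    pos = 0
    while pos < len(data):
        length, = struct.unpack(">I", data[pos:pos+4])   # payload length
        name = data[pos+4:pos+8]                         # chunk type, e.g. b"IHDR" or b"fcWp"
        payload = data[pos+8:pos+8+length]
        yield name, payload                              # 4 CRC bytes follow the payload
        pos += 12 + length                               # 4 length + 4 type + payload + 4 CRC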
@@ -5,8 +5,8 @@ image_extensions = [".jpg", ".jpeg", ".png", ".tif", ".tiff"]

def get_image_paths(dir_path, image_extensions=image_extensions):
    dir_path = Path (dir_path)

    result = []
    if dir_path.exists():
        for x in list(scandir(str(dir_path))):
            if any([x.name.lower().endswith(ext) for ext in image_extensions]):
@@ -14,25 +14,25 @@ def get_image_paths(dir_path, image_extensions=image_extensions):
    return result

def get_image_unique_filestem_paths(dir_path, verbose_print_func=None):
    result = get_image_paths(dir_path)
    result_dup = set()

    for f in result[:]:
        f_stem = Path(f).stem
        if f_stem in result_dup:
            result.remove(f)
            if verbose_print_func is not None:
                verbose_print_func ("Duplicate filenames are not allowed, skipping: %s" % Path(f).name )
            continue
        result_dup.add(f_stem)

    return result

def get_all_dir_names_startswith (dir_path, startswith):
    dir_path = Path (dir_path)
    startswith = startswith.lower()

    result = []
    if dir_path.exists():
        for x in list(scandir(str(dir_path))):
            if x.name.lower().startswith(startswith):
@@ -42,7 +42,7 @@ def get_all_dir_names_startswith (dir_path, startswith):
def get_first_file_by_stem (dir_path, stem, exts=None):
    dir_path = Path (dir_path)
    stem = stem.lower()

    if dir_path.exists():
        for x in list(scandir(str(dir_path))):
            if not x.is_file():
@@ -50,5 +50,5 @@ def get_first_file_by_stem (dir_path, stem, exts=None):
            xp = Path(x.path)
            if xp.stem.lower() == stem and (exts is None or xp.suffix.lower() in exts):
                return xp

    return None
@@ -11,7 +11,7 @@ def cv2_imread(filename, flags=cv2.IMREAD_UNCHANGED):
        return cv2.imdecode(numpyarray, flags)
    except:
        return None

def cv2_imwrite(filename, img, *args):
    ret, buf = cv2.imencode( Path(filename).suffix, img, *args)
    if ret == True:
@@ -19,4 +19,4 @@ def cv2_imwrite(filename, img, *args):
            with open(filename, "wb") as stream:
                stream.write( buf )
        except:
            pass
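A short usage sketch for the two wrappers above; going through cv2.imencode/cv2.imdecode plus ordinary Python file I/O (instead of cv2.imread/cv2.imwrite) is presumably what lets non-ASCII paths work on Windows. The import path is an assumption:

# Hypothetical usage sketch (not part of the commit)
from utils.cv2_utils import cv2_imread, cv2_imwrite   # assumed import path

img = cv2_imread ("workspace/data_src/0001.jpg")       # returns None on failure instead of raising
if img is not None:
    cv2_imwrite ("workspace/out/0001.png", img)        # output format chosen from the filename suffix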
@@ -21,7 +21,7 @@ def reinhard_color_transfer(target, source, clip=False, preserve_paper=False, so
        OpenCV image in BGR color space (the source image)
    target: NumPy array
        OpenCV image in BGR color space (the target image)
    clip: Should components of L*a*b* image be scaled by np.clip before
        converting back to BGR color space?
        If False then components will be min-max scaled appropriately.
        Clipping will keep target image brightness truer to the input.
@@ -32,7 +32,7 @@ def reinhard_color_transfer(target, source, clip=False, preserve_paper=False, so
        aesthetically pleasing results.
        If False then L*a*b* components will scaled using the reciprocal of
        the scaling factor proposed in the paper. This method seems to produce
        more consistently aesthetically pleasing results

    Returns:
    -------
@@ -40,13 +40,13 @@ def reinhard_color_transfer(target, source, clip=False, preserve_paper=False, so
        OpenCV image (w, h, 3) NumPy array (uint8)
    """


    # convert the images from the RGB to L*ab* color space, being
    # sure to utilizing the floating point data type (note: OpenCV
    # expects floats to be 32-bit, so use that instead of 64-bit)
    source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype(np.float32)
    target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype(np.float32)

    # compute color statistics for the source and target images
    src_input = source if source_mask is None else source*source_mask
    tgt_input = target if target_mask is None else target*target_mask
@@ -86,7 +86,7 @@ def reinhard_color_transfer(target, source, clip=False, preserve_paper=False, so
    # type
    transfer = cv2.merge([l, a, b])
    transfer = cv2.cvtColor(transfer.astype(np.uint8), cv2.COLOR_LAB2BGR)

    # return the color transferred image
    return transfer

@@ -127,7 +127,7 @@ def linear_color_transfer(target_img, source_img, mode='pca', eps=1e-5):
    matched_img[matched_img>1] = 1
    matched_img[matched_img<0] = 0
    return matched_img

def lab_image_stats(image):
    # compute the mean and standard deviation of each channel
    (l, a, b) = cv2.split(image)
@@ -137,7 +137,7 @@ def lab_image_stats(image):

    # return the color statistics
    return (lMean, lStd, aMean, aStd, bMean, bStd)

def _scale_array(arr, clip=True):
    if clip:
        return np.clip(arr, 0, 255)
@@ -145,12 +145,12 @@ def _scale_array(arr, clip=True):
    mn = arr.min()
    mx = arr.max()
    scale_range = (max([mn, 0]), min([mx, 255]))

    if mn < scale_range[0] or mx > scale_range[1]:
        return (scale_range[1] - scale_range[0]) * (arr - mn) / (mx - mn) + scale_range[0]

    return arr

def channel_hist_match(source, template, hist_match_threshold=255, mask=None):
    # Code borrowed from:
    # https://stackoverflow.com/questions/32655686/histogram-matching-of-two-images-in-python-2-x
@@ -179,22 +179,22 @@ def channel_hist_match(source, template, hist_match_threshold=255, mask=None):
    t_quantiles = 255 * t_quantiles / t_quantiles[-1]
    interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)

    return interp_t_values[bin_idx].reshape(oldshape)

def color_hist_match(src_im, tar_im, hist_match_threshold=255):
    h,w,c = src_im.shape
    matched_R = channel_hist_match(src_im[:,:,0], tar_im[:,:,0], hist_match_threshold, None)
    matched_G = channel_hist_match(src_im[:,:,1], tar_im[:,:,1], hist_match_threshold, None)
    matched_B = channel_hist_match(src_im[:,:,2], tar_im[:,:,2], hist_match_threshold, None)

    to_stack = (matched_R, matched_G, matched_B)
    for i in range(3, c):
        to_stack += ( src_im[:,:,i],)


    matched = np.stack(to_stack, axis=-1).astype(src_im.dtype)
    return matched

pil_fonts = {}
def _get_pil_font (font, size):
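Usage sketch for color_hist_match above: each of the first three channels of src_im is histogram-matched against the corresponding channel of tar_im, and any extra channels (for example an alpha mask) are passed through unchanged. The module path is an assumption:

# Hypothetical usage sketch (not part of the commit)
import numpy as np
from utils.image_utils import color_hist_match   # assumed import path

src = np.random.rand (128,128,3).astype(np.float32)
tar = np.random.rand (128,128,3).astype(np.float32)
matched = color_hist_match (src, tar)             # per-channel histogram matching
assert matched.shape == src.shape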
@@ -204,65 +204,65 @@ def _get_pil_font (font, size):
        if font_str_id not in pil_fonts.keys():
            pil_fonts[font_str_id] = ImageFont.truetype(font + ".ttf", size=size, encoding="unic")
        pil_font = pil_fonts[font_str_id]
        return pil_font
    except:
        return ImageFont.load_default()

def get_text_image( shape, text, color=(1,1,1), border=0.2, font=None):
    try:
        size = shape[1]
        pil_font = _get_pil_font( localization.get_default_ttf_font_name() , size)
        text_width, text_height = pil_font.getsize(text)

        canvas = Image.new('RGB', shape[0:2], (0,0,0) )
        draw = ImageDraw.Draw(canvas)
        offset = ( 0, 0)
        draw.text(offset, text, font=pil_font, fill=tuple((np.array(color)*255).astype(np.int)) )

        result = np.asarray(canvas) / 255
        if shape[2] != 3:
            result = np.concatenate ( (result, np.ones ( (shape[1],) + (shape[0],) + (shape[2]-3,)) ), axis=2 )

        return result
    except:
        return np.zeros ( (shape[1], shape[0], shape[2]), dtype=np.float32 )

def draw_text( image, rect, text, color=(1,1,1), border=0.2, font=None):
    h,w,c = image.shape

    l,t,r,b = rect
    l = np.clip (l, 0, w-1)
    r = np.clip (r, 0, w-1)
    t = np.clip (t, 0, h-1)
    b = np.clip (b, 0, h-1)

    image[t:b, l:r] += get_text_image ( (r-l,b-t,c) , text, color, border, font )

def draw_text_lines (image, rect, text_lines, color=(1,1,1), border=0.2, font=None):
    text_lines_len = len(text_lines)
    if text_lines_len == 0:
        return

    l,t,r,b = rect
    h = b-t
    h_per_line = h // text_lines_len

    for i in range(0, text_lines_len):
        draw_text (image, (l, i*h_per_line, r, (i+1)*h_per_line), text_lines[i], color, border, font)

def get_draw_text_lines ( image, rect, text_lines, color=(1,1,1), border=0.2, font=None):
    image = np.zeros ( image.shape, dtype=np.float )
    draw_text_lines ( image, rect, text_lines, color, border, font)
    return image

def draw_polygon (image, points, color, thickness = 1):
    points_len = len(points)
    for i in range (0, points_len):
        p0 = tuple( points[i] )
        p1 = tuple( points[ (i+1) % points_len] )
        cv2.line (image, p0, p1, color, thickness=thickness)

def draw_rect(image, rect, color, thickness=1):
    l,t,r,b = rect
    draw_polygon (image, [ (l,t), (r,t), (r,b), (l,b ) ], color, thickness)
@@ -272,40 +272,40 @@ def rectContains(rect, point) :

def applyAffineTransform(src, srcTri, dstTri, size) :
    warpMat = cv2.getAffineTransform( np.float32(srcTri), np.float32(dstTri) )
    return cv2.warpAffine( src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )

def morphTriangle(dst_img, src_img, st, dt) :
    (h,w,c) = dst_img.shape
    sr = np.array( cv2.boundingRect(np.float32(st)) )
    dr = np.array( cv2.boundingRect(np.float32(dt)) )
    sRect = st - sr[0:2]
    dRect = dt - dr[0:2]
    d_mask = np.zeros((dr[3], dr[2], c), dtype = np.float32)
    cv2.fillConvexPoly(d_mask, np.int32(dRect), (1.0,)*c, 8, 0);
    imgRect = src_img[sr[1]:sr[1] + sr[3], sr[0]:sr[0] + sr[2]]
    size = (dr[2], dr[3])
    warpImage1 = applyAffineTransform(imgRect, sRect, dRect, size)

    if c == 1:
        warpImage1 = np.expand_dims( warpImage1, -1 )

    dst_img[dr[1]:dr[1]+dr[3], dr[0]:dr[0]+dr[2]] = dst_img[dr[1]:dr[1]+dr[3], dr[0]:dr[0]+dr[2]]*(1-d_mask) + warpImage1 * d_mask

def morph_by_points (image, sp, dp):
    if sp.shape != dp.shape:
        raise ValueError ('morph_by_points() sp.shape != dp.shape')
    (h,w,c) = image.shape

    result_image = np.zeros(image.shape, dtype = image.dtype)

    for tri in Delaunay(dp).simplices:
        morphTriangle(result_image, image, sp[tri], dp[tri])

    return result_image

def equalize_and_stack_square (images, axis=1):
    max_c = max ([ 1 if len(image.shape) == 2 else image.shape[2] for image in images ] )

    target_wh = 99999
    for i,image in enumerate(images):
        if len(image.shape) == 2:
@@ -313,113 +313,112 @@ def equalize_and_stack_square (images, axis=1):
            c = 1
        else:
            h,w,c = image.shape

        if h < target_wh:
            target_wh = h

        if w < target_wh:
            target_wh = w

    for i,image in enumerate(images):
        if len(image.shape) == 2:
            h,w = image.shape
            c = 1
        else:
            h,w,c = image.shape

        if c < max_c:
            if c == 1:
                if len(image.shape) == 2:
                    image = np.expand_dims ( image, -1 )
                image = np.concatenate ( (image,)*max_c, -1 )
            elif c == 2: #GA
                image = np.expand_dims ( image[...,0], -1 )
                image = np.concatenate ( (image,)*max_c, -1 )
            else:
                image = np.concatenate ( (image, np.ones((h,w,max_c - c))), -1 )

        if h != target_wh or w != target_wh:
            image = cv2.resize ( image, (target_wh, target_wh) )
            h,w,c = image.shape

        images[i] = image

    return np.concatenate ( images, axis = 1 )

def bgr2hsv (img):
    return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

def hsv2bgr (img):
    return cv2.cvtColor(img, cv2.COLOR_HSV2BGR)

def bgra2hsva (img):
    return np.concatenate ( (cv2.cvtColor(img[...,0:3], cv2.COLOR_BGR2HSV ), np.expand_dims (img[...,3], -1)), -1 )

def bgra2hsva_list (imgs):
    return [ bgra2hsva(img) for img in imgs ]

def hsva2bgra (img):
    return np.concatenate ( (cv2.cvtColor(img[...,0:3], cv2.COLOR_HSV2BGR ), np.expand_dims (img[...,3], -1)), -1 )

def hsva2bgra_list (imgs):
    return [ hsva2bgra(img) for img in imgs ]

def gen_warp_params (source, flip, rotation_range=[-10,10], scale_range=[-0.5, 0.5], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05] ):
    h,w,c = source.shape
    if (h != w) or (w != 64 and w != 128 and w != 256 and w != 512 and w != 1024):
        raise ValueError ('TrainingDataGenerator accepts only square power of 2 images.')

    rotation = np.random.uniform( rotation_range[0], rotation_range[1] )
    scale = np.random.uniform(1 +scale_range[0], 1 +scale_range[1])
    tx = np.random.uniform( tx_range[0], tx_range[1] )
    ty = np.random.uniform( ty_range[0], ty_range[1] )

    #random warp by grid
    cell_size = [ w // (2**i) for i in range(1,4) ] [ np.random.randint(3) ]
    cell_count = w // cell_size + 1

    grid_points = np.linspace( 0, w, cell_count)
    mapx = np.broadcast_to(grid_points, (cell_count, cell_count)).copy()
    mapy = mapx.T

    mapx[1:-1,1:-1] = mapx[1:-1,1:-1] + random_utils.random_normal( size=(cell_count-2, cell_count-2) )*(cell_size*0.24)
    mapy[1:-1,1:-1] = mapy[1:-1,1:-1] + random_utils.random_normal( size=(cell_count-2, cell_count-2) )*(cell_size*0.24)

    half_cell_size = cell_size // 2

    mapx = cv2.resize(mapx, (w+cell_size,)*2 )[half_cell_size:-half_cell_size-1,half_cell_size:-half_cell_size-1].astype(np.float32)
    mapy = cv2.resize(mapy, (w+cell_size,)*2 )[half_cell_size:-half_cell_size-1,half_cell_size:-half_cell_size-1].astype(np.float32)

    #random transform
    random_transform_mat = cv2.getRotationMatrix2D((w // 2, w // 2), rotation, scale)
    random_transform_mat[:, 2] += (tx*w, ty*w)

    params = dict()
    params['mapx'] = mapx
    params['mapy'] = mapy
    params['rmat'] = random_transform_mat
    params['w'] = w
    params['flip'] = flip and np.random.randint(10) < 4

    return params

def warp_by_params (params, img, warp, transform, flip, is_border_replicate):
    if warp:
        img = cv2.remap(img, params['mapx'], params['mapy'], cv2.INTER_CUBIC )
    if transform:
        img = cv2.warpAffine( img, params['rmat'], (params['w'], params['w']), borderMode=(cv2.BORDER_REPLICATE if is_border_replicate else cv2.BORDER_CONSTANT), flags=cv2.INTER_CUBIC )
    if flip and params['flip']:
        img = img[:,::-1,:]
    return img

#n_colors = [0..256]
def reduce_colors (img_bgr, n_colors):
    img_rgb = (cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB) * 255.0).astype(np.uint8)
    img_rgb_pil = Image.fromarray(img_rgb)
    img_rgb_pil_p = img_rgb_pil.convert('P', palette=Image.ADAPTIVE, colors=n_colors)

    img_rgb_p = img_rgb_pil_p.convert('RGB')
    img_bgr = cv2.cvtColor( np.array(img_rgb_p, dtype=np.float32) / 255.0, cv2.COLOR_RGB2BGR )

    return img_bgr
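A sketch of how gen_warp_params and warp_by_params are typically combined for augmentation: the same params dict can be applied with and without the random grid warp so that the warped input and the unwarped target stay geometrically aligned (the input must be square with a power-of-two side, as the check above enforces). The import path is an assumption:

# Hypothetical usage sketch (not part of the commit)
import numpy as np
from utils.image_utils import gen_warp_params, warp_by_params   # assumed import path

img = np.random.rand (128,128,3).astype(np.float32)

params = gen_warp_params (img, True)                               # random rotation/scale/shift + warp grid
warped = warp_by_params (params, img, True,  True, True, False)    # grid warp + affine + possible flip
target = warp_by_params (params, img, False, True, True, False)    # same affine/flip, but no grid warp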
@@ -5,7 +5,7 @@ import time


class ThisThreadGenerator(object):
    def __init__(self, generator_func, user_param=None):
        super().__init__()
        self.generator_func = generator_func
        self.user_param = user_param
@@ -13,30 +13,30 @@ class ThisThreadGenerator(object):

    def __iter__(self):
        return self

    def __next__(self):
        if not self.initialized:
            self.initialized = True
            self.generator_func = self.generator_func(self.user_param)

        return next(self.generator_func)

class SubprocessGenerator(object):
    def __init__(self, generator_func, user_param=None, prefetch=2):
        super().__init__()
        self.prefetch = prefetch
        self.generator_func = generator_func
        self.user_param = user_param
        self.sc_queue = multiprocessing.Queue()
        self.cs_queue = multiprocessing.Queue()
        self.p = None

    def process_func(self):
        self.generator_func = self.generator_func(self.user_param)
        while True:
            while self.prefetch > -1:
                try:
                    gen_data = next (self.generator_func)
                except StopIteration:
                    self.cs_queue.put (None)
                    return
@@ -47,17 +47,17 @@ class SubprocessGenerator(object):

    def __iter__(self):
        return self

    def __next__(self):
        if self.p == None:
            self.p = multiprocessing.Process(target=self.process_func, args=())
            self.p.daemon = True
            self.p.start()

        gen_data = self.cs_queue.get()
        if gen_data is None:
            self.p.terminate()
            self.p.join()
            raise StopIteration()
        self.sc_queue.put (1)
        return gen_data
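A minimal driving sketch for the two generator wrappers above. The wrapped function is called once with user_param and must itself return a generator; SubprocessGenerator then pulls batches from it in a child process and hands them back through cs_queue. The import path and names below are illustrative assumptions:

# Hypothetical usage sketch (not part of the commit)
from utils.iter_utils import ThisThreadGenerator, SubprocessGenerator   # assumed import path

def batch_func(param):
    # illustrative generator: yields the same value forever
    while True:
        yield param

if __name__ == "__main__":
    gen = SubprocessGenerator (batch_func, user_param=42)   # ThisThreadGenerator(...) for in-process debugging
    first = next(gen)                                       # worker process is started lazily on first next()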
@@ -4,12 +4,12 @@ import sys
if sys.platform[0:3] == 'win':
    from ctypes import windll
    from ctypes import wintypes

def set_process_lowest_prio():
    try:
        if sys.platform[0:3] == 'win':
            GetCurrentProcess = windll.kernel32.GetCurrentProcess
            GetCurrentProcess.restype = wintypes.HANDLE
            SetPriorityClass = windll.kernel32.SetPriorityClass
            SetPriorityClass.argtypes = (wintypes.HANDLE, wintypes.DWORD)
            SetPriorityClass ( GetCurrentProcess(), 0x00000040 )
@@ -19,7 +19,7 @@ def set_process_lowest_prio():
            os.nice(20)
    except:
        print("Unable to set lowest process priority")

def set_process_dpi_aware():
    if sys.platform[0:3] == 'win':
        windll.user32.SetProcessDPIAware(True)
@@ -3,12 +3,12 @@ import numpy as np
def random_normal( size=(1,), trunc_val = 2.5 ):
    len = np.array(size).prod()
    result = np.empty ( (len,) , dtype=np.float32)

    for i in range (len):
        while True:
            x = np.random.normal()
            if x >= -trunc_val and x <= trunc_val:
                break
        result[i] = (x / trunc_val)

    return result.reshape ( size )
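random_normal above is a truncated normal: samples outside plus or minus trunc_val are redrawn and the accepted value is divided by trunc_val, so every entry lands in [-1, 1]. A quick check, with the module path assumed:

# Hypothetical usage sketch (not part of the commit)
from utils.random_utils import random_normal   # assumed import path

r = random_normal ( size=(4,4) )
assert r.shape == (4,4)
assert (r >= -1.0).all() and (r <= 1.0).all()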
@@ -11,26 +11,26 @@ class suppress_stdout_stderr(object):

        self.old_stdout_fileno = os.dup ( sys.stdout.fileno() )
        self.old_stderr_fileno = os.dup ( sys.stderr.fileno() )

        self.old_stdout = sys.stdout
        self.old_stderr = sys.stderr

        os.dup2 ( self.outnull_file.fileno(), self.old_stdout_fileno_undup )
        os.dup2 ( self.errnull_file.fileno(), self.old_stderr_fileno_undup )

        sys.stdout = self.outnull_file
        sys.stderr = self.errnull_file
        return self

    def __exit__(self, *_):
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr

        os.dup2 ( self.old_stdout_fileno, self.old_stdout_fileno_undup )
        os.dup2 ( self.old_stderr_fileno, self.old_stderr_fileno_undup )

        os.close ( self.old_stdout_fileno )
        os.close ( self.old_stderr_fileno )

        self.outnull_file.close()
        self.errnull_file.close()
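suppress_stdout_stderr swaps both the Python-level streams and the underlying file descriptors, so output from native libraries is silenced too. Typical use, with the module path assumed:

# Hypothetical usage sketch (not part of the commit)
from utils.std_utils import suppress_stdout_stderr   # assumed import path

with suppress_stdout_stderr():
    print ("hidden")   # C-level output from native libraries is redirected to the null file as well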
@@ -1,6 +1,5 @@
import struct

def struct_unpack(data, counter, fmt):
    fmt_size = struct.calcsize(fmt)
    return (counter+fmt_size,) + struct.unpack (fmt, data[counter:counter+fmt_size])
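struct_unpack threads a byte offset through successive reads: it returns the advanced counter followed by the unpacked fields, which is how the JFIF and SOF parsing earlier in this commit walks through chunk data. A self-contained example:

import struct

def struct_unpack(data, counter, fmt):
    fmt_size = struct.calcsize(fmt)
    return (counter+fmt_size,) + struct.unpack (fmt, data[counter:counter+fmt_size])

data = struct.pack (">BHH", 8, 256, 256)                 # e.g. precision, height, width of an SOF0 payload
c, precision, height, width = struct_unpack (data, 0, ">BHH")
print (c, precision, height, width)                      # 5 8 256 256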