mirror of https://github.com/iperov/DeepFaceLab.git
synced 2025-07-06 13:02:15 -07:00
old SAE model will not work with this update.

Fixed a bug where SAE could collapse over time. SAE: removed the CA weights and combined encoder/decoder dims options, and added new options:

Encoder dims per channel (21-85 ?:help skip:%d): more encoder dims help to recognize more facial features, but require more VRAM. You can fine-tune model size to fit your GPU.

Decoder dims per channel (10-85 ?:help skip:%d): more decoder dims help to get better details, but require more VRAM. You can fine-tune model size to fit your GPU.

Add residual blocks to decoder? (y/n, ?:help skip:n): these blocks help to get better details, but require more computing time.

Remove gray border? (y/n, ?:help skip:n): removes the gray border of the predicted face, but requires more computing resources.
This commit is contained in:
parent
4f4447d719
commit
37505d88e3
12 changed files with 264 additions and 47242 deletions
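For a rough feel of what the two new dims options control, here is an illustrative sketch. The scaling rule below is an assumption for intuition only; the exact layer sizing lives in SAEModel's encoder/decoder flow builders.

```python
# Illustrative only: assumed relation between "dims per channel" and conv widths.
bgr_channels = 3                    # B, G, R
e_ch_dims = 42                      # Encoder dims per channel, prompt range 21-85
d_ch_dims = e_ch_dims // 2          # Decoder default is half of the encoder value

encoder_base_filters = bgr_channels * e_ch_dims   # ~126 filters (assumed scheme)
decoder_base_filters = bgr_channels * d_ch_dims   # ~63 filters (assumed scheme)
print(encoder_base_filters, decoder_base_filters)
```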
@@ -13,6 +13,11 @@ class Converter(object):
         self.predictor_func = predictor_func
         self.type = type

+    #overridable
+    def on_cli_initialize(self):
+        #cli initialization
+        pass
+
     #overridable
     def convert_face (self, img_bgr, img_face_landmarks, debug):
         #return float32 image
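The new hook is a plain template-method no-op: ConvertSubprocessor (see its diff further below) can call on_cli_initialize() on every converter unconditionally, and only subclasses that need per-process setup override it. A minimal sketch of the pattern; LoudConverter is a hypothetical subclass, not part of the commit:

```python
# Minimal sketch of the overridable-hook pattern added above.
class Converter(object):
    TYPE_FACE = 1

    def __init__(self, predictor_func, type):
        self.predictor_func = predictor_func
        self.type = type

    #overridable
    def on_cli_initialize(self):
        #cli initialization: default is a no-op
        pass

class LoudConverter(Converter):            # hypothetical subclass
    def on_cli_initialize(self):
        print("building heavy per-process state here")

LoudConverter(lambda x: x, Converter.TYPE_FACE).on_cli_initialize()
```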
@@ -19,8 +19,8 @@ class ConverterMasked(Converter):

     #override
     def __init__(self, predictor_func,
                        predictor_input_size=0,
                        output_size=0,
                        face_type=FaceType.FULL,
                        default_mode = 4,
                        base_erode_mask_modifier = 0,
@@ -28,102 +28,103 @@ class ConverterMasked(Converter):
                        default_erode_mask_modifier = 0,
                        default_blur_mask_modifier = 0,
                        clip_hborder_mask_per = 0):

         super().__init__(predictor_func, Converter.TYPE_FACE)
         self.predictor_input_size = predictor_input_size
         self.output_size = output_size
         self.face_type = face_type
         self.clip_hborder_mask_per = clip_hborder_mask_per

         mode = io.input_int ("Choose mode: (1) overlay, (2) hist match, (3) hist match bw, (4) seamless, (5) raw. Default - %d : " % (default_mode) , default_mode)

         mode_dict = {1:'overlay',
                      2:'hist-match',
                      3:'hist-match-bw',
                      4:'seamless',
                      5:'raw'}

         self.mode = mode_dict.get (mode, mode_dict[default_mode] )
         self.suppress_seamless_jitter = False

         if self.mode == 'raw':
             mode = io.input_int ("Choose raw mode: (1) rgb, (2) rgb+mask (default), (3) mask only, (4) predicted only : ", 2)
             self.raw_mode = {1:'rgb',
                              2:'rgb-mask',
                              3:'mask-only',
                              4:'predicted-only'}.get (mode, 'rgb-mask')

         if self.mode != 'raw':

             if self.mode == 'seamless':
                 io.input_bool ("Suppress seamless jitter? [ y/n ] (?:help skip:n ) : ", False, help_message="Seamless clone produces face jitter. You can suppress it, but process can take a long time." )

             if io.input_bool("Seamless hist match? (y/n skip:n) : ", False):
                 self.mode = 'seamless-hist-match'

             if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
                 self.masked_hist_match = io.input_bool("Masked hist match? (y/n skip:y) : ", True)

             if self.mode == 'hist-match' or self.mode == 'hist-match-bw' or self.mode == 'seamless-hist-match':
                 self.hist_match_threshold = np.clip ( io.input_int("Hist match threshold [0..255] (skip:255) : ", 255), 0, 255)

         if face_type == FaceType.FULL:
             self.mask_mode = io.input_int ("Mask mode: (1) learned, (2) dst, (3) FAN-prd, (4) FAN-dst (?) help. Default - %d : " % (1) , 1, help_message="If you learned mask, then option 1 should be choosed. 'dst' mask is raw shaky mask from dst aligned images. 'FAN-prd' - using super smooth mask by pretrained FAN-model from predicted face. 'FAN-dst' - using super smooth mask by pretrained FAN-model from dst face.")
         else:
             self.mask_mode = io.input_int ("Mask mode: (1) learned, (2) dst . Default - %d : " % (1) , 1)

         if self.mask_mode == 3 or self.mask_mode == 4:
             self.fan_seg = None

         if self.mode != 'raw':
             self.erode_mask_modifier = base_erode_mask_modifier + np.clip ( io.input_int ("Choose erode mask modifier [-200..200] (skip:%d) : " % (default_erode_mask_modifier), default_erode_mask_modifier), -200, 200)
             self.blur_mask_modifier = base_blur_mask_modifier + np.clip ( io.input_int ("Choose blur mask modifier [-200..200] (skip:%d) : " % (default_blur_mask_modifier), default_blur_mask_modifier), -200, 200)

         self.seamless_erode_mask_modifier = 0
         if 'seamless' in self.mode:
             self.seamless_erode_mask_modifier = np.clip ( io.input_int ("Choose seamless erode mask modifier [-100..100] (skip:0) : ", 0), -100, 100)

         self.output_face_scale = np.clip ( 1.0 + io.input_int ("Choose output face scale modifier [-50..50] (skip:0) : ", 0)*0.01, 0.5, 1.5)
         self.color_transfer_mode = io.input_str ("Apply color transfer to predicted face? Choose mode ( rct/lct skip:None ) : ", None, ['rct','lct'])

         if self.mode != 'raw':
             self.final_image_color_degrade_power = np.clip ( io.input_int ("Degrade color power of final image [0..100] (skip:0) : ", 0), 0, 100)
             self.alpha = io.input_bool("Export png with alpha channel? (y/n skip:n) : ", False)

         io.log_info ("")
         self.over_res = 4 if self.suppress_seamless_jitter else 1

-    #override
-    def convert_face (self, img_bgr, img_face_landmarks, debug):
+    #overridable
+    def on_cli_initialize(self):
         if (self.mask_mode == 3 or self.mask_mode == 4) and self.fan_seg == None:
             self.fan_seg = FANSegmentator(256, FaceType.toString(FaceType.FULL) )

+    #override
+    def convert_face (self, img_bgr, img_face_landmarks, debug):
         if self.over_res != 1:
             img_bgr = cv2.resize ( img_bgr, ( img_bgr.shape[1]*self.over_res, img_bgr.shape[0]*self.over_res ) )
             img_face_landmarks = img_face_landmarks*self.over_res

         if debug:
             debugs = [img_bgr.copy()]

         img_size = img_bgr.shape[1], img_bgr.shape[0]

         img_face_mask_a = LandmarksProcessor.get_image_hull_mask (img_bgr.shape, img_face_landmarks)

         face_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, self.output_size, face_type=self.face_type)
         face_output_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, self.output_size, face_type=self.face_type, scale=self.output_face_scale)

         dst_face_bgr = cv2.warpAffine( img_bgr , face_mat, (self.output_size, self.output_size), flags=cv2.INTER_LANCZOS4 )
         dst_face_mask_a_0 = cv2.warpAffine( img_face_mask_a, face_mat, (self.output_size, self.output_size), flags=cv2.INTER_LANCZOS4 )

         predictor_input_bgr = cv2.resize (dst_face_bgr, (self.predictor_input_size,self.predictor_input_size))
         predictor_input_mask_a_0 = cv2.resize (dst_face_mask_a_0, (self.predictor_input_size,self.predictor_input_size))
         predictor_input_mask_a = np.expand_dims (predictor_input_mask_a_0, -1)

         predicted_bgra = self.predictor_func ( np.concatenate( (predictor_input_bgr, predictor_input_mask_a), -1) )

         prd_face_bgr = np.clip (predicted_bgra[:,:,0:3], 0, 1.0 )
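The tail of this hunk pins down the predictor's I/O contract: the hull mask is appended as a fourth channel on input, and the network's BGRA output is split back into face and mask. A self-contained numpy sketch of that contract; the identity predictor_func is a stand-in for the real model:

```python
import numpy as np

size = 128                                           # example predictor_input_size
face_bgr = np.random.rand(size, size, 3).astype(np.float32)
mask_a = np.ones((size, size, 1), dtype=np.float32)  # hull mask as alpha

inp = np.concatenate((face_bgr, mask_a), -1)         # (128, 128, 4) network input

def predictor_func(x):                               # stand-in for the real model
    return x                                         # real model returns BGRA

predicted_bgra = predictor_func(inp)
prd_face_bgr = np.clip(predicted_bgra[:, :, 0:3], 0, 1.0)
prd_face_mask_a = predicted_bgra[:, :, 3:4]          # learned mask channel
```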
@@ -132,7 +133,7 @@ class ConverterMasked(Converter):
         if self.mask_mode == 2: #dst
             prd_face_mask_a_0 = predictor_input_mask_a_0
         elif self.mask_mode == 3: #FAN-prd
             prd_face_bgr_256 = cv2.resize (prd_face_bgr, (256,256) )
             prd_face_bgr_256_mask = self.fan_seg.extract_from_bgr( np.expand_dims(prd_face_bgr_256,0) ) [0]
             prd_face_mask_a_0 = cv2.resize (prd_face_bgr_256_mask, (self.predictor_input_size, self.predictor_input_size))
         elif self.mask_mode == 4: #FAN-dst
@@ -140,19 +141,19 @@ class ConverterMasked(Converter):
             dst_face_256_bgr = cv2.warpAffine(img_bgr, face_256_mat, (256, 256), flags=cv2.INTER_LANCZOS4 )
             dst_face_256_mask = self.fan_seg.extract_from_bgr( np.expand_dims(dst_face_256_bgr,0) ) [0]
             prd_face_mask_a_0 = cv2.resize (dst_face_256_mask, (self.predictor_input_size, self.predictor_input_size))

         prd_face_mask_a_0[ prd_face_mask_a_0 < 0.001 ] = 0.0

         prd_face_mask_a = np.expand_dims (prd_face_mask_a_0, axis=-1)
         prd_face_mask_aaa = np.repeat (prd_face_mask_a, (3,), axis=-1)

         img_face_mask_aaa = cv2.warpAffine( prd_face_mask_aaa, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4 )
         img_face_mask_aaa = np.clip (img_face_mask_aaa, 0.0, 1.0)
         img_face_mask_aaa [ img_face_mask_aaa <= 0.1 ] = 0.0 #get rid of noise

         if debug:
             debugs += [img_face_mask_aaa.copy()]

         if 'seamless' in self.mode:
             #mask used for cv2.seamlessClone
             img_face_seamless_mask_aaa = None
@@ -163,26 +164,26 @@ class ConverterMasked(Converter):
                 img_face_seamless_mask_aaa = img_face_mask_aaa.copy()
                 img_face_seamless_mask_aaa[a] = 1.0
                 img_face_seamless_mask_aaa[img_face_seamless_mask_aaa <= i / 10.0] = 0.0

         out_img = img_bgr.copy()

         if self.mode == 'raw':
             if self.raw_mode == 'rgb' or self.raw_mode == 'rgb-mask':
                 out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT )

             if self.raw_mode == 'rgb-mask':
                 out_img = np.concatenate ( [out_img, np.expand_dims (img_face_mask_aaa[:,:,0],-1)], -1 )

             if self.raw_mode == 'mask-only':
                 out_img = img_face_mask_aaa

             if self.raw_mode == 'predicted-only':
                 out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(out_img.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT )

         elif ('seamless' not in self.mode) or (img_face_seamless_mask_aaa is not None):
             #averaging [lenx, leny, maskx, masky] by grayscale gradients of upscaled mask
             ar = []
             for i in range(1, 10):
                 maxregion = np.argwhere( img_face_mask_aaa > i / 10.0 )
                 if maxregion.size != 0:
                     miny,minx = maxregion.min(axis=0)[:2]
@@ -193,32 +194,32 @@ class ConverterMasked(Converter):
                     masky = ( miny+(leny/2) )
                     if lenx >= 4 and leny >= 4:
                         ar += [ [ lenx, leny, maskx, masky] ]

             if len(ar) > 0:
                 lenx, leny, maskx, masky = np.mean ( ar, axis=0 )

                 if debug:
                     io.log_info ("lenx/leny:(%d/%d) maskx/masky:(%f/%f)" % (lenx, leny, maskx, masky ) )

                 maskx = int( maskx )
                 masky = int( masky )

                 lowest_len = min (lenx, leny)

                 if debug:
                     io.log_info ("lowest_len = %f" % (lowest_len) )

                 img_mask_blurry_aaa = img_face_mask_aaa

                 if self.erode_mask_modifier != 0:
                     ero = int( lowest_len * ( 0.126 - lowest_len * 0.00004551365 ) * 0.01*self.erode_mask_modifier )
                     if debug:
                         io.log_info ("erode_size = %d" % (ero) )
                     if ero > 0:
                         img_mask_blurry_aaa = cv2.erode(img_mask_blurry_aaa, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ero,ero)), iterations = 1 )
                     elif ero < 0:
                         img_mask_blurry_aaa = cv2.dilate(img_mask_blurry_aaa, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(-ero,-ero)), iterations = 1 )

                 if self.seamless_erode_mask_modifier != 0:
                     ero = int( lowest_len * ( 0.126 - lowest_len * 0.00004551365 ) * 0.01*self.seamless_erode_mask_modifier )
                     if debug:
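The erode size heuristic shrinks with mask size via the quadratic term. A quick worked example with assumed inputs:

```python
# Worked example of the erode-size formula above (inputs are illustrative).
lowest_len = 200.0            # shorter side of the averaged mask bbox, in px
erode_mask_modifier = 50      # user-chosen value in [-200..200]

ero = int(lowest_len * (0.126 - lowest_len * 0.00004551365)
          * 0.01 * erode_mask_modifier)
print(ero)                    # -> 11: an 11x11 elliptical kernel erodes the mask
```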
@@ -228,39 +229,39 @@ class ConverterMasked(Converter):
                     elif ero < 0:
                         img_face_seamless_mask_aaa = cv2.dilate(img_face_seamless_mask_aaa, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(-ero,-ero)), iterations = 1 )
                     img_face_seamless_mask_aaa = np.clip (img_face_seamless_mask_aaa, 0, 1)

                 if self.clip_hborder_mask_per > 0: #clip hborder before blur
                     prd_hborder_rect_mask_a = np.ones ( prd_face_mask_a.shape, dtype=np.float32)
                     prd_border_size = int ( prd_hborder_rect_mask_a.shape[1] * self.clip_hborder_mask_per )
                     prd_hborder_rect_mask_a[:,0:prd_border_size,:] = 0
                     prd_hborder_rect_mask_a[:,-prd_border_size:,:] = 0
                     prd_hborder_rect_mask_a = np.expand_dims(cv2.blur(prd_hborder_rect_mask_a, (prd_border_size, prd_border_size) ),-1)

                     img_prd_hborder_rect_mask_a = cv2.warpAffine( prd_hborder_rect_mask_a, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4 )
                     img_prd_hborder_rect_mask_a = np.expand_dims (img_prd_hborder_rect_mask_a, -1)
                     img_mask_blurry_aaa *= img_prd_hborder_rect_mask_a
                     img_mask_blurry_aaa = np.clip( img_mask_blurry_aaa, 0, 1.0 )

                     if debug:
                         debugs += [img_mask_blurry_aaa.copy()]

                 if self.blur_mask_modifier > 0:
                     blur = int( lowest_len * 0.10 * 0.01*self.blur_mask_modifier )
                     if debug:
                         io.log_info ("blur_size = %d" % (blur) )
                     if blur > 0:
                         img_mask_blurry_aaa = cv2.blur(img_mask_blurry_aaa, (blur, blur) )

                 img_mask_blurry_aaa = np.clip( img_mask_blurry_aaa, 0, 1.0 )

                 if debug:
                     debugs += [img_mask_blurry_aaa.copy()]

                 if self.color_transfer_mode is not None:
                     if self.color_transfer_mode == 'rct':
                         if debug:
                             debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]

                         prd_face_bgr = image_utils.reinhard_color_transfer ( np.clip( (prd_face_bgr*255).astype(np.uint8), 0, 255),
                                                                              np.clip( (dst_face_bgr*255).astype(np.uint8), 0, 255),
                                                                              source_mask=prd_face_mask_a, target_mask=prd_face_mask_a)
@@ -268,96 +269,95 @@ class ConverterMasked(Converter):

                         if debug:
                             debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]

                     elif self.color_transfer_mode == 'lct':
                         if debug:
                             debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]

                         prd_face_bgr = image_utils.linear_color_transfer (prd_face_bgr, dst_face_bgr)
                         prd_face_bgr = np.clip( prd_face_bgr, 0.0, 1.0)

                         if debug:
                             debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]

                 if self.mode == 'hist-match-bw':
                     prd_face_bgr = cv2.cvtColor(prd_face_bgr, cv2.COLOR_BGR2GRAY)
                     prd_face_bgr = np.repeat( np.expand_dims (prd_face_bgr, -1), (3,), -1 )

                 if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
                     if debug:
                         debugs += [ cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT ) ]

                     hist_mask_a = np.ones ( prd_face_bgr.shape[:2] + (1,) , dtype=np.float32)

                     if self.masked_hist_match:
                         hist_mask_a *= prd_face_mask_a

                     hist_match_1 = prd_face_bgr*hist_mask_a + (1.0-hist_mask_a)* np.ones ( prd_face_bgr.shape[:2] + (1,) , dtype=np.float32)
                     hist_match_1[ hist_match_1 > 1.0 ] = 1.0

                     hist_match_2 = dst_face_bgr*hist_mask_a + (1.0-hist_mask_a)* np.ones ( prd_face_bgr.shape[:2] + (1,) , dtype=np.float32)
                     hist_match_2[ hist_match_1 > 1.0 ] = 1.0

                     prd_face_bgr = image_utils.color_hist_match(hist_match_1, hist_match_2, self.hist_match_threshold )

                 if self.mode == 'hist-match-bw':
                     prd_face_bgr = prd_face_bgr.astype(dtype=np.float32)

                 out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT )
                 out_img = np.clip(out_img, 0.0, 1.0)

                 if debug:
                     debugs += [out_img.copy()]

                 if self.mode == 'overlay':
                     pass

                 if 'seamless' in self.mode:
                     try:
                         out_img = cv2.seamlessClone( (out_img*255).astype(np.uint8), (img_bgr*255).astype(np.uint8), (img_face_seamless_mask_aaa*255).astype(np.uint8), (maskx,masky) , cv2.NORMAL_CLONE )
                         out_img = out_img.astype(dtype=np.float32) / 255.0
                     except Exception as e:
                         #seamlessClone may fail in some cases
                         e_str = traceback.format_exc()

                         if 'MemoryError' in e_str:
                             raise Exception("Seamless fail: " + e_str) #reraise MemoryError in order to reprocess this data by other processes
                         else:
                             print ("Seamless fail: " + e_str)

                     if debug:
                         debugs += [out_img.copy()]

                 out_img = np.clip( img_bgr*(1-img_mask_blurry_aaa) + (out_img*img_mask_blurry_aaa) , 0, 1.0 )

                 if self.mode == 'seamless-hist-match':
                     out_face_bgr = cv2.warpAffine( out_img, face_mat, (self.output_size, self.output_size) )
                     new_out_face_bgr = image_utils.color_hist_match(out_face_bgr, dst_face_bgr, self.hist_match_threshold)
                     new_out = cv2.warpAffine( new_out_face_bgr, face_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT )
                     out_img = np.clip( img_bgr*(1-img_mask_blurry_aaa) + (new_out*img_mask_blurry_aaa) , 0, 1.0 )

                 if self.final_image_color_degrade_power != 0:
                     if debug:
                         debugs += [out_img.copy()]
                     out_img_reduced = image_utils.reduce_colors(out_img, 256)
                     if self.final_image_color_degrade_power == 100:
                         out_img = out_img_reduced
                     else:
                         alpha = self.final_image_color_degrade_power / 100.0
                         out_img = (out_img*(1.0-alpha) + out_img_reduced*alpha)

                 if self.alpha:
                     out_img = np.concatenate ( [out_img, np.expand_dims (img_mask_blurry_aaa[:,:,0],-1)], -1 )

         if self.over_res != 1:
             out_img = cv2.resize ( out_img, ( img_bgr.shape[1] // self.over_res, img_bgr.shape[0] // self.over_res ) )

         out_img = np.clip (out_img, 0.0, 1.0 )

         if debug:
             debugs += [out_img.copy()]

-        return debugs if debug else out_img
-

+        return debugs if debug else out_img
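cv2.seamlessClone works on 8-bit images and needs a center point inside the mask, which is why the float32 [0..1] buffers are scaled to uint8 and back, and why a MemoryError is re-raised so another worker can retry the frame. A standalone sketch of that conversion dance with toy inputs:

```python
import cv2
import numpy as np

h = w = 256
out_img = np.random.rand(h, w, 3).astype(np.float32)    # composited face, [0..1]
img_bgr = np.random.rand(h, w, 3).astype(np.float32)    # original frame, [0..1]
mask = np.zeros((h, w, 3), dtype=np.float32)
mask[64:192, 64:192, :] = 1.0                            # toy seamless mask
maskx, masky = 128, 128                                  # mask centroid

blended = cv2.seamlessClone((out_img * 255).astype(np.uint8),
                            (img_bgr * 255).astype(np.uint8),
                            (mask * 255).astype(np.uint8),
                            (maskx, masky), cv2.NORMAL_CLONE)
blended = blended.astype(np.float32) / 255.0             # back to float range
```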
@@ -2,7 +2,7 @@

 Windows builds with all dependencies included are released regularly. Only the NVIDIA GeForce display driver needs to be installed. Prebuilt DeepFaceLab, including GPU and CPU versions, can be downloaded from

-[Mega](https://mega.nz/#F!b9MzCK4B!zEAG9txu7uaRUjXz9PtBqg) or [BitTorrent](https://rutracker.org/forum/viewtopic.php?p=75318742) (magnet link inside).
+[Mega](https://mega.nz/#F!b9MzCK4B!zEAG9txu7uaRUjXz9PtBqg)

 Available builds:

BIN doc/manual_en_google_translated.docx (new file; binary file not shown)
BIN doc/manual_en_google_translated.pdf (new file; binary file not shown)
BIN doc/manual_ru_source.docx (new file; binary file not shown)
doc/manual_ru_source.xml (46892 lines changed; file diff suppressed because one or more lines are too long)
main.py (2 lines changed)

@@ -136,7 +136,7 @@ if __name__ == "__main__":
     p = videoed_parser.add_parser( "extract-video", help="Extract images from video file.")
     p.add_argument('--input-file', required=True, action=fixPathAction, dest="input_file", help="Input file to be processed. Specify .*-extension to find first file.")
     p.add_argument('--output-dir', required=True, action=fixPathAction, dest="output_dir", help="Output directory. This is where the extracted images will be stored.")
-    p.add_argument('--ouptut-ext', dest="output_ext", default='png', help="Image format (extension) of output files.")
+    p.add_argument('--ouptut-ext', dest="output_ext", default=None, help="Image format (extension) of output files.")
     p.add_argument('--fps', type=int, dest="fps", default=None, help="How many frames of every second of the video will be extracted. 0 - full fps.")
     p.set_defaults(func=process_videoed_extract_video)

@@ -40,6 +40,8 @@ class ConvertSubprocessor(Subprocessor):
         #therefore forcing active_DeviceConfig to CPU only
         nnlib.active_DeviceConfig = nnlib.DeviceConfig (cpu_only=True)

+        self.converter.on_cli_initialize()
+
         return None

     #override
@@ -50,32 +50,36 @@ class SAEModel(ModelBase):
         self.options['optimizer_mode'] = self.options.get('optimizer_mode', 1)

         if is_first_run:
-            self.options['archi'] = io.input_str ("AE architecture (df, liae, vg ?:help skip:%s) : " % (default_archi) , default_archi, ['df','liae','vg'], help_message="'df' keeps faces more natural. 'liae' can fix overly different face shapes. 'vg' - currently testing.").lower()
+            self.options['archi'] = io.input_str ("AE architecture (df, liae ?:help skip:%s) : " % (default_archi) , default_archi, ['df','liae'], help_message="'df' keeps faces more natural. 'liae' can fix overly different face shapes.").lower()
         else:
             self.options['archi'] = self.options.get('archi', default_archi)

         default_ae_dims = 256 if self.options['archi'] == 'liae' else 512
-        default_ed_ch_dims = 42
-        def_ca_weights = False
+        default_e_ch_dims = 42
+        default_d_ch_dims = default_e_ch_dims // 2

         if is_first_run:
-            self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dims (32-1024 ?:help skip:%d) : " % (default_ae_dims) , default_ae_dims, help_message="More dims are better, but requires more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )
-            self.options['ed_ch_dims'] = np.clip ( io.input_int("Encoder/Decoder dims per channel (21-85 ?:help skip:%d) : " % (default_ed_ch_dims) , default_ed_ch_dims, help_message="More dims are better, but requires more VRAM. You can fine-tune model size to fit your GPU." ), 21, 85 )
-            self.options['ca_weights'] = io.input_bool ("Use CA weights? (y/n, ?:help skip: %s ) : " % (yn_str[def_ca_weights]), def_ca_weights, help_message="Initialize network with 'Convolution Aware' weights. This may help to achieve a higher accuracy model, but consumes time at first run and sometime cause model collapse.")
+            self.options['ae_dims'] = np.clip ( io.input_int("AutoEncoder dims (32-1024 ?:help skip:%d) : " % (default_ae_dims) , default_ae_dims, help_message="All face information will packed to AE dims. If amount of AE dims are not enough, then for example closed eyes will not be recognized. More dims are better, but require more VRAM. You can fine-tune model size to fit your GPU." ), 32, 1024 )
+            self.options['e_ch_dims'] = np.clip ( io.input_int("Encoder dims per channel (21-85 ?:help skip:%d) : " % (default_e_ch_dims) , default_e_ch_dims, help_message="More encoder dims help to recognize more facial features, but require more VRAM. You can fine-tune model size to fit your GPU." ), 21, 85 )
+            default_d_ch_dims = self.options['e_ch_dims'] // 2
+            self.options['d_ch_dims'] = np.clip ( io.input_int("Decoder dims per channel (10-85 ?:help skip:%d) : " % (default_d_ch_dims) , default_d_ch_dims, help_message="More decoder dims help to get better details, but require more VRAM. You can fine-tune model size to fit your GPU." ), 10, 85 )
+            self.options['d_residual_blocks'] = io.input_bool ("Add residual blocks to decoder? (y/n, ?:help skip:n) : ", False, help_message="These blocks help to get better details, but require more computing time.")
+            self.options['remove_gray_border'] = io.input_bool ("Remove gray border? (y/n, ?:help skip:n) : ", False, help_message="Removes gray border of predicted face, but requires more computing resources.")
         else:
             self.options['ae_dims'] = self.options.get('ae_dims', default_ae_dims)
-            self.options['ed_ch_dims'] = self.options.get('ed_ch_dims', default_ed_ch_dims)
-            self.options['ca_weights'] = self.options.get('ca_weights', def_ca_weights)
+            self.options['e_ch_dims'] = self.options.get('e_ch_dims', default_e_ch_dims)
+            self.options['d_ch_dims'] = self.options.get('d_ch_dims', default_d_ch_dims)
+            self.options['d_residual_blocks'] = self.options.get('d_residual_blocks', False)
+            self.options['remove_gray_border'] = self.options.get('remove_gray_border', False)

         if is_first_run:
             self.options['lighter_encoder'] = io.input_bool ("Use lightweight encoder? (y/n, ?:help skip:n) : ", False, help_message="Lightweight encoder is 35% faster, requires less VRAM, but sacrificing overall quality.")

-            if self.options['archi'] != 'vg':
-                self.options['multiscale_decoder'] = io.input_bool ("Use multiscale decoder? (y/n, ?:help skip:n) : ", False, help_message="Multiscale decoder helps to get better details.")
+            self.options['multiscale_decoder'] = io.input_bool ("Use multiscale decoder? (y/n, ?:help skip:n) : ", False, help_message="Multiscale decoder helps to get better details.")
         else:
             self.options['lighter_encoder'] = self.options.get('lighter_encoder', False)

-            if self.options['archi'] != 'vg':
-                self.options['multiscale_decoder'] = self.options.get('multiscale_decoder', False)
+            self.options['multiscale_decoder'] = self.options.get('multiscale_decoder', False)

         default_face_style_power = 0.0
         default_bg_style_power = 0.0
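This option handling also explains the "old SAE model will not work" warning at the top: on later runs each option is re-read with self.options.get(key, default), and since 'ed_ch_dims' and 'ca_weights' were renamed or dropped rather than migrated, a saved model's old keys are simply ignored. A minimal sketch of the pattern (values are illustrative):

```python
# Sketch of the first-run vs. later-run option flow used above.
saved_options = {'ed_ch_dims': 42}        # what an old model file might carry
options = dict(saved_options)
is_first_run = False

default_e_ch_dims = 42
if is_first_run:
    options['e_ch_dims'] = 64             # value typed at the prompt, clipped 21..85
else:
    # the old key 'ed_ch_dims' is never consulted -> falls back to the default
    options['e_ch_dims'] = options.get('e_ch_dims', default_e_ch_dims)
```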
@@ -103,11 +107,13 @@ class SAEModel(ModelBase):

         resolution = self.options['resolution']
         ae_dims = self.options['ae_dims']
-        ed_ch_dims = self.options['ed_ch_dims']
+        e_ch_dims = self.options['e_ch_dims']
+        d_ch_dims = self.options['d_ch_dims']
+        d_residual_blocks = self.options['d_residual_blocks']
         bgr_shape = (resolution, resolution, 3)
         mask_shape = (resolution, resolution, 1)

-        self.ms_count = ms_count = 3 if (self.options['archi'] != 'vg' and self.options['multiscale_decoder']) else 1
+        self.ms_count = ms_count = 3 if (self.options['multiscale_decoder']) else 1

         masked_training = True

@@ -124,26 +130,27 @@ class SAEModel(ModelBase):
         target_dst_ar = [ Input ( ( bgr_shape[0] // (2**i) ,)*2 + (bgr_shape[-1],) ) for i in range(ms_count-1, -1, -1)]
         target_dstm_ar = [ Input ( ( mask_shape[0] // (2**i) ,)*2 + (mask_shape[-1],) ) for i in range(ms_count-1, -1, -1)]

-        use_bn = False
+        padding = 'reflect' if self.options['remove_gray_border'] else 'zero'
+        common_flow_kwargs = { 'padding': padding }

         models_list = []
         weights_to_load = []
         if self.options['archi'] == 'liae':
-            self.encoder = modelify(SAEModel.LIAEEncFlow(resolution, self.options['lighter_encoder'], ed_ch_dims=ed_ch_dims, use_bn=use_bn) ) (Input(bgr_shape))
+            self.encoder = modelify(SAEModel.LIAEEncFlow(resolution, self.options['lighter_encoder'], ch_dims=e_ch_dims, **common_flow_kwargs) ) (Input(bgr_shape))

             enc_output_Inputs = [ Input(K.int_shape(x)[1:]) for x in self.encoder.outputs ]

-            self.inter_B = modelify(SAEModel.LIAEInterFlow(resolution, ae_dims=ae_dims, use_bn=use_bn)) (enc_output_Inputs)
-            self.inter_AB = modelify(SAEModel.LIAEInterFlow(resolution, ae_dims=ae_dims, use_bn=use_bn)) (enc_output_Inputs)
+            self.inter_B = modelify(SAEModel.LIAEInterFlow(resolution, ae_dims=ae_dims, **common_flow_kwargs)) (enc_output_Inputs)
+            self.inter_AB = modelify(SAEModel.LIAEInterFlow(resolution, ae_dims=ae_dims, **common_flow_kwargs)) (enc_output_Inputs)

             inter_output_Inputs = [ Input( np.array(K.int_shape(x)[1:])*(1,1,2) ) for x in self.inter_B.outputs ]

-            self.decoder = modelify(SAEModel.LIAEDecFlow (bgr_shape[2],ed_ch_dims=ed_ch_dims//2, multiscale_count=self.ms_count, use_bn=use_bn )) (inter_output_Inputs)
+            self.decoder = modelify(SAEModel.LIAEDecFlow (bgr_shape[2],ch_dims=d_ch_dims, multiscale_count=self.ms_count, add_residual_blocks=d_residual_blocks, **common_flow_kwargs)) (inter_output_Inputs)

             models_list += [self.encoder, self.inter_B, self.inter_AB, self.decoder]

             if self.options['learn_mask']:
-                self.decoderm = modelify(SAEModel.LIAEDecFlow (mask_shape[2],ed_ch_dims=int(ed_ch_dims/1.5), use_bn=use_bn )) (inter_output_Inputs)
+                self.decoderm = modelify(SAEModel.LIAEDecFlow (mask_shape[2],ch_dims=d_ch_dims, **common_flow_kwargs)) (inter_output_Inputs)
                 models_list += [self.decoderm]

             if not self.is_first_run():
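Zero ('same') padding feeds zeros into every edge convolution, which is what produces the gray ring the new "remove gray border" option targets; reflect padding mirrors real face pixels instead. Keras Conv2D has no reflect mode, so the padding kwarg here is routed through DeepFaceLab's own layer wrappers; the border effect itself can be seen with plain numpy:

```python
import numpy as np

x = np.full((4, 4), 0.8, dtype=np.float32)       # a uniformly bright face crop
zero_pad = np.pad(x, 1, mode='constant')         # zeros at the border
reflect_pad = np.pad(x, 1, mode='reflect')       # mirrored real pixels

# A 3x3 box filter at a corner mixes in 5 zeros under zero padding:
print(zero_pad[0:3, 0:3].mean())                 # ~0.36 -> darkened (gray) edge
print(reflect_pad[0:3, 0:3].mean())              # 0.8   -> edge keeps face values
```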
@@ -176,58 +183,18 @@ class SAEModel(ModelBase):
                 pred_src_dstm = self.decoderm(warped_src_dst_inter_code)

         elif self.options['archi'] == 'df':
-            self.encoder = modelify(SAEModel.DFEncFlow(resolution, self.options['lighter_encoder'], ae_dims=ae_dims, ed_ch_dims=ed_ch_dims) ) (Input(bgr_shape))
+            self.encoder = modelify(SAEModel.DFEncFlow(resolution, self.options['lighter_encoder'], ae_dims=ae_dims, ch_dims=e_ch_dims, **common_flow_kwargs) ) (Input(bgr_shape))

             dec_Inputs = [ Input(K.int_shape(x)[1:]) for x in self.encoder.outputs ]

-            self.decoder_src = modelify(SAEModel.DFDecFlow (bgr_shape[2],ed_ch_dims=ed_ch_dims//2, multiscale_count=self.ms_count )) (dec_Inputs)
-            self.decoder_dst = modelify(SAEModel.DFDecFlow (bgr_shape[2],ed_ch_dims=ed_ch_dims//2, multiscale_count=self.ms_count )) (dec_Inputs)
+            self.decoder_src = modelify(SAEModel.DFDecFlow (bgr_shape[2],ch_dims=d_ch_dims, multiscale_count=self.ms_count, add_residual_blocks=d_residual_blocks, **common_flow_kwargs )) (dec_Inputs)
+            self.decoder_dst = modelify(SAEModel.DFDecFlow (bgr_shape[2],ch_dims=d_ch_dims, multiscale_count=self.ms_count, add_residual_blocks=d_residual_blocks, **common_flow_kwargs )) (dec_Inputs)

             models_list += [self.encoder, self.decoder_src, self.decoder_dst]

             if self.options['learn_mask']:
-                self.decoder_srcm = modelify(SAEModel.DFDecFlow (mask_shape[2],ed_ch_dims=int(ed_ch_dims/1.5) )) (dec_Inputs)
-                self.decoder_dstm = modelify(SAEModel.DFDecFlow (mask_shape[2],ed_ch_dims=int(ed_ch_dims/1.5) )) (dec_Inputs)
-                models_list += [self.decoder_srcm, self.decoder_dstm]
-
-            if not self.is_first_run():
-                weights_to_load += [ [self.encoder , 'encoder.h5'],
-                                     [self.decoder_src, 'decoder_src.h5'],
-                                     [self.decoder_dst, 'decoder_dst.h5']
-                                   ]
-                if self.options['learn_mask']:
-                    weights_to_load += [ [self.decoder_srcm, 'decoder_srcm.h5'],
-                                         [self.decoder_dstm, 'decoder_dstm.h5'],
-                                       ]
-
-            warped_src_code = self.encoder (warped_src)
-            warped_dst_code = self.encoder (warped_dst)
-            pred_src_src = self.decoder_src(warped_src_code)
-            pred_dst_dst = self.decoder_dst(warped_dst_code)
-            pred_src_dst = self.decoder_src(warped_dst_code)
-
-            if self.options['learn_mask']:
-                pred_src_srcm = self.decoder_srcm(warped_src_code)
-                pred_dst_dstm = self.decoder_dstm(warped_dst_code)
-                pred_src_dstm = self.decoder_srcm(warped_dst_code)
-
-        elif self.options['archi'] == 'vg':
-            self.encoder = modelify(SAEModel.VGEncFlow(resolution, self.options['lighter_encoder'], ae_dims=ae_dims, ed_ch_dims=ed_ch_dims) ) (Input(bgr_shape))
-
-            dec_Inputs = [ Input(K.int_shape(x)[1:]) for x in self.encoder.outputs ]
-
-            self.decoder_src = modelify(SAEModel.VGDecFlow (bgr_shape[2],ed_ch_dims=ed_ch_dims//2 )) (dec_Inputs)
-            self.decoder_dst = modelify(SAEModel.VGDecFlow (bgr_shape[2],ed_ch_dims=ed_ch_dims//2 )) (dec_Inputs)
-
-            models_list += [self.encoder, self.decoder_src, self.decoder_dst]
-
-            if self.options['learn_mask']:
-                self.decoder_srcm = modelify(SAEModel.VGDecFlow (mask_shape[2],ed_ch_dims=int(ed_ch_dims/1.5) )) (dec_Inputs)
-                self.decoder_dstm = modelify(SAEModel.VGDecFlow (mask_shape[2],ed_ch_dims=int(ed_ch_dims/1.5) )) (dec_Inputs)
+                self.decoder_srcm = modelify(SAEModel.DFDecFlow (mask_shape[2],ch_dims=d_ch_dims, **common_flow_kwargs )) (dec_Inputs)
+                self.decoder_dstm = modelify(SAEModel.DFDecFlow (mask_shape[2],ch_dims=d_ch_dims, **common_flow_kwargs )) (dec_Inputs)
                 models_list += [self.decoder_srcm, self.decoder_dstm]

             if not self.is_first_run():
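The 'df' branch that survives the 'vg' removal is the classic two-decoder autoencoder: one shared encoder, a decoder per identity, and the swap is just routing a dst code through the src decoder. A runnable toy sketch; the lambdas are stubs standing in for the real Keras models:

```python
import numpy as np

# Stubs stand in for the real Keras encoder/decoders.
encoder = lambda img: img.mean(axis=(0, 1))                  # image -> tiny code
decoder_src = lambda code: np.full((64, 64, 3), code.mean())
decoder_dst = lambda code: np.full((64, 64, 3), code.mean())

warped_src = np.random.rand(64, 64, 3)
warped_dst = np.random.rand(64, 64, 3)

pred_src_src = decoder_src(encoder(warped_src))   # trains src reconstruction
pred_dst_dst = decoder_dst(encoder(warped_dst))   # trains dst reconstruction
pred_src_dst = decoder_src(encoder(warped_dst))   # the actual face-swap path
```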
@@ -246,22 +213,11 @@ class SAEModel(ModelBase):
             pred_dst_dst = self.decoder_dst(warped_dst_code)
             pred_src_dst = self.decoder_src(warped_dst_code)

             if self.options['learn_mask']:
                 pred_src_srcm = self.decoder_srcm(warped_src_code)
                 pred_dst_dstm = self.decoder_dstm(warped_dst_code)
                 pred_src_dstm = self.decoder_srcm(warped_dst_code)

-        if self.is_first_run() and self.options['ca_weights']:
-            io.log_info ("Initializing CA weights...")
-            conv_weights_list = []
-            for model in models_list:
-                for layer in model.layers:
-                    if type(layer) == Conv2D:
-                        conv_weights_list += [layer.weights[0]] #Conv2D kernel_weights
-            CAInitializerMP ( conv_weights_list )
-
         pred_src_src, pred_dst_dst, pred_src_dst, = [ [x] if type(x) != list else x for x in [pred_src_src, pred_dst_dst, pred_src_dst, ] ]

         if self.options['learn_mask']:
@@ -406,7 +362,7 @@ class SAEModel(ModelBase):
                   ]
             if self.options['learn_mask']:
                 ar += [ [self.decoderm, 'decoderm.h5'] ]
-        elif self.options['archi'] == 'df' or self.options['archi'] == 'vg':
+        elif self.options['archi'] == 'df':
             ar += [[self.encoder, 'encoder.h5'],
                    [self.decoder_src, 'decoder_src.h5'],
                    [self.decoder_dst, 'decoder_dst.h5']
@@ -490,7 +446,7 @@ class SAEModel(ModelBase):
                                     base_blur_mask_modifier=base_blur_mask_modifier,
                                     default_erode_mask_modifier=default_erode_mask_modifier,
                                     default_blur_mask_modifier=default_blur_mask_modifier,
-                                    clip_hborder_mask_per=0.0625 if self.options['face_type'] == 'f' else 0)
+                                    clip_hborder_mask_per=0.0625 if (not self.options['remove_gray_border'] and self.options['face_type'] == 'f') else 0)

     @staticmethod
     def initialize_nn_functions():
@ -499,96 +455,110 @@ class SAEModel(ModelBase):
|
||||||
def BatchNorm():
|
def BatchNorm():
|
||||||
return BatchNormalization(axis=-1)
|
return BatchNormalization(axis=-1)
|
||||||

         class ResidualBlock(object):
-            def __init__(self, filters, kernel_size=3, padding='same', use_reflection_padding=False):
+            def __init__(self, filters, kernel_size=3, padding='zero', use_reflection_padding=False):
                 self.filters = filters
                 self.kernel_size = kernel_size
-                self.padding = padding #if not use_reflection_padding else 'valid'
-                self.use_reflection_padding = use_reflection_padding
+                self.padding = padding

             def __call__(self, inp):
-                var_x = LeakyReLU(alpha=0.2)(inp)
-                #if self.use_reflection_padding:
-                #    #var_x = ReflectionPadding2D(stride=1, kernel_size=kernel_size)(var_x)
+                var_x = inp
                 var_x = Conv2D(self.filters, kernel_size=self.kernel_size, padding=self.padding)(var_x)
                 var_x = LeakyReLU(alpha=0.2)(var_x)
-                #if self.use_reflection_padding:
-                #    #var_x = ReflectionPadding2D(stride=1, kernel_size=kernel_size)(var_x)
-                var_x = Conv2D(self.filters, kernel_size=self.kernel_size, padding=self.padding )(var_x)
-                var_x = Scale(gamma_init=keras.initializers.Constant(value=0.1))(var_x)
+                var_x = Conv2D(self.filters, kernel_size=self.kernel_size, padding=self.padding)(var_x)
                 var_x = Add()([var_x, inp])
                 var_x = LeakyReLU(alpha=0.2)(var_x)
                 return var_x
         SAEModel.ResidualBlock = ResidualBlock
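The rewritten block drops the pre-activation LeakyReLU and the Scale(0.1) gate on the residual branch: it is now two stacked convolutions with an identity shortcut and an activation after each stage. A standalone sketch in plain Keras, using padding='same' where the diff's 'zero' mode amounts to zero-padded convolutions (shapes are illustrative):

    from keras.layers import Add, Conv2D, Input, LeakyReLU
    from keras.models import Model

    def residual_block(filters, kernel_size=3):
        def block(inp):
            x = Conv2D(filters, kernel_size, padding='same')(inp)
            x = LeakyReLU(alpha=0.2)(x)
            x = Conv2D(filters, kernel_size, padding='same')(x)
            x = Add()([x, inp])               # identity shortcut
            return LeakyReLU(alpha=0.2)(x)
        return block

    inp = Input((64, 64, 128))                # illustrative shape
    net = Model(inp, residual_block(128)(inp))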

-        def downscale (dim, use_bn=False):
+        def ResidualBlock_pre (**base_kwargs):
+            def func(*args, **kwargs):
+                kwargs.update(base_kwargs)
+                return ResidualBlock(*args, **kwargs)
+            return func
+        SAEModel.ResidualBlock_pre = ResidualBlock_pre
+
+        def downscale (dim, padding='zero'):
             def func(x):
-                if use_bn:
-                    return LeakyReLU(0.1)(BatchNorm()(Conv2D(dim, kernel_size=5, strides=2, padding='same', use_bias=False)(x)))
-                else:
-                    return LeakyReLU(0.1)(Conv2D(dim, kernel_size=5, strides=2, padding='same')(x))
+                return LeakyReLU(0.1)(Conv2D(dim, kernel_size=5, strides=2, padding=padding)(x))
             return func
         SAEModel.downscale = downscale
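Every *_pre helper in this commit follows the same pattern: pre-bind shared keyword arguments (here the padding mode) and return a factory, so call sites can stay as short as downscale(dims)(x). It is essentially functools.partial, except that kwargs.update(base_kwargs) lets the pre-bound value win over a call-site duplicate, the opposite of partial. A sketch with an illustrative stand-in factory:

    import functools

    def downscale(dim, padding='zero'):
        return ('downscale', dim, padding)   # stand-in for the real layer factory

    def downscale_pre(**base_kwargs):
        def func(*args, **kwargs):
            kwargs.update(base_kwargs)       # pre-bound kwargs take precedence
            return downscale(*args, **kwargs)
        return func

    assert downscale_pre(padding='reflect')(64) == \
           functools.partial(downscale, padding='reflect')(64)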

-        def downscale_sep (dim, use_bn=False):
+        def downscale_pre (**base_kwargs):
+            def func(*args, **kwargs):
+                kwargs.update(base_kwargs)
+                return downscale(*args, **kwargs)
+            return func
+        SAEModel.downscale_pre = downscale_pre
+
+        def downscale_sep (dim, padding='zero'):
             def func(x):
-                if use_bn:
-                    return LeakyReLU(0.1)(BatchNorm()(SeparableConv2D(dim, kernel_size=5, strides=2, padding='same', use_bias=False )(x)))
-                else:
-                    return LeakyReLU(0.1)(SeparableConv2D(dim, kernel_size=5, strides=2, padding='same' )(x))
+                return LeakyReLU(0.1)(SeparableConv2D(dim, kernel_size=5, strides=2, padding=padding)(x))
             return func
         SAEModel.downscale_sep = downscale_sep

-        def upscale (dim, use_bn=False):
+        def downscale_sep_pre (**base_kwargs):
+            def func(*args, **kwargs):
+                kwargs.update(base_kwargs)
+                return downscale_sep(*args, **kwargs)
+            return func
+        SAEModel.downscale_sep_pre = downscale_sep_pre
+
+        def upscale (dim, padding='zero'):
             def func(x):
-                if use_bn:
-                    return SubpixelUpscaler()(LeakyReLU(0.1)(BatchNorm()(Conv2D(dim * 4, kernel_size=3, strides=1, padding='same', use_bias=False )(x))))
-                else:
-                    return SubpixelUpscaler()(LeakyReLU(0.1)(Conv2D(dim * 4, kernel_size=3, strides=1, padding='same')(x)))
+                return SubpixelUpscaler()(LeakyReLU(0.1)(Conv2D(dim * 4, kernel_size=3, strides=1, padding=padding)(x)))
             return func
         SAEModel.upscale = upscale
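upscale keeps the conv-then-pixel-shuffle scheme: a 3x3 convolution produces dim*4 channels and SubpixelUpscaler (a depth-to-space rearrangement) folds them into a map twice as wide and tall. A plain Keras/TensorFlow sketch, with a Lambda standing in for nnlib's SubpixelUpscaler:

    import tensorflow as tf
    from keras.layers import Conv2D, Input, Lambda, LeakyReLU
    from keras.models import Model

    def upscale(dim):
        def func(x):
            x = Conv2D(dim * 4, kernel_size=3, padding='same')(x)
            x = LeakyReLU(0.1)(x)
            # depth_to_space turns (H, W, 4*dim) into (2H, 2W, dim)
            return Lambda(lambda t: tf.nn.depth_to_space(t, 2))(x)
        return func

    inp = Input((16, 16, 64))
    print(Model(inp, upscale(64)(inp)).output_shape)   # (None, 32, 32, 64)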

-        def to_bgr (output_nc):
+        def upscale_pre (**base_kwargs):
+            def func(*args, **kwargs):
+                kwargs.update(base_kwargs)
+                return upscale(*args, **kwargs)
+            return func
+        SAEModel.upscale_pre = upscale_pre
+
+        def to_bgr (output_nc, padding='zero'):
             def func(x):
-                return Conv2D(output_nc, kernel_size=5, padding='same', activation='sigmoid')(x)
+                return Conv2D(output_nc, kernel_size=5, padding=padding, activation='sigmoid')(x)
             return func
         SAEModel.to_bgr = to_bgr

+        def to_bgr_pre (**base_kwargs):
+            def func(*args, **kwargs):
+                kwargs.update(base_kwargs)
+                return to_bgr(*args, **kwargs)
+            return func
+        SAEModel.to_bgr_pre = to_bgr_pre

     @staticmethod
-    def LIAEEncFlow(resolution, light_enc, ed_ch_dims=42, use_bn=False):
+    def LIAEEncFlow(resolution, light_enc, ch_dims, padding='zero', **kwargs):
         exec (nnlib.import_all(), locals(), globals())
-        upscale = SAEModel.upscale
-        downscale = SAEModel.downscale
-        downscale_sep = SAEModel.downscale_sep
+        upscale = SAEModel.upscale_pre(padding=padding)
+        downscale = SAEModel.downscale_pre(padding=padding)
+        downscale_sep = SAEModel.downscale_sep_pre(padding=padding)

         def func(input):
-            ed_dims = K.int_shape(input)[-1]*ed_ch_dims
+            dims = K.int_shape(input)[-1]*ch_dims

             x = input
-            x = downscale(ed_dims)(x)
+            x = downscale(dims)(x)
             if not light_enc:
-                x = downscale(ed_dims*2, use_bn=use_bn)(x)
-                x = downscale(ed_dims*4, use_bn=use_bn)(x)
-                x = downscale(ed_dims*8, use_bn=use_bn)(x)
+                x = downscale(dims*2)(x)
+                x = downscale(dims*4)(x)
+                x = downscale(dims*8)(x)
             else:
-                x = downscale_sep(ed_dims*2, use_bn=use_bn)(x)
-                x = downscale(ed_dims*4, use_bn=use_bn)(x)
-                x = downscale_sep(ed_dims*8, use_bn=use_bn)(x)
+                x = downscale_sep(dims*2)(x)
+                x = downscale(dims*4)(x)
+                x = downscale_sep(dims*8)(x)

             x = Flatten()(x)
             return x
         return func

     @staticmethod
-    def LIAEInterFlow(resolution, ae_dims=256, use_bn=False):
+    def LIAEInterFlow(resolution, ae_dims=256, padding='zero', **kwargs):
         exec (nnlib.import_all(), locals(), globals())
-        upscale = SAEModel.upscale
+        upscale = SAEModel.upscale_pre(padding=padding)
         lowest_dense_res=resolution // 16

         def func(input):
@@ -596,32 +566,45 @@ class SAEModel(ModelBase):
             x = Dense(ae_dims)(x)
             x = Dense(lowest_dense_res * lowest_dense_res * ae_dims*2)(x)
             x = Reshape((lowest_dense_res, lowest_dense_res, ae_dims*2))(x)
-            x = upscale(ae_dims*2, use_bn=use_bn)(x)
+            x = upscale(ae_dims*2)(x)
             return x
         return func
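The intermediate flow squeezes the flattened encoder features through Dense(ae_dims), re-expands them onto a lowest_dense_res grid (resolution // 16) with doubled channel count, and upscales once. A quick shape check under illustrative settings (resolution=128, ae_dims=256):

    resolution, ae_dims = 128, 256            # illustrative settings
    lowest_dense_res = resolution // 16       # 8
    expanded = lowest_dense_res * lowest_dense_res * ae_dims * 2
    print(expanded)                           # 32768 values -> Reshape((8, 8, 512))
    # the single upscale then yields a (16, 16, 512) feature map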

     @staticmethod
-    def LIAEDecFlow(output_nc,ed_ch_dims=21, multiscale_count=1, use_bn=False):
+    def LIAEDecFlow(output_nc,ch_dims, multiscale_count=1, add_residual_blocks=False, padding='zero', **kwargs):
         exec (nnlib.import_all(), locals(), globals())
-        upscale = SAEModel.upscale
-        to_bgr = SAEModel.to_bgr
-        ed_dims = output_nc * ed_ch_dims
+        upscale = SAEModel.upscale_pre(padding=padding)
+        to_bgr = SAEModel.to_bgr_pre(padding=padding)
+        dims = output_nc * ch_dims
+        ResidualBlock = SAEModel.ResidualBlock_pre(padding=padding)

         def func(input):
             x = input[0]

             outputs = []
-            x1 = upscale(ed_dims*8, use_bn=use_bn)( x )
+            x1 = upscale(dims*8)( x )
+
+            if add_residual_blocks:
+                x1 = ResidualBlock(dims*8)(x1)
+                x1 = ResidualBlock(dims*8)(x1)

             if multiscale_count >= 3:
                 outputs += [ to_bgr(output_nc) ( x1 ) ]

-            x2 = upscale(ed_dims*4, use_bn=use_bn)( x1 )
+            x2 = upscale(dims*4)( x1 )
+
+            if add_residual_blocks:
+                x2 = ResidualBlock(dims*4)(x2)
+                x2 = ResidualBlock(dims*4)(x2)

             if multiscale_count >= 2:
                 outputs += [ to_bgr(output_nc) ( x2 ) ]

-            x3 = upscale(ed_dims*2, use_bn=use_bn)( x2 )
+            x3 = upscale(dims*2)( x2 )
+
+            if add_residual_blocks:
+                x3 = ResidualBlock( dims*2)(x3)
+                x3 = ResidualBlock( dims*2)(x3)

             outputs += [ to_bgr(output_nc) ( x3 ) ]

@@ -629,27 +612,27 @@ class SAEModel(ModelBase):
         return func
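With multiscale_count=3 the decoder emits a BGR head after every upscale stage, so the loss can supervise the face at quarter, half, and full output resolution; add_residual_blocks (the new "Add residual blocks to decoder?" option) slots two residual blocks behind each upscale. A skeleton of that control flow, with the layer factories abstracted away:

    def decoder_flow(upscale, res_block, to_bgr, dims,
                     multiscale_count=1, add_residual_blocks=False):
        # Sketch only: upscale/res_block are factories like the ones above;
        # to_bgr maps a feature map to a BGR image head.
        def func(x):
            outputs = []
            for i, mult in enumerate([8, 4, 2]):      # three upscale stages
                x = upscale(dims * mult)(x)
                if add_residual_blocks:
                    x = res_block(dims * mult)(x)
                    x = res_block(dims * mult)(x)
                if (3 - i) <= multiscale_count or i == 2:
                    outputs.append(to_bgr(x))         # emit this scale
            return outputs
        return func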

     @staticmethod
-    def DFEncFlow(resolution, light_enc, ae_dims=512, ed_ch_dims=42):
+    def DFEncFlow(resolution, light_enc, ae_dims, ch_dims, padding='zero', **kwargs):
         exec (nnlib.import_all(), locals(), globals())
-        upscale = SAEModel.upscale
-        downscale = SAEModel.downscale
-        downscale_sep = SAEModel.downscale_sep
+        upscale = SAEModel.upscale_pre(padding=padding)
+        downscale = SAEModel.downscale_pre(padding=padding)
+        downscale_sep = SAEModel.downscale_sep_pre(padding=padding)
         lowest_dense_res = resolution // 16

         def func(input):
             x = input

-            ed_dims = K.int_shape(input)[-1]*ed_ch_dims
+            dims = K.int_shape(input)[-1]*ch_dims

-            x = downscale(ed_dims)(x)
+            x = downscale(dims)(x)
             if not light_enc:
-                x = downscale(ed_dims*2)(x)
-                x = downscale(ed_dims*4)(x)
-                x = downscale(ed_dims*8)(x)
+                x = downscale(dims*2)(x)
+                x = downscale(dims*4)(x)
+                x = downscale(dims*8)(x)
             else:
-                x = downscale_sep(ed_dims*2)(x)
-                x = downscale_sep(ed_dims*4)(x)
-                x = downscale_sep(ed_dims*8)(x)
+                x = downscale_sep(dims*2)(x)
+                x = downscale(dims*4)(x)
+                x = downscale_sep(dims*8)(x)

             x = Dense(ae_dims)(Flatten()(x))
             x = Dense(lowest_dense_res * lowest_dense_res * ae_dims)(x)
@@ -660,27 +643,40 @@ class SAEModel(ModelBase):
         return func
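In the light_enc path the stride-2 convolutions become SeparableConv2D, which factorizes the full KxK convolution into a depthwise pass plus a 1x1 pointwise mix and cuts the parameter count drastically. A quick count for one illustrative stage (5x5 kernel, 128 -> 256 channels):

    # Parameters of a regular vs. separable 5x5 conv, 128 -> 256 channels.
    k, cin, cout = 5, 128, 256
    regular   = k*k*cin*cout + cout          # 819,456
    separable = k*k*cin + cin*cout + cout    # 36,224 (depthwise + pointwise + bias)
    print(regular, separable)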

     @staticmethod
-    def DFDecFlow(output_nc, ed_ch_dims=21, multiscale_count=1):
+    def DFDecFlow(output_nc, ch_dims, multiscale_count=1, add_residual_blocks=False, padding='zero', **kwargs):
         exec (nnlib.import_all(), locals(), globals())
-        upscale = SAEModel.upscale
-        to_bgr = SAEModel.to_bgr
-        ed_dims = output_nc * ed_ch_dims
+        upscale = SAEModel.upscale_pre(padding=padding)
+        to_bgr = SAEModel.to_bgr_pre(padding=padding)
+        dims = output_nc * ch_dims
+        ResidualBlock = SAEModel.ResidualBlock_pre(padding=padding)

         def func(input):
             x = input[0]

             outputs = []
-            x1 = upscale(ed_dims*8)( x )
+            x1 = upscale(dims*8)( x )
+
+            if add_residual_blocks:
+                x1 = ResidualBlock( dims*8 )(x1)
+                x1 = ResidualBlock( dims*8 )(x1)

             if multiscale_count >= 3:
                 outputs += [ to_bgr(output_nc) ( x1 ) ]

-            x2 = upscale(ed_dims*4)( x1 )
+            x2 = upscale(dims*4)( x1 )
+
+            if add_residual_blocks:
+                x2 = ResidualBlock( dims*4)(x2)
+                x2 = ResidualBlock( dims*4)(x2)

             if multiscale_count >= 2:
                 outputs += [ to_bgr(output_nc) ( x2 ) ]

-            x3 = upscale(ed_dims*2)( x2 )
+            x3 = upscale(dims*2)( x2 )
+
+            if add_residual_blocks:
+                x3 = ResidualBlock( dims*2)(x3)
+                x3 = ResidualBlock( dims*2)(x3)

             outputs += [ to_bgr(output_nc) ( x3 ) ]

@@ -688,107 +684,4 @@ class SAEModel(ModelBase):
         return func

-    @staticmethod
-    def VGEncFlow(resolution, light_enc, ae_dims=512, ed_ch_dims=42):
-        exec (nnlib.import_all(), locals(), globals())
-        upscale = SAEModel.upscale
-        downscale = SAEModel.downscale
-        downscale_sep = SAEModel.downscale_sep
-        ResidualBlock = SAEModel.ResidualBlock
-        lowest_dense_res = resolution // 16
-
-        def func(input):
-            x = input
-            ed_dims = K.int_shape(input)[-1]*ed_ch_dims
-            while np.modf(ed_dims / 4)[0] != 0.0:
-                ed_dims -= 1
-
-            in_conv_filters = ed_dims# if resolution <= 128 else ed_dims + (resolution//128)*ed_ch_dims
-
-            x = tmp_x = Conv2D (in_conv_filters, kernel_size=5, strides=2, padding='same') (x)
-
-            for _ in range ( 8 if light_enc else 16 ):
-                x = ResidualBlock(ed_dims)(x)
-
-            x = Add()([x, tmp_x])
-
-            x = downscale(ed_dims)(x)
-            x = SubpixelUpscaler()(x)
-
-            x = downscale(ed_dims)(x)
-            x = SubpixelUpscaler()(x)
-
-            x = downscale(ed_dims)(x)
-            if light_enc:
-                x = downscale_sep (ed_dims*2)(x)
-            else:
-                x = downscale (ed_dims*2)(x)
-
-            x = downscale(ed_dims*4)(x)
-
-            if light_enc:
-                x = downscale_sep (ed_dims*8)(x)
-            else:
-                x = downscale (ed_dims*8)(x)
-
-            x = Dense(ae_dims)(Flatten()(x))
-            x = Dense(lowest_dense_res * lowest_dense_res * ae_dims)(x)
-            x = Reshape((lowest_dense_res, lowest_dense_res, ae_dims))(x)
-            x = upscale(ae_dims)(x)
-            return x
-
-        return func
-
-    @staticmethod
-    def VGDecFlow(output_nc, ed_ch_dims=21, multiscale_count=1):
-        exec (nnlib.import_all(), locals(), globals())
-        upscale = SAEModel.upscale
-        to_bgr = SAEModel.to_bgr
-        ResidualBlock = SAEModel.ResidualBlock
-        ed_dims = output_nc * ed_ch_dims
-
-        def func(input):
-            x = input[0]
-
-            x = upscale( ed_dims*8 )(x)
-            x = ResidualBlock( ed_dims*8 )(x)
-
-            x = upscale( ed_dims*4 )(x)
-            x = ResidualBlock( ed_dims*4 )(x)
-
-            x = upscale( ed_dims*2 )(x)
-            x = ResidualBlock( ed_dims*2 )(x)
-
-            x = to_bgr(output_nc) (x)
-            return x
-
-        return func

 Model = SAEModel

-# 'worst' sample booster gives no good result, or I dont know how to filter worst samples properly.
-#
-##gathering array of sample_losses
-#self.src_sample_losses += [[src_sample_idxs[i], src_sample_losses[i]] for i in range(self.batch_size) ]
-#self.dst_sample_losses += [[dst_sample_idxs[i], dst_sample_losses[i]] for i in range(self.batch_size) ]
-#
-#if len(self.src_sample_losses) >= 128: #array is big enough
-#    #fetching idxs which losses are bigger than average
-#    x = np.array (self.src_sample_losses)
-#    self.src_sample_losses = []
-#    b = x[:,1]
-#    idxs = (x[:,0][ np.argwhere ( b [ b > (np.mean(b)+np.std(b)) ] )[:,0] ]).astype(np.uint)
-#    generators_list[0].repeat_sample_idxs(idxs) #ask generator to repeat these sample idxs
-#    print ("src repeated %d" % (len(idxs)) )
-#
-#if len(self.dst_sample_losses) >= 128: #array is big enough
-#    #fetching idxs which losses are bigger than average
-#    x = np.array (self.dst_sample_losses)
-#    self.dst_sample_losses = []
-#    b = x[:,1]
-#    idxs = (x[:,0][ np.argwhere ( b [ b > (np.mean(b)+np.std(b)) ] )[:,0] ]).astype(np.uint)
-#    generators_list[1].repeat_sample_idxs(idxs) #ask generator to repeat these sample idxs
-#    print ("dst repeated %d" % (len(idxs)) )
@@ -608,6 +608,20 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                     return K.tf.pad(x, [[0,0], [h_pad,h_pad], [w_pad,w_pad], [0,0] ], 'REFLECT')
                 elif backend == "plaidML":
                     return TileOP_ReflectionPadding2D.function(x, self.padding[0], self.padding[1])
+                else:
+                    if K.image_data_format() == 'channels_last':
+                        if x.shape.ndims == 4:
+                            w = K.concatenate ([ x[:,:,w_pad:0:-1,:],
+                                                 x,
+                                                 x[:,:,-2:-w_pad-2:-1,:] ], axis=2 )
+                            h = K.concatenate ([ w[:,h_pad:0:-1,:,:],
+                                                 w,
+                                                 w[:,-2:-h_pad-2:-1,:,:] ], axis=1 )
+                            return h
+                        else:
+                            raise NotImplemented
+                    else:
+                        raise NotImplemented

         nnlib.ReflectionPadding2D = ReflectionPadding2D
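The new fallback builds reflection padding from two K.concatenate calls over reversed slices, for backends without a native reflect pad. (As an aside, raise NotImplemented would itself fail at raise time; NotImplementedError is the exception class.) The same slicing trick in NumPy for a single channels-last image, checked against np.pad:

    import numpy as np

    def reflect_pad(x, h_pad, w_pad):
        # x: (H, W, C); mirror the rows/columns next to each edge.
        w = np.concatenate([x[:, w_pad:0:-1, :],               # left mirror
                            x,
                            x[:, -2:-w_pad-2:-1, :]], axis=1)  # right mirror
        h = np.concatenate([w[h_pad:0:-1, :, :],               # top mirror
                            w,
                            w[-2:-h_pad-2:-1, :, :]], axis=0)  # bottom mirror
        return h

    img = np.arange(16, dtype=np.float32).reshape(4, 4, 1)
    assert reflect_pad(img, 1, 1).shape == (6, 6, 1)
    assert np.allclose(reflect_pad(img, 1, 1)[:, :, 0],
                       np.pad(img[:, :, 0], 1, mode='reflect'))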