mirror of
https://github.com/iperov/DeepFaceLab.git
synced 2025-07-06 04:52:13 -07:00
Converter: fix output names of merged files, now it's 100% the same as the input,
ConvertAvatar: fix input image after the landmarks face-align fix; VideoEd: video_from_sequence now uses pipe input to accept arbitrary filenames instead of %.5d-formatted names
This commit is contained in:
parent
23854ac8bc
commit
9f58d160a0
4 changed files with 21 additions and 15 deletions
|
@ -11,7 +11,7 @@ def process_frame_info(frame_info, inp_sh):
|
|||
img = img_uint8.astype(np.float32) / 255.0
|
||||
|
||||
img_mat = LandmarksProcessor.get_transform_mat (frame_info.landmarks_list[0], inp_sh[0], face_type=FaceType.FULL_NO_ALIGN)
|
||||
img = cv2.warpAffine( img, img_mat, inp_sh[0:2], flags=cv2.INTER_CUBIC )
|
||||
img = cv2.warpAffine( img, img_mat, inp_sh[0:2], borderMode=cv2.BORDER_REPLICATE, flags=cv2.INTER_CUBIC )
|
||||
return img
|
||||
|
||||
def ConvertFaceAvatar (cfg, prev_temporal_frame_infos, frame_info, next_temporal_frame_infos):
|
||||
|
|
|
@ -214,12 +214,7 @@ class ConvertSubprocessor(Subprocessor):
|
|||
for i in range( len(self.frames) ):
|
||||
frame = self.frames[i]
|
||||
frame.idx = i
|
||||
|
||||
inp_stem = Path(frame.frame_info.filename).stem
|
||||
if len([ True for symbol in inp_stem if symbol not in digits ]) > 0:
|
||||
frame.output_filename = self.output_path / ('%.5d.png' % (i+1) )
|
||||
else:
|
||||
frame.output_filename = self.output_path / ( inp_stem + '.png' )
|
||||
frame.output_filename = self.output_path / ( Path(frame.frame_info.filename).stem + '.png' )
|
||||
|
||||
frames[0].cfg = self.converter_config.copy()
|
||||
|
||||
|
|
|
@ -167,8 +167,10 @@ def video_from_sequence( input_dir, output_file, reference_file=None, ext=None,
|
|||
if not lossless and bitrate is None:
|
||||
bitrate = max (1, io.input_int ("Bitrate of output file in MB/s ? (default:16) : ", 16) )
|
||||
|
||||
i_in = ffmpeg.input(str (input_path / ('%5d.'+ext)), r=fps)
|
||||
input_image_paths = Path_utils.get_image_paths(input_path)
|
||||
|
||||
i_in = ffmpeg.input('pipe:', format='image2pipe', r=fps)
|
||||
|
||||
output_args = [i_in]
|
||||
|
||||
if ref_in_a is not None:
|
||||
|
@ -193,7 +195,16 @@ def video_from_sequence( input_dir, output_file, reference_file=None, ext=None,
|
|||
})
|
||||
|
||||
job = ( ffmpeg.output(*output_args, **output_kwargs).overwrite_output() )
|
||||
try:
|
||||
job = job.run()
|
||||
|
||||
try:
|
||||
job_run = job.run_async(pipe_stdin=True)
|
||||
|
||||
for image_path in input_image_paths:
|
||||
with open (image_path, "rb") as f:
|
||||
image_bytes = f.read()
|
||||
job_run.stdin.write (image_bytes)
|
||||
|
||||
job_run.stdin.close()
|
||||
job_run.wait()
|
||||
except:
|
||||
io.log_err ("ffmpeg fail, job commandline:" + str(job.compile()) )
|
||||
|
|
|
@ -210,19 +210,19 @@ class SampleProcessor(object):
|
|||
cached_images[img_type] = img
|
||||
|
||||
if is_face_sample and target_face_type != SPTF.NONE:
|
||||
ft = SampleProcessor.SPTF_FACETYPE_TO_FACETYPE[target_face_type]
|
||||
if ft > sample.face_type:
|
||||
raise Exception ('sample %s type %s does not match model requirement %s. Consider extract necessary type of faces.' % (sample.filename, sample.face_type, ft) )
|
||||
target_ft = SampleProcessor.SPTF_FACETYPE_TO_FACETYPE[target_face_type]
|
||||
if target_ft > sample.face_type:
|
||||
raise Exception ('sample %s type %s does not match model requirement %s. Consider extract necessary type of faces.' % (sample.filename, sample.face_type, target_ft) )
|
||||
|
||||
if sample.face_type == FaceType.MARK_ONLY:
|
||||
img = cv2.warpAffine( img, LandmarksProcessor.get_transform_mat (sample.landmarks, sample.shape[0], ft), (sample.shape[0],sample.shape[0]), flags=cv2.INTER_CUBIC )
|
||||
img = cv2.warpAffine( img, LandmarksProcessor.get_transform_mat (sample.landmarks, sample.shape[0], target_ft), (sample.shape[0],sample.shape[0]), flags=cv2.INTER_CUBIC )
|
||||
|
||||
mask = img[...,3:4] if img.shape[2] > 3 else None
|
||||
img = img[...,0:3]
|
||||
img = do_transform (img, mask)
|
||||
img = cv2.resize( img, (resolution,resolution), cv2.INTER_CUBIC )
|
||||
else:
|
||||
img = cv2.warpAffine( img, LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, ft), (resolution,resolution), flags=cv2.INTER_CUBIC )
|
||||
img = cv2.warpAffine( img, LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, target_ft), (resolution,resolution), borderMode=cv2.BORDER_REPLICATE, flags=cv2.INTER_CUBIC )
|
||||
|
||||
else:
|
||||
img = cv2.resize( img, (resolution,resolution), cv2.INTER_CUBIC )
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue