From 6c944d8989a7eddcc9079ceaaa0c6ddbc2bbf05f Mon Sep 17 00:00:00 2001
From: iperov
Date: Mon, 11 Feb 2019 21:26:51 +0400
Subject: [PATCH] upd readme

---
 README.md                 | 2 +-
 models/Model_SAE/Model.py | 2 --
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/README.md b/README.md
index a361b61..89e5dad 100644
--- a/README.md
+++ b/README.md
@@ -128,7 +128,7 @@
 SAE tips:
 
 - common training algorithm for styled face: set initial face and bg style values to 10.0, train it to 15k-20k epochs, then overwrite settings and set face style to 0.1, bg style to 4.0, and train it up to clear result.
 
-- how to train extremely obstructed face model with SAE? There are no absolute best solution for that. All depends on scene. Experiment with styling values on your own during training. Enable 'write preview history' and track changes. Backup model files every 10k epochs. You can revert model files and change values if something goes wrong.
+- how to train an extremely obstructed face model with SAE? First train the styled model on clean dst faces without obstructions, then reuse the model files or replace the dst images to train your target video. Experiment with styling values on your own during training. Enable 'write preview history' and track changes. Back up model files every 10k epochs; you can revert model files and change values if something goes wrong.
 
 Improperly matched dst landmarks may significantly reduce fake quality:

diff --git a/models/Model_SAE/Model.py b/models/Model_SAE/Model.py
index aa854de..752bb33 100644
--- a/models/Model_SAE/Model.py
+++ b/models/Model_SAE/Model.py
@@ -236,8 +236,6 @@ class SAEModel(ModelBase):
         if self.options['learn_mask']:
-            #src_mask_loss = sum([ K.mean(K.square(target_srcm_ar[i]-pred_src_srcm[i])) for i in range(len(target_srcm_ar)) ])
-            #dst_mask_loss = sum([ K.mean(K.square(target_dstm_ar[i]-pred_dst_dstm[i])) for i in range(len(target_dstm_ar)) ])
             src_mask_loss = sum([ K.mean(K.square(target_srcm_ar[-1]-pred_src_srcm[-1])) for i in range(len(target_srcm_ar)) ])
             dst_mask_loss = sum([ K.mean(K.square(target_dstm_ar[-1]-pred_dst_dstm[-1])) for i in range(len(target_dstm_ar)) ])
             self.src_dst_mask_train = K.function ([warped_src, target_srcm, warped_dst, target_dstm],[src_mask_loss, dst_mask_loss], optimizer().get_updates(src_mask_loss+dst_mask_loss, src_dst_mask_loss_train_weights) )
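
Note on the README hunk: the "common training algorithm for styled face" tip is a two-phase schedule, which the sketch below expresses as plain Python. This is a minimal illustration only; the function name and the option keys are hypothetical stand-ins, not actual DeepFaceLab settings, and the epoch threshold is the one quoted in the tip.

# Hedged sketch of the two-phase style schedule described in the README tip.
# style_schedule and the option keys are hypothetical, not project API.
def style_schedule(epoch):
    # Phase 1: strong face and bg style to force the styled look.
    if epoch < 20000:  # "train it to 15k-20k epochs"
        return {'face_style': 10.0, 'bg_style': 10.0}
    # Phase 2: overwrite settings, then train until the result is clean.
    return {'face_style': 0.1, 'bg_style': 4.0}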
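
Note on the Model.py hunk: with index [-1] the loop variable i in the kept comprehensions is unused, so each loss is the last-scale MSE added len(target_srcm_ar) times, i.e. the last scale's error multiplied by the number of scales; the deleted comments were the per-scale variant indexed by i. A standalone NumPy sketch of what the kept expression computes (shapes are hypothetical, not the project's tensors):

# What sum([mean(square(t[-1]-p[-1])) for i in range(len(t))]) evaluates to.
import numpy as np

def last_scale_mask_loss(target_masks, pred_masks):
    # Unused loop index: the same last-scale MSE is summed len(target_masks) times.
    return sum(np.mean(np.square(target_masks[-1] - pred_masks[-1]))
               for _ in range(len(target_masks)))

# Hypothetical two-scale mask pyramid, batch of 1, single channel.
targets = [np.ones((1, 64, 64, 1)), np.ones((1, 128, 128, 1))]
preds = [np.zeros((1, 64, 64, 1)), np.zeros((1, 128, 128, 1))]
print(last_scale_mask_loss(targets, preds))  # 2.0 = last-scale MSE (1.0) x 2 scales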