Converter: added an option for seamless mode to suppress jittering.

The lenx/leny region is now averaged over grayscale gradients of the mask,
and conversion now uses all CPU cores.
SAE: multiscale_decoder option now defaults to False.
Updated README and manual_ru.pdf.
iperov 2019-03-03 15:33:52 +04:00
parent 87236921a5
commit 31c2298b5f
29 changed files with 89 additions and 248 deletions

View file

@@ -16,15 +16,9 @@ Goal: RTX 2080 TI
bitcoin:31mPd6DxPCzbpCMZk4k1koWAbErSyqkAXr
- ### [Features](doc/doc_features.md)
- ### Manuals:
- ### [Model types](doc/doc_model_types.md)
- ### [Convertor overview](doc/doc_convertor_overview.md)
- ### [Tips and tricks](doc/doc_tips_and_tricks.md)
- ### [Using sort tool](doc/doc_sort_tool.md)
[На русском](doc/manual_ru.pdf)
- ### [Prebuilt windows app](doc/doc_prebuilt_windows_app.md)
@@ -32,10 +26,6 @@ bitcoin:31mPd6DxPCzbpCMZk4k1koWAbErSyqkAXr
- ### [Build and repository info](doc/doc_build_and_repository_info.md)
- ### Manuals:
[На русском](doc/manual_ru.pdf)
- ### Communication groups:
(Chinese) QQ group 951138799 for ML/AI experts

View file

@@ -1,3 +1,4 @@
import traceback
from .Converter import Converter
from facelib import LandmarksProcessor
from facelib import FaceType
@@ -5,7 +6,6 @@ import cv2
import numpy as np
from utils import image_utils
from interact import interact as io
'''
default_mode = {1:'overlay',
2:'hist-match',
@@ -34,16 +34,16 @@ class ConverterMasked(Converter):
self.face_type = face_type
self.clip_hborder_mask_per = clip_hborder_mask_per
mode = io.input_int ("Choose mode: (1) overlay, (2) hist match, (3) hist match bw, (4) seamless, (5) seamless hist match, (6) raw. Default - %d : " % (default_mode) , default_mode)
mode = io.input_int ("Choose mode: (1) overlay, (2) hist match, (3) hist match bw, (4) seamless, (5) raw. Default - %d : " % (default_mode) , default_mode)
mode_dict = {1:'overlay',
2:'hist-match',
3:'hist-match-bw',
4:'seamless',
5:'seamless-hist-match',
6:'raw'}
5:'raw'}
self.mode = mode_dict.get (mode, mode_dict[default_mode] )
self.suppress_seamless_jitter = False
if self.mode == 'raw':
mode = io.input_int ("Choose raw mode: (1) rgb, (2) rgb+mask (default), (3) mask only, (4) predicted only : ", 2)
@@ -53,6 +53,13 @@ class ConverterMasked(Converter):
4:'predicted-only'}.get (mode, 'rgb-mask')
if self.mode != 'raw':
if self.mode == 'seamless':
io.input_bool ("Suppress seamless jitter? [ y/n ] (?:help skip:n ) : ", False, help_message="Seamless clone produces face jitter. You can suppress it, but process can take a long time." )
if io.input_bool("Seamless hist match? (y/n skip:n) : ", False):
self.mode = 'seamless-hist-match'
if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
self.masked_hist_match = io.input_bool("Masked hist match? (y/n skip:y) : ", True)
@@ -60,30 +67,37 @@ class ConverterMasked(Converter):
self.hist_match_threshold = np.clip ( io.input_int("Hist match threshold [0..255] (skip:255) : ", 255), 0, 255)
self.use_predicted_mask = io.input_bool("Use predicted mask? (y/n skip:y) : ", True)
if self.mode != 'raw':
self.erode_mask_modifier = base_erode_mask_modifier + np.clip ( io.input_int ("Choose erode mask modifier [-200..200] (skip:%d) : " % (default_erode_mask_modifier), default_erode_mask_modifier), -200, 200)
self.blur_mask_modifier = base_blur_mask_modifier + np.clip ( io.input_int ("Choose blur mask modifier [-200..200] (skip:%d) : " % (default_blur_mask_modifier), default_blur_mask_modifier), -200, 200)
self.seamless_erode_mask_modifier = 0
if self.mode == 'seamless' or self.mode == 'seamless-hist-match':
if 'seamless' in self.mode:
self.seamless_erode_mask_modifier = np.clip ( io.input_int ("Choose seamless erode mask modifier [-100..100] (skip:0) : ", 0), -100, 100)
self.output_face_scale = np.clip ( 1.0 + io.input_int ("Choose output face scale modifier [-50..50] (skip:0) : ", 0)*0.01, 0.5, 1.5)
self.color_transfer_mode = io.input_str ("Apply color transfer to predicted face? Choose mode ( rct/lct skip:None ) : ", None, ['rct','lct'])
if self.mode != 'raw':
self.final_image_color_degrade_power = np.clip ( io.input_int ("Degrade color power of final image [0..100] (skip:0) : ", 0), 0, 100)
self.alpha = io.input_bool("Export png with alpha channel? (y/n skip:n) : ", False)
io.log_info ("")
io.log_info ("")
self.over_res = 4 if self.suppress_seamless_jitter else 1
#override
def dummy_predict(self):
self.predictor_func ( np.zeros ( (self.predictor_input_size,self.predictor_input_size,4), dtype=np.float32 ) )
#override
def convert_face (self, img_bgr, img_face_landmarks, debug):
def convert_face (self, img_bgr, img_face_landmarks, debug):
if self.over_res != 1:
img_bgr = cv2.resize ( img_bgr, ( img_bgr.shape[1]*self.over_res, img_bgr.shape[0]*self.over_res ) )
img_face_landmarks = img_face_landmarks*self.over_res
if debug:
debugs = [img_bgr.copy()]
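The jitter-suppression option works by supersampling: when enabled, the whole frame and its landmarks are scaled up by `over_res = 4` before conversion and scaled back down at the end, so the seams produced by seamless cloning shift by sub-pixel amounts between frames. A minimal standalone sketch of the idea (the helper name and the caller-supplied `convert` function are illustrative, not the project's API):

```python
import cv2
import numpy as np

def convert_with_supersampling(img_bgr, landmarks, convert, over_res=4):
    # Work at a higher resolution so per-frame shifts of the seamless-clone
    # boundary become sub-pixel after downscaling, which reduces visible jitter.
    if over_res != 1:
        img_bgr = cv2.resize(img_bgr, (img_bgr.shape[1] * over_res,
                                       img_bgr.shape[0] * over_res))
        landmarks = landmarks * over_res

    out = convert(img_bgr, landmarks)   # the actual face conversion (caller-supplied)

    if over_res != 1:
        out = cv2.resize(out, (out.shape[1] // over_res,
                               out.shape[0] // over_res))
    return out
```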
@@ -114,22 +128,17 @@ class ConverterMasked(Converter):
prd_face_mask_a = np.expand_dims (prd_face_mask_a_0, axis=-1)
prd_face_mask_aaa = np.repeat (prd_face_mask_a, (3,), axis=-1)
img_prd_face_mask_aaa = cv2.warpAffine( prd_face_mask_aaa, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4 )
img_prd_face_mask_aaa = np.clip (img_prd_face_mask_aaa, 0.0, 1.0)
img_face_mask_aaa = img_prd_face_mask_aaa
img_face_mask_aaa = cv2.warpAffine( prd_face_mask_aaa, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4 )
img_face_mask_aaa = np.clip (img_face_mask_aaa, 0.0, 1.0)
img_face_mask_aaa [ img_face_mask_aaa <= 0.1 ] = 0.0 #get rid of noise
if debug:
debugs += [img_face_mask_aaa.copy()]
img_face_mask_aaa [ img_face_mask_aaa <= 0.1 ] = 0.0
if self.mode == 'seamless' or self.mode == 'seamless-hist-match':
img_face_seamless_mask_aaa = img_face_mask_aaa.copy()
if 'seamless' in self.mode:
img_face_seamless_mask_aaa = img_face_mask_aaa.copy() #mask used for cv2.seamlessClone
img_face_seamless_mask_aaa[img_face_seamless_mask_aaa > 0.9] = 1.0
img_face_seamless_mask_aaa[img_face_seamless_mask_aaa <= 0.9] = 0.0
maxregion = np.argwhere(img_face_mask_aaa > 0.9)
out_img = img_bgr.copy()
@@ -147,17 +156,29 @@ class ConverterMasked(Converter):
out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(out_img.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT )
else:
if maxregion.size != 0:
miny,minx = maxregion.min(axis=0)[:2]
maxy,maxx = maxregion.max(axis=0)[:2]
lenx = maxx - minx
leny = maxy - miny
maskx = minx+(lenx/2)
masky = miny+(leny/2)
#averaging [lenx, leny, maskx, masky] by grayscale gradients of upscaled mask
ar = []
for i in range(1, 10):
maxregion = np.argwhere( img_face_mask_aaa > i / 10.0 )
if maxregion.size != 0:
miny,minx = maxregion.min(axis=0)[:2]
maxy,maxx = maxregion.max(axis=0)[:2]
lenx = maxx - minx
leny = maxy - miny
maskx = ( minx+(lenx/2) )
masky = ( miny+(leny/2) )
if lenx >= 4 and leny >= 4:
ar += [ [ lenx, leny, maskx, masky] ]
if len(ar) > 0:
lenx, leny, maskx, masky = np.mean ( ar, axis=0 )
if debug:
io.log_info ("maxregion.size: %d, min/max_x:(%d/%d) min/max_y:(%d/%d) mask_x_y:(%d/%d)" % (maxregion.size, minx, maxx, miny, maxy, maskx, masky ) )
io.log_info ("lenx/leny:(%d/%d) maskx/masky:(%f/%f)" % (lenx, leny, maskx, masky ) )
maskx = int( maskx )
masky = int( masky )
if lenx >= 4 and leny >= 4:
lowest_len = min (lenx, leny)
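The new region estimate replaces a single hard threshold with an average over several gray levels of the soft mask: the bounding box and its center are computed for thresholds 0.1 through 0.9 and then averaged, which makes the seamless-clone center point far more stable from frame to frame. A self-contained sketch of that averaging, assuming a single-channel float mask in [0, 1]:

```python
import numpy as np

def soft_mask_box(mask, thresholds=tuple(i / 10.0 for i in range(1, 10))):
    # Average the bounding box and its center over several threshold levels
    # of a soft (grayscale) mask; a single-level box jitters between frames.
    samples = []
    for t in thresholds:
        region = np.argwhere(mask > t)
        if region.size == 0:
            continue
        miny, minx = region.min(axis=0)[:2]
        maxy, maxx = region.max(axis=0)[:2]
        lenx, leny = maxx - minx, maxy - miny
        if lenx >= 4 and leny >= 4:
            samples.append([lenx, leny, minx + lenx / 2.0, miny + leny / 2.0])
    if not samples:
        return None
    lenx, leny, cx, cy = np.mean(samples, axis=0)
    return int(lenx), int(leny), int(cx), int(cy)
```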
@@ -270,14 +291,18 @@ class ConverterMasked(Converter):
if self.mode == 'overlay':
pass
if self.mode == 'seamless' or self.mode == 'seamless-hist-match':
try:
if 'seamless' in self.mode:
try:
out_img = cv2.seamlessClone( (out_img*255).astype(np.uint8), (img_bgr*255).astype(np.uint8), (img_face_seamless_mask_aaa*255).astype(np.uint8), (maskx,masky) , cv2.NORMAL_CLONE )
out_img = out_img.astype(dtype=np.float32) / 255.0
except:
except Exception as e:
#seamlessClone may fail in some cases
pass
e_str = traceback.format_exc()
if 'MemoryError' in e_str:
raise Exception("Seamless fail: " + e_str) #reraise MemoryError in order to reprocess this data by other processes
else:
print ("Seamless fail: " + e_str)
if debug:
debugs += [out_img.copy()]
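`cv2.seamlessClone` expects 8-bit source, destination and mask images plus an (x, y) center point; the new handler re-raises only memory errors so the parent process can hand the frame to another worker, and logs everything else. A hedged, standalone sketch of the same pattern (helper name and fallback behaviour are assumptions):

```python
import traceback
import cv2
import numpy as np

def safe_seamless_clone(src_f32, dst_f32, mask_f32, center):
    # cv2.seamlessClone wants uint8 images and an (x, y) centre point.
    try:
        out = cv2.seamlessClone((src_f32 * 255).astype(np.uint8),
                                (dst_f32 * 255).astype(np.uint8),
                                (mask_f32 * 255).astype(np.uint8),
                                center, cv2.NORMAL_CLONE)
        return out.astype(np.float32) / 255.0
    except Exception:
        e_str = traceback.format_exc()
        if 'MemoryError' in e_str:
            raise                     # let another worker reprocess this frame
        print('Seamless fail: ' + e_str)
        return dst_f32                # fall back to the untouched target frame
```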
@@ -303,6 +328,9 @@ class ConverterMasked(Converter):
if self.alpha:
out_img = np.concatenate ( [out_img, np.expand_dims (img_mask_blurry_aaa[:,:,0],-1)], -1 )
if self.over_res != 1:
out_img = cv2.resize ( out_img, ( img_bgr.shape[1] // self.over_res, img_bgr.shape[0] // self.over_res ) )
out_img = np.clip (out_img, 0.0, 1.0 )
if debug:

16 binary image files deleted (previews not shown; sizes range from 8.8 KiB to 1.8 MiB).

View file

@@ -1 +0,0 @@
![](DeepFaceLab_convertor_overview.png)

View file

@@ -1,38 +0,0 @@
### **Features**:
- works on AMD, NVIDIA, Intel HD Graphics and all OpenCL 1.2-compatible video cards with at least 256 MB of video memory. Of course, the more memory available, the better the quality you will get.
- CPU-only mode [`--cpu-mode`]. An 8th-gen Intel Core CPU can train the H64 model in about 2 days.
- The Windows build is a standalone, ready-to-run program and contains all dependencies (CUDA, OpenCL, ffmpeg, .bat scripts, etc.) needed to start working.
- New models expanding upon the original faceswap model.
- Model architecture designed with experimentation in mind.
- Face metadata embedded into extracted JPG files.
- Extractor and Converter run in parallel.
- Debug mode option for all stages: [`--debug`]
- Multiple extraction modes: MTCNN, dlib, or manual.
#### Extractor Examples
##### MTCNN
Predicts faces more uniformly than dlib, resulting in less jittered aligned output. However, MTCNN extraction produces more false positives.
Comparison of dlib (left) vs MTCNN on a hard case:
![](https://i.imgur.com/5qLiiOV.gif)
- **Manual Extractor**
A manual extractor is available. This extractor uses the preview GUI to allow the user to properly align detected faces.
![](manual_extractor_0.jpg)
This mode can also be used to fix incorrectly extracted faces. Manual extraction can be used to greatly improve training on face sets that are heavily obstructed.
![Result](https://user-images.githubusercontent.com/8076202/38454756-0fa7a86c-3a7e-11e8-9065-182b4a8a7a43.gif)

View file

@@ -1,49 +0,0 @@
- **H64 (2GB+)** - half face at 64 resolution. Similar to the original FakeApp or FaceSwap, but with the TensorFlow 1.8 DSSIM loss function, a separate mask decoder, and an improved ConverterMasked. On 2GB and 3GB VRAM the model works in reduced mode.
H64 Robert Downey Jr.:
![](H64_Downey_0.jpg)
![](H64_Downey_1.jpg)
- **H128 (3GB+)** - like H64, but at 128 resolution, giving better face details. On 3GB and 4GB VRAM the model works in reduced mode.
H128 Cage:
![](H128_Cage_0.jpg)
H128, Asian face on a blurry target:
![](H128_Asian_0.jpg)
![](H128_Asian_1.jpg)
- **DF (5GB+)** - @dfaker's model. Like H128, but a full-face model. It is strongly recommended not to mix different lighting conditions in src faces.
![](DF_Cage_0.jpg)
- **LIAEF128 (5GB+)** - Less Aggressive Improved AutoEncoder Fullface 128 model. The result of combining DF, IAE, and further experiments. The model tries to morph the src face toward dst while keeping the facial features of src, with less aggressive morphing. It has problems recognizing closed eyes.
LIAEF128 Cage:
![](LIAEF128_Cage_0.jpg)
![](LIAEF128_Cage_1.jpg)
- **SAE ( minimum 2GB+, recommended 11GB+ )** - Styled AutoEncoder - a new, superior model based on style loss. SAE is a very flexible model that subsumes all the other models; by default it works as a stylizer/morpher and does not guarantee that the predicted face will look like src, but you can set the styling values to zero to make it behave like a classic model. SAE improves on the classic models thanks to its multiscale decoder and a smooth transition from DSSIM to MSE (pixel) loss. Face obstructions can be reconstructed without any masks. The converter mode 'overlay' should be used if styling is enabled. The model has several startup options for fine-tuning it to fit your GPU. For more info, read the tips below.
![](SAE_Asian_0.jpg)
![](SAE_Cage_0.jpg)
![](SAE_Musk_0.jpg)
SAE model Cage-Trump video: https://www.youtube.com/watch?v=2R_aqHBClUQ
SAE model Elon Musk - Robert Downey jr video: https://www.youtube.com/watch?v=OLWFnPwzgEY
A scene with an extremely obstructed face inside a helmet, which no other classic faceswap model can handle (read the tips for how to train it):
![](SAE_Cage_1.jpg)
![](SAE_Cage_2.jpg)

View file

@@ -1,35 +0,0 @@
### **Sort tool**:
Extraction is rarely perfect, and a final pass by human eyes is typically required. This is the best way to remove unmatched or misaligned faces quickly. The sort tool included in the project greatly reduces the time and effort required to clean large sets. Similar pictures are grouped together, and false positives can quickly be identified.
`blur` places the most blurred faces at the end of the folder (see the sketch after this section for the underlying sharpness metric)
`hist` groups images by similar content
`hist-dissim` places the images most dissimilar to each other at the beginning
`hist-blur` sorts by blur within groups of similar content
`face-pitch` sorts by face pitch direction
`face-yaw` sorts by face yaw direction
`brightness`
`hue`
`black` places images that contain black areas at the end of the folder. Useful for getting rid of src faces that are cut off by the screen edge.
`final` sorts by yaw, blur, and hist, and keeps the best 1500-1700 images.
Suggested sort workflow for gathering a src faceset from very large image pools:
1) `black` -> then delete the faces cut off by black areas at the end of the folder
2) `blur` -> then delete the blurred faces at the end of the folder
3) `hist` -> then delete groups of similar unwanted faces, leaving only the target face
4) `final` -> then delete faces occluded by obstructions
Suggested sort workflow for cleaning a dst faceset:
1) first delete whatever unsorted groups of aligned images you clearly do not need. Don't touch frames where the target face is mixed with others.
2) `hist` -> then delete groups of similar faces, leaving only the target face
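For context on the `blur` sort above: a common way to rank face crops by sharpness is the variance of the Laplacian, where low values indicate blur. The sketch below is a standalone illustration of that metric, not the project's actual sorting code; the folder name and the choice of metric are assumptions.

```python
import cv2
from pathlib import Path

def sharpness(path):
    # Variance of the Laplacian: a simple focus measure; lower means blurrier.
    img = cv2.imread(str(path), cv2.IMREAD_GRAYSCALE)
    return cv2.Laplacian(img, cv2.CV_64F).var()

# Sharpest first, so the most blurred faces end up at the end of the list,
# mirroring how the `blur` sort pushes blurred faces to the end of the folder.
aligned = sorted(Path('aligned').glob('*.jpg'), key=sharpness, reverse=True)
for p in aligned[-10:]:
    print('likely blurred:', p.name)
```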

View file

@@ -1,68 +0,0 @@
### **Tips and tricks**:
Unfortunately, deepfaking is a time- and power-consuming process with a lot of nuances.
The quality of the src faceset significantly affects the final face.
A narrow src face is easier to fake than a wide one. This is why Cage is so popular in deepfakes.
Every model is good for specific scenes and faces.
H64 - good for straight faces, as a demo, and for low VRAM.
H128 - good for straight faces; gives higher resolution and more detail.
DF - good for side faces, but gives lower resolution and detail. Covers more of the cheeks. Keeps the face unmorphed. Good for similar face shapes.
LIAE - can partially fix dissimilar face shapes, but results in a less recognizable face.
SAE - new flexible model. Absolute best in 2019.
SAE tips:
- SAE actually contains all the other models, but is better due to the smooth DSSIM-to-MSE (pixel loss) transition. Just set the style powers to 0.0 to make it work like a default (H128/DF/LIAE) model.
- if the src faceset has more faces than the dst faceset, the model may fail to converge. In this case, try the 'Feed faces to network sorted by yaw' option.
- if the src face is wider than dst, the model may fail to converge. In this case, try decreasing 'Src face scale modifier' to -5.
- the default architecture 'df' makes the predicted face look more like src, but if the model does not converge, try 'liae'.
- if you have a lot of VRAM, you can trade off between batch size, which affects the quality of generalization, and enc/dec dims, which affect image quality.
- a common training scheme for a styled face: set the initial face and bg style values to 10.0, train to 15k-20k epochs, then overwrite the settings, set face style to 0.1 and bg style to 4.0, and train until the result is clean.
- how to train an extremely obstructed face with SAE: first train the styled model on clean dst faces without obstructions. Then start a new training run on your target video, save it after 1+ epochs, replace the model files with the pretrained model, and continue training. Experiment with the styling values on your own during training. Enable 'write preview history' and track changes. Back up the model files every 10k epochs. You can revert the model files and change the values if something goes wrong.
Improperly matched dst landmarks may significantly reduce fake quality:
![](https://github.com/iperov/DeepFaceLab/blob/master/doc/Tips_improperly_dst_landmarks_0.jpg)
![](https://github.com/iperov/DeepFaceLab/blob/master/doc/Tips_improperly_dst_landmarks_1.jpg)
In this case, watch the "Manual re-extract bad dst aligned frames" tutorial below.
@GAN-er advanced tips:
Tip 1:
You may benefit by starting with a small batch size (within reason) and increasing it later. The reason is that a **large batch size will give you a more accurate descent direction but it will also be costlier to calculate**, and when you just start, you care mostly about the general direction; no need to sacrifice speed for precision at that point. There are plenty of sources discussing the batch size, as an example you can check this one:
https://stats.stackexchange.com/questions/164876/tradeoff-batch-size-vs-number-of-iterations-to-train-a-neural-network
Tip 2:
Unlike the batch size, whose only effect is how accurately each step follows the true gradient, the dimensions actually increase the complexity of your NN. As a rule, **the more complex a network, the better the resulting model**, but since nothing comes for free, **the more complex the network, the longer it will take to converge**.
What you generally want is to **_figure out the maximum dimensions that you can use_** given your GPU's memory and your desired maximum batch size.
You can set the max batch size to some value, say K, and then increase the dimensions until you get OOM errors. In the end, you will end up with a triplet: {batch size, ae_dims, ed_dims}.
Ideally, you would use 1024 and 85 for your autoencoder and encoder/decoder dimensions, but no card has enough memory for such a configuration even with batch size 1.
Remember that unlike the batch size, which you can change at will, once you set the dimensions you cannot change them.
Note that **a complex, high-dimensional NN combined with a small batch size will take _considerably_ longer to converge**. So keep that in mind! You will simply have to wait longer, but you will also get a much, much better result.
For cards with 11GB of memory, and for SAE, you can try the following {batch size, ae_dims, ed_dims} settings (see the sketch after this tip):
For the DF architecture: 12 698 51
For the LIAEF architecture: 8 402 47
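A minimal sketch of the probing procedure this tip describes, with a user-supplied training probe; the starting values, step sizes, and the use of MemoryError as a stand-in for a framework OOM error are all assumptions:

```python
def find_max_dims(try_train, batch_size, ae_dims=256, ed_dims=21,
                  ae_step=32, ed_step=2):
    # Fix the batch size, then grow the dimensions until training runs out
    # of memory, and keep the last configuration that still fit.
    best = None
    while True:
        try:
            try_train(batch_size=batch_size, ae_dims=ae_dims, ed_dims=ed_dims)
        except MemoryError:          # stand-in for the framework's OOM error
            return best
        best = (batch_size, ae_dims, ed_dims)
        ae_dims += ae_step
        ed_dims += ed_step
```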
Tip 3:
If you end up stuck, i.e. the loss does not go down for no obvious reason, or you get weird artifacts in some previews, then before you discard everything and start from scratch, you may want to flip your DST and SRC for a while. This is often all you need to keep things going again.
Tip 4:
99.995% of your success or failure is due to bad SRC or DST sets. This means that 99.995% of your time should be spent actually ensuring that your sets are well curated. Throwing together a hodgepodge of material and expecting a decent outcome is guaranteed to end in disappointment. Garbage in, garbage out.

Binary file not shown.

Binary file not shown.

View file

@@ -5,6 +5,9 @@ import sys
from interact import interact as io
class Subprocessor(object):
class SilenceException(Exception):
pass
class Cli(object):
def __init__ ( self, client_dict ):
@@ -71,14 +74,16 @@ class Subprocessor(object):
self.on_finalize()
c2s.put ( {'op': 'finalized'} )
return
except Subprocessor.SilenceException as e:
pass
except Exception as e:
if data is not None:
print ('Exception while process data [%s]: %s' % (self.get_data_name(data), traceback.format_exc()) )
else:
print ('Exception: %s' % (traceback.format_exc()) )
c2s.put ( {'op': 'error', 'data' : data} )
c2s.put ( {'op': 'error', 'data' : data} )
#overridable
def __init__(self, name, SubprocessorCli_class, no_response_time_sec = 60):
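The `SilenceException` added here lets a worker signal "this item already failed and was reported elsewhere, don't print a second error message". A standalone sketch of the pattern, with hypothetical helper names:

```python
import traceback

class SilenceException(Exception):
    # Raised when the failure has already been handled/reported by the raiser.
    pass

def worker_loop(tasks, process_data, report_error):
    for data in tasks:
        try:
            process_data(data)
        except SilenceException:
            pass                      # stay quiet; the raiser already reported it
        except Exception:
            print('Exception while processing data [%s]: %s'
                  % (data, traceback.format_exc()))
            report_error(data)        # hand the item back for reprocessing
```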

View file

@@ -28,7 +28,12 @@ class ConvertSubprocessor(Subprocessor):
self.output_path = Path(client_dict['output_dir']) if 'output_dir' in client_dict.keys() else None
self.alignments = client_dict['alignments']
self.debug = client_dict['debug']
#transfer and set stdin so that code.interact works in the debug subprocess
stdin_fd = client_dict['stdin_fd']
if stdin_fd is not None:
sys.stdin = os.fdopen(stdin_fd)
from nnlib import nnlib
#model process ate all GPU mem,
#so we cannot use GPU for any TF operations in converter processes
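Passing `sys.stdin.fileno()` into the worker and re-opening it with `os.fdopen` is what lets `code.interact` (and `input()`) work inside the debug subprocess, since multiprocessing children normally get a detached stdin. A minimal sketch, assuming a start method where the descriptor is inherited (e.g. fork):

```python
import multiprocessing
import os
import sys

def child(stdin_fd):
    if stdin_fd is not None:
        # Reattach the parent's stdin so interactive prompts work in the child.
        sys.stdin = os.fdopen(stdin_fd)
    print('child got:', sys.stdin.readline().strip())

if __name__ == '__main__':
    p = multiprocessing.Process(target=child, args=(sys.stdin.fileno(),))
    p.start()
    p.join()
```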
@@ -99,8 +104,11 @@ class ConvertSubprocessor(Subprocessor):
image = self.converter.convert_face(image, image_landmarks, self.debug)
except Exception as e:
self.log_info ( 'Error while converting face_num [%d] in file [%s]: %s' % (face_num, filename_path, str(e)) )
traceback.print_exc()
e_str = traceback.format_exc()
if 'MemoryError' in e_str:
raise Subprocessor.SilenceException
else:
raise Exception( 'Error while converting face_num [%d] in file [%s]: %s' % (face_num, filename_path, e_str) )
if self.debug:
return (1, debug_images)
@@ -136,7 +144,7 @@ class ConvertSubprocessor(Subprocessor):
#override
def process_info_generator(self):
r = [0] if self.debug else range( min(multiprocessing.cpu_count(), 6) )
r = [0] if self.debug else range(multiprocessing.cpu_count())
for i in r:
yield 'CPU%d' % (i), {}, {'device_idx': i,
@@ -144,14 +152,15 @@ class ConvertSubprocessor(Subprocessor):
'converter' : self.process_converter,
'output_dir' : str(self.output_path),
'alignments' : self.alignments,
'debug': self.debug
'debug': self.debug,
'stdin_fd': sys.stdin.fileno() if self.debug else None
}
#overridable optional
def on_clients_initialized(self):
if self.debug:
io.named_window ("Debug convert")
io.progress_bar ("Converting", len (self.input_data) )
#overridable optional
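The worker-count change above is what the commit message means by "uses all CPU": the converter was previously capped at six processes and now spawns one per logical core, with a single worker in debug mode so the interactive preview window stays usable. A trivial sketch of the spawning rule:

```python
import multiprocessing

def worker_indices(debug):
    # One converter worker per logical CPU; only CPU0 when debugging.
    return [0] if debug else list(range(multiprocessing.cpu_count()))

print(worker_indices(debug=False))
```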

View file

@@ -55,12 +55,12 @@ class SAEModel(ModelBase):
self.options['lighter_encoder'] = io.input_bool ("Use lightweight encoder? (y/n, ?:help skip:n) : ", False, help_message="Lightweight encoder is 35% faster, requires less VRAM, but sacrificing overall quality.")
if self.options['archi'] != 'vg':
self.options['multiscale_decoder'] = io.input_bool ("Use multiscale decoder? (y/n, ?:help skip:y) : ", True, help_message="Multiscale decoder helps to get better details.")
self.options['multiscale_decoder'] = io.input_bool ("Use multiscale decoder? (y/n, ?:help skip:y) : ", False, help_message="Multiscale decoder helps to get better details.")
else:
self.options['lighter_encoder'] = self.options.get('lighter_encoder', False)
if self.options['archi'] != 'vg':
self.options['multiscale_decoder'] = self.options.get('multiscale_decoder', True)
self.options['multiscale_decoder'] = self.options.get('multiscale_decoder', False)
default_face_style_power = 0.0
default_bg_style_power = 0.0