diff --git a/core/interact/interact.txt b/core/interact/interact.txt
new file mode 100644
index 0000000..3b6d621
--- /dev/null
+++ b/core/interact/interact.txt
@@ -0,0 +1,40 @@
+4
+Output image format
+4
+Which GPU indexes to choose?
+Face type
+Max number of faces from image
+Image size
+Jpeg quality
+Write debug images to aligned_debug?
+Which GPU indexes to choose?
+Face type
+Image size
+Jpeg quality
+
+Which GPU indexes to choose?
+Autobackup every N hour
+Write preview history
+Flip SRC faces randomly
+Flip DST faces randomly
+Batch_size
+Eyes and mouth priority
+Uniform yaw distribution of samples
+Blur out mask
+Place models and optimizer on GPU
+Use AdaBelief optimizer?
+Use learning rate dropout
+Enable random warp of samples
+Random hue/saturation/light intensity
+GAN power
+Face style power
+Background style power
+Color transfer for src faceset
+Enable gradient clipping
+Enable pretraining mode
+
+Which GPU indexes to choose?
+Use interactive merger?
+Number of workers?
+Use saved session?
+Bitrate of output file in MB/s
diff --git a/core/interact/no_interact_dict.py b/core/interact/no_interact_dict.py
index e0d0817..4caff83 100644
--- a/core/interact/no_interact_dict.py
+++ b/core/interact/no_interact_dict.py
@@ -1,59 +1,51 @@
 import pickle
 
 dictionary = {
-'Output image format' : 'png',
-'Override' : '0',
-'Enable gradient clipping' : 'False',
-' Use saved session? ' : 'False',
-'Enable pretraining mode' : 'False',
-' Press enter in 2 seconds to override model settings. ' : '\n',
-'[0] Which GPU indexes to choose? : ' : '0',
-'[wf] Face type ( f/wf/head ?:help ) : ' : 'wf',
-'[0] Max number of faces from image ( ?:help ) : ' : '0',
-'[512] Image size ( 256-2048 ?:help ) : ' : '512',
-'[90] Jpeg quality ( 1-100 ?:help ) : ' : '90',
-'[n] Write debug images to aligned_debug? ( y/n ) : ' : 'False',
-'[y] Continue extraction? ( y/n ?:help ) : ' : 'True',
-'[2] Autobackup every N hour ( 0..24 ?:help ) : ' : '2',
-'[n] Write preview history ( y/n ?:help ) : ' : 'False',
-'[83000] Target iteration : ' : '\n',
-'[n] Flip SRC faces randomly ( y/n ?:help ) : ' : 'False',
-'[n] Flip DST faces randomly ( y/n ?:help ) : ' : 'False',
-'[4] Batch_size ( ?:help ) : ' : '4',
-'[n] Eyes and mouth priority ( y/n ?:help ) : ' : 'False',
-'[y] Uniform yaw distribution of samples ( y/n ?:help ) : ' : 'True',
-'[n] Blur out mask ( y/n ?:help ) : ' : 'False',
-'[y] Place models and optimizer on GPU ( y/n ?:help ) : ' : 'True',
-'[y] Use AdaBelief optimizer? ( y/n ?:help ) : ' : 'True',
-'[n] Use learning rate dropout ( n/y/cpu ?:help ) : ' : 'n',
-'[y] Enable random warp of samples ( y/n ?:help ) : ' : 'True',
-'[0.0] Random hue/saturation/light intensity ( 0.0 .. 0.3 ?:help ) : ' : '0.0',
-'[0.0] GAN power ( 0.0 .. 5.0 ?:help ) : ' : '0.0',
-'[0.0] Face style power ( 0.0..100.0 ?:help ) : ' : '0.0',
-'[0.0] Background style power ( 0.0..100.0 ?:help ) : ' : '0.0',
-'[lct] Color transfer for src faceset ( none/rct/lct/mkl/idt/sot ?:help ) : ' : 'lct',
-'[n] Use interactive merger? ( y/n ) : ' : 'n',
-'[1] 2 : ' : '1',
-'[1] 3 : ' : '4',
-'[0] Choose erode mask modifier ( -400..400 ) : ' : '100',
-'[0] Choose blur mask modifier ( 0..400 ) : ' : '150',
-'[0] Choose motion blur power ( 0..100 ) : ' : '0',
-'[0] Choose output face scale modifier ( -50..50 ) : ' : '0',
-'[0] 1 ( ?:help ) : ' : '0',
-'[0] Choose super resolution power ( 0..100 ?:help ) : ' : '0',
-'[0] Choose image degrade by denoise power ( 0..500 ) : ' : '0',
-'[0] Choose image degrade by bicubic rescale power ( 0..100 ) : ' : '0',
-'[0] Degrade color power of final image ( 0..100 ) : ' : '0',
-'Color transfer to predicted face ( rct/lct/mkl/mkl-m/idt/idt-m/sot-m/mix-m ) : ' : 'rct',
-'[8] Number of workers? ( 1-8 ?:help ) : ' : '8',
-'[16] Bitrate of output file in MB/s : ' : '16',
+ '4' : '\n',
+ 'Output image format':'png',
+ 'Which GPU indexes to choose?': '0',
+'Face type': 'wf',
+'Max number of faces from image' : '1',
+'Image size' : '512',
+'Jpeg quality' : '90',
+'Write debug images to aligned_debug?': 'False',
+'Autobackup every N hour':'2',
+'Write preview history' : 'False',
+'Flip SRC faces randomly':'False',
+'Flip DST faces randomly':'False',
+'Batch_size': '4',
+'Eyes and mouth priority':'False',
+'Uniform yaw distribution of samples':'True',
+'Blur out mask':'False',
+'Place models and optimizer on GPU' : 'True',
+'Use AdaBelief optimizer?' : 'True',
+'Use learning rate dropout' : 'False',
+'Enable random warp of samples' : 'True',
+'Random hue/saturation/light intensity' : '0.0',
+'GAN power' : '0.0',
+'Face style power' : '0.0',
+'Background style power': '0.0',
+'Color transfer for src faceset' : 'lct',
+'Enable gradient clipping': 'False',
+'Enable pretraining mode' : 'False',
+'Use interactive merger?':'False',
+'Number of workers?':'8',
+'Use saved session?':'False',
+'Bitrate of output file in MB/s' : '16',
+'Choose erode mask modifier' : '100',
+'Choose blur mask modifier' : '150',
+'Choose motion blur power' : '0',
+'Choose output face scale modifier' : '0',
+'Choose super resolution power' : '0',
+'Choose image degrade by denoise power' : '0',
+'Choose image degrade by bicubic rescale power' : '0',
+'Degrade color power of final image' : '0',
+'Color transfer to predicted face' : 'rct',
 }
 
 with open('/home/deepfake/interact_dict.pkl', 'wb') as handle:
     pickle.dump(dictionary, handle, protocol=4)
 
 with open('/home/deepfake/interact_dict.pkl', 'rb') as handle:
     d = pickle.load(handle)
-s = "Use saved"
-res = dict(filter(lambda item: s in item[0], d.items()))
-print(list(res.values())[0])
+print(d['Color transfer to predicted face'])