nothing interesting

Colombo 2019-12-11 22:33:23 +04:00
parent 154820a954
commit e8673e3fcc
10 changed files with 339 additions and 262 deletions

View file

@@ -2,18 +2,24 @@ import numpy as np
 import cv2
 from utils import random_utils

-def gen_warp_params (source, flip, rotation_range=[-10,10], scale_range=[-0.5, 0.5], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05] ):
+def gen_warp_params (source, flip, rotation_range=[-10,10], scale_range=[-0.5, 0.5], tx_range=[-0.05, 0.05], ty_range=[-0.05, 0.05], rnd_seed=None ):
     h,w,c = source.shape
     if (h != w):
         raise ValueError ('gen_warp_params accepts only square images.')

-    rotation = np.random.uniform( rotation_range[0], rotation_range[1] )
-    scale = np.random.uniform(1 +scale_range[0], 1 +scale_range[1])
-    tx = np.random.uniform( tx_range[0], tx_range[1] )
-    ty = np.random.uniform( ty_range[0], ty_range[1] )
+    if rnd_seed != None:
+        rnd_state = np.random.RandomState (rnd_seed)
+    else:
+        rnd_state = np.random
+
+    rotation = rnd_state.uniform( rotation_range[0], rotation_range[1] )
+    scale = rnd_state.uniform(1 +scale_range[0], 1 +scale_range[1])
+    tx = rnd_state.uniform( tx_range[0], tx_range[1] )
+    ty = rnd_state.uniform( ty_range[0], ty_range[1] )
+    p_flip = flip and rnd_state.randint(10) < 4

     #random warp by grid
-    cell_size = [ w // (2**i) for i in range(1,4) ] [ np.random.randint(3) ]
+    cell_size = [ w // (2**i) for i in range(1,4) ] [ rnd_state.randint(3) ]
     cell_count = w // cell_size + 1
     grid_points = np.linspace( 0, w, cell_count)

@@ -37,7 +43,7 @@ def gen_warp_params (source, flip, rotation_range=[-10,10], scale_range=[-0.5, 0
     params['mapy'] = mapy
     params['rmat'] = random_transform_mat
     params['w'] = w
-    params['flip'] = flip and np.random.randint(10) < 4
+    params['flip'] = p_flip

     return params
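With rnd_seed threaded through gen_warp_params, two calls that pass the same seed produce identical rotation, scale, shift, flip and grid-warp parameters; SampleProcessor uses this further down to give every output type of one sample the same augmentation. A minimal sketch of the seeded-RandomState-or-global-RNG pattern (hypothetical function, not the imagelib code itself):

import numpy as np

# Sketch of the rnd_seed pattern above: a private, reproducible RandomState when a
# seed is given, falling back to the global numpy RNG to preserve the old behaviour.
def gen_params(rnd_seed=None, rotation_range=(-10, 10)):
    rnd_state = np.random.RandomState(rnd_seed) if rnd_seed is not None else np.random
    return {
        'rotation': rnd_state.uniform(*rotation_range),
        'flip': bool(rnd_state.randint(10) < 4),   # ~40% flip chance, as in the diff
    }

seed = 1234
assert gen_params(seed) == gen_params(seed)        # same seed -> identical params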

View file

@@ -47,13 +47,13 @@ class Model(ModelBase):
         self.set_training_data_generators ([
                 SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
                     sample_process_options=SampleProcessor.Options(random_flip=True),
-                    output_sample_types=[ { 'types': (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR), 'resolution' : self.resolution, 'motion_blur':(25, 5), 'gaussian_blur':(25,5), 'border_replicate':False, 'random_hsv_shift' : True },
+                    output_sample_types=[ { 'types': (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_BGR_RANDOM_HSV_SHIFT), 'resolution' : self.resolution, 'motion_blur':(25, 5), 'gaussian_blur':(25,5), 'border_replicate':False},
                                           { 'types': (t.IMG_WARPED_TRANSFORMED, face_type, t.MODE_M), 'resolution': self.resolution },
                                         ]),

                 SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                     sample_process_options=SampleProcessor.Options(random_flip=True ),
-                    output_sample_types=[ { 'types': (t.IMG_TRANSFORMED , face_type, t.MODE_BGR), 'resolution' : self.resolution, 'random_hsv_shift' : True},
+                    output_sample_types=[ { 'types': (t.IMG_TRANSFORMED , face_type, t.MODE_BGR_RANDOM_HSV_SHIFT), 'resolution' : self.resolution},
                                         ])
                 ])

View file

@@ -23,9 +23,9 @@ class FUNITModel(ModelBase):
     #override
     def onInitializeOptions(self, is_first_run, ask_override):
-        default_resolution = 96
+        default_resolution = 64
         if is_first_run:
-            self.options['resolution'] = io.input_int(f"Resolution ( 96,128,224 ?:help skip:{default_resolution}) : ", default_resolution, [128,224])
+            self.options['resolution'] = io.input_int(f"Resolution ( 64,96,128,224 ?:help skip:{default_resolution}) : ", default_resolution, [64,96,128,224])
         else:
             self.options['resolution'] = self.options.get('resolution', default_resolution)

@@ -48,7 +48,7 @@ class FUNITModel(ModelBase):
         resolution = self.options['resolution']
         face_type = FaceType.FULL if self.options['face_type'] == 'f' else FaceType.HALF
-        person_id_max_count = SampleGeneratorFace.get_person_id_max_count(self.training_data_src_path)
+        person_id_max_count = SampleGeneratorFacePerson.get_person_id_max_count(self.training_data_src_path)

         self.model = FUNIT( face_type_str=FaceType.toString(face_type),

@@ -85,21 +85,21 @@ class FUNITModel(ModelBase):
             output_sample_types1=[ {'types': (t.IMG_SOURCE, face_type, t.MODE_BGR), 'resolution':resolution, 'normalize_tanh':True} ]

             self.set_training_data_generators ([
-                SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
+                SampleGeneratorFacePerson(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
                     sample_process_options=SampleProcessor.Options(random_flip=True, rotation_range=[0,0] ),
-                    output_sample_types=output_sample_types, person_id_mode=True ),
+                    output_sample_types=output_sample_types, person_id_mode=1, use_caching=True, generators_count=1 ),

-                SampleGeneratorFace(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
+                SampleGeneratorFacePerson(self.training_data_src_path, debug=self.is_debug(), batch_size=self.batch_size,
                     sample_process_options=SampleProcessor.Options(random_flip=True, rotation_range=[0,0] ),
-                    output_sample_types=output_sample_types, person_id_mode=True ),
+                    output_sample_types=output_sample_types, person_id_mode=1, use_caching=True, generators_count=1 ),

-                SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
+                SampleGeneratorFacePerson(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                     sample_process_options=SampleProcessor.Options(random_flip=True, rotation_range=[0,0]),
-                    output_sample_types=output_sample_types1, person_id_mode=True ),
+                    output_sample_types=output_sample_types1, person_id_mode=1, use_caching=True, generators_count=1 ),

-                SampleGeneratorFace(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
+                SampleGeneratorFacePerson(self.training_data_dst_path, debug=self.is_debug(), batch_size=self.batch_size,
                     sample_process_options=SampleProcessor.Options(random_flip=True, rotation_range=[0,0]),
-                    output_sample_types=output_sample_types1, person_id_mode=True ),
+                    output_sample_types=output_sample_types1, person_id_mode=1, use_caching=True, generators_count=1 ),
                 ])

     #override

View file

@@ -93,6 +93,7 @@ Model = keras.models.Model
 Adam = nnlib.Adam
 RMSprop = nnlib.RMSprop
 LookaheadOptimizer = nnlib.LookaheadOptimizer
+SGD = nnlib.keras.optimizers.SGD

 modelify = nnlib.modelify
 gaussian_blur = nnlib.gaussian_blur
@@ -765,9 +766,10 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                 2 - allows to train x3 bigger network on same VRAM consuming RAM*2 and CPU power.
         """
-        def __init__(self, learning_rate=0.001, rho=0.9, tf_cpu_mode=0, **kwargs):
+        def __init__(self, learning_rate=0.001, rho=0.9, lr_dropout=0, tf_cpu_mode=0, **kwargs):
             self.initial_decay = kwargs.pop('decay', 0.0)
             self.epsilon = kwargs.pop('epsilon', K.epsilon())
+            self.lr_dropout = lr_dropout
             self.tf_cpu_mode = tf_cpu_mode

             learning_rate = kwargs.pop('lr', learning_rate)
@@ -788,6 +790,8 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                                               dtype=K.dtype(p),
                                               name='accumulator_' + str(i))
                             for (i, p) in enumerate(params)]
+            if self.lr_dropout != 0:
+                lr_rnds = [ K.random_binomial(K.int_shape(p), p=self.lr_dropout, dtype=K.dtype(p)) for p in params ]
             if e: e.__exit__(None, None, None)

             self.weights = [self.iterations] + accumulators
@@ -798,12 +802,15 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                 lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
                                                           K.dtype(self.decay))))

-            for p, g, a in zip(params, grads, accumulators):
+            for i, (p, g, a) in enumerate(zip(params, grads, accumulators)):
                 # update accumulator
                 e = K.tf.device("/cpu:0") if self.tf_cpu_mode == 2 else None
                 if e: e.__enter__()
                 new_a = self.rho * a + (1. - self.rho) * K.square(g)
-                new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
+                p_diff = - lr * g / (K.sqrt(new_a) + self.epsilon)
+                if self.lr_dropout != 0:
+                    p_diff *= lr_rnds[i]
+                new_p = p + p_diff
                 if e: e.__exit__(None, None, None)

                 self.updates.append(K.update(a, new_a))
@@ -828,7 +835,8 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
             config = {'learning_rate': float(K.get_value(self.learning_rate)),
                       'rho': float(K.get_value(self.rho)),
                       'decay': float(K.get_value(self.decay)),
-                      'epsilon': self.epsilon}
+                      'epsilon': self.epsilon,
+                      'lr_dropout' : self.lr_dropout }
             base_config = super(RMSprop, self).get_config()
             return dict(list(base_config.items()) + list(config.items()))
     nnlib.RMSprop = RMSprop
@@ -847,6 +855,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
         amsgrad: boolean. Whether to apply the AMSGrad variant of this
             algorithm from the paper "On the Convergence of Adam and
             Beyond".
+        lr_dropout: float [0.0 .. 1.0] Learning rate dropout https://arxiv.org/pdf/1912.00144
         tf_cpu_mode: only for tensorflow backend
             0 - default, no changes.
             1 - allows to train x2 bigger network on same VRAM consuming RAM
@@ -860,7 +869,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
         """
         def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
-                     epsilon=None, decay=0., amsgrad=False, tf_cpu_mode=0, **kwargs):
+                     epsilon=None, decay=0., amsgrad=False, lr_dropout=0, tf_cpu_mode=0, **kwargs):
             super(Adam, self).__init__(**kwargs)
             with K.name_scope(self.__class__.__name__):
                 self.iterations = K.variable(0, dtype='int64', name='iterations')
@@ -873,6 +882,7 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
             self.epsilon = epsilon
             self.initial_decay = decay
             self.amsgrad = amsgrad
+            self.lr_dropout = lr_dropout
             self.tf_cpu_mode = tf_cpu_mode

         def get_updates(self, loss, params):
@@ -896,11 +906,16 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                 vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
             else:
                 vhats = [K.zeros(1) for _ in params]
+
+            if self.lr_dropout != 0:
+                lr_rnds = [ K.random_binomial(K.int_shape(p), p=self.lr_dropout, dtype=K.dtype(p)) for p in params ]
+
             if e: e.__exit__(None, None, None)

             self.weights = [self.iterations] + ms + vs + vhats

-            for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
+            for i, (p, g, m, v, vhat) in enumerate( zip(params, grads, ms, vs, vhats) ):
                 e = K.tf.device("/cpu:0") if self.tf_cpu_mode == 2 else None
                 if e: e.__enter__()
                 m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
@@ -912,13 +927,16 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                 if e: e.__exit__(None, None, None)

                 if self.amsgrad:
-                    p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
+                    p_diff = - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
                 else:
-                    p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
+                    p_diff = - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
+
+                if self.lr_dropout != 0:
+                    p_diff *= lr_rnds[i]

                 self.updates.append(K.update(m, m_t))
                 self.updates.append(K.update(v, v_t))
-                new_p = p_t
+                new_p = p + p_diff

                 # Apply constraints.
                 if getattr(p, 'constraint', None) is not None:
@@ -933,7 +951,8 @@ NLayerDiscriminator = nnlib.NLayerDiscriminator
                       'beta_2': float(K.get_value(self.beta_2)),
                       'decay': float(K.get_value(self.decay)),
                       'epsilon': self.epsilon,
-                      'amsgrad': self.amsgrad}
+                      'amsgrad': self.amsgrad,
+                      'lr_dropout' : self.lr_dropout}
             base_config = super(Adam, self).get_config()
             return dict(list(base_config.items()) + list(config.items()))
     nnlib.Adam = Adam
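Both optimizers gain the same lr_dropout mechanism: a per-element Bernoulli mask (K.random_binomial with p=self.lr_dropout, so lr_dropout acts as the keep probability) is multiplied into the parameter delta. Only a random subset of weights actually moves on a given step while the moment accumulators keep updating as usual, following the learning-rate-dropout paper linked in the docstring. A small NumPy sketch of the idea applied to a plain SGD step (hypothetical names, not the nnlib classes):

import numpy as np

# Learning-rate dropout (https://arxiv.org/abs/1912.00144) on a plain SGD step:
# each parameter element keeps its update with probability keep_prob,
# the rest are frozen for this step only.
def sgd_step_with_lr_dropout(param, grad, lr=0.001, keep_prob=0.7, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    p_diff = -lr * grad                             # the usual SGD update
    mask = rng.random(param.shape) < keep_prob      # per-element Bernoulli keep mask
    return param + p_diff * mask                    # drop a fraction of the updates

w = np.zeros((2, 3))
g = np.ones((2, 3))
w = sgd_step_with_lr_dropout(w, g, lr=0.1, keep_prob=0.7, rng=np.random.default_rng(0))
print(w)   # roughly 70% of the entries stepped to -0.1, the rest stayed at 0.0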

View file

@@ -55,7 +55,6 @@ class SampleGeneratorFace(SampleGeneratorBase):
             raise ValueError('No training data provided.')

         ct_samples = SampleLoader.load (SampleType.FACE, random_ct_samples_path) if random_ct_samples_path is not None else None
-        self.random_ct_sample_chance = 100

         if self.debug:
             self.generators_count = 1
@@ -133,16 +132,12 @@ class SampleGeneratorFace(SampleGeneratorBase):
                 try:
                     ct_sample=None
                     if ct_samples is not None:
-                        if np.random.randint(100) < self.random_ct_sample_chance:
-                            ct_sample=ct_samples[np.random.randint(ct_samples_len)]
+                        ct_sample=ct_samples[np.random.randint(ct_samples_len)]

-                    x = SampleProcessor.process (sample, self.sample_process_options, self.output_sample_types, self.debug, ct_sample=ct_sample)
+                    x, = SampleProcessor.process ([sample], self.sample_process_options, self.output_sample_types, self.debug, ct_sample=ct_sample)
                 except:
                     raise Exception ("Exception occured in sample %s. Error: %s" % (sample.filename, traceback.format_exc() ) )

-                if type(x) != tuple and type(x) != list:
-                    raise Exception('SampleProcessor.process returns NOT tuple/list')
-
                 if batches is None:
                     batches = [ [] for _ in range(len(x)) ]

                     if self.add_sample_idx:
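The generator now calls SampleProcessor.process with a one-element list and unpacks the one-element result, since process (see its diff further down) takes a list of samples and returns one output list per sample; the old "returns NOT tuple/list" guard becomes unnecessary. A tiny, generic sketch of that list-in/list-out plus tuple-unpacking idiom (stand-in names only):

# Stand-in for SampleProcessor.process: one result per input sample.
def process(samples):
    return [f"processed-{s}" for s in samples]

x, = process(["sample_0001.jpg"])   # exactly one element expected, else ValueError
print(x)                            # -> processed-sample_0001.jpg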

View file

@@ -1,3 +1,4 @@
+import copy
 import multiprocessing
 import traceback
@@ -37,6 +38,9 @@ class SampleGeneratorFacePerson(SampleGeneratorBase):
         self.generators_random_seed = generators_random_seed

         samples = SampleLoader.load (SampleType.FACE, self.samples_path, person_id_mode=True, use_caching=use_caching)
+        samples = copy.copy(samples)
+        for i in range(len(samples)):
+            samples[i] = copy.copy(samples[i])

         if person_id_mode==1:
             np.random.shuffle(samples)
@@ -52,6 +56,7 @@ class SampleGeneratorFacePerson(SampleGeneratorBase):
                 if len(sample) == 0:
                     samples.pop(i)

             samples = new_samples
             #new_samples = []
             #for s in samples:
             #    new_samples += s
@@ -111,6 +116,18 @@ class SampleGeneratorFacePerson(SampleGeneratorBase):
            samples_idxs = [None]*persons_count
            shuffle_idxs = [None]*persons_count
+            for i in range(persons_count):
+                samples_idxs[i] = [*range(len(samples[i]))]
+                shuffle_idxs[i] = []
+
+        elif self.person_id_mode==3:
+            persons_count = len(samples)
+
+            person_idxs = [ *range(persons_count) ]
+            shuffle_person_idxs = []
+
+            samples_idxs = [None]*persons_count
+            shuffle_idxs = [None]*persons_count

             for i in range(persons_count):
                 samples_idxs[i] = [*range(len(samples[i]))]
                 shuffle_idxs[i] = []
@@ -130,13 +147,13 @@ class SampleGeneratorFacePerson(SampleGeneratorBase):
            if self.person_id_mode==1:
                if len(shuffle_idxs) == 0:
                    shuffle_idxs = samples_idxs.copy()
-                    #np.random.shuffle(shuffle_idxs)
+                    np.random.shuffle(shuffle_idxs) ###

                idx = shuffle_idxs.pop()
                sample = samples[ idx ]

                try:
-                    x = SampleProcessor.process (sample, self.sample_process_options, self.output_sample_types, self.debug)
+                    x, = SampleProcessor.process ([sample], self.sample_process_options, self.output_sample_types, self.debug)
                except:
                    raise Exception ("Exception occured in sample %s. Error: %s" % (sample.filename, traceback.format_exc() ) )
@@ -155,7 +172,7 @@ class SampleGeneratorFacePerson(SampleGeneratorBase):
                    batches[i_person_id].append ( np.array([sample.person_id]) )

-            else:
+            elif self.person_id_mode==2:
                person_id1, person_id2 = person_ids

                if len(shuffle_idxs[person_id1]) == 0:
@@ -174,12 +191,12 @@ class SampleGeneratorFacePerson(SampleGeneratorBase):
                if sample1 is not None and sample2 is not None:
                    try:
-                        x1 = SampleProcessor.process (sample1, self.sample_process_options, self.output_sample_types, self.debug)
+                        x1, = SampleProcessor.process ([sample1], self.sample_process_options, self.output_sample_types, self.debug)
                    except:
                        raise Exception ("Exception occured in sample %s. Error: %s" % (sample1.filename, traceback.format_exc() ) )

                    try:
-                        x2 = SampleProcessor.process (sample2, self.sample_process_options, self.output_sample_types, self.debug)
+                        x2, = SampleProcessor.process ([sample2], self.sample_process_options, self.output_sample_types, self.debug)
                    except:
                        raise Exception ("Exception occured in sample %s. Error: %s" % (sample2.filename, traceback.format_exc() ) )
@@ -203,7 +220,56 @@ class SampleGeneratorFacePerson(SampleGeneratorBase):
                    batches[i_person_id2].append ( np.array([sample2.person_id]) )

+            elif self.person_id_mode==3:
+                if len(shuffle_person_idxs) == 0:
+                    shuffle_person_idxs = person_idxs.copy()
+                    np.random.shuffle(shuffle_person_idxs)
+                person_id = shuffle_person_idxs.pop()
+
+                if len(shuffle_idxs[person_id]) == 0:
+                    shuffle_idxs[person_id] = samples_idxs[person_id].copy()
+                    np.random.shuffle(shuffle_idxs[person_id])
+
+                idx = shuffle_idxs[person_id].pop()
+                sample1 = samples[person_id][idx]
+
+                if len(shuffle_idxs[person_id]) == 0:
+                    shuffle_idxs[person_id] = samples_idxs[person_id].copy()
+                    np.random.shuffle(shuffle_idxs[person_id])
+
+                idx = shuffle_idxs[person_id].pop()
+                sample2 = samples[person_id][idx]
+
+                if sample1 is not None and sample2 is not None:
+                    try:
+                        x1, = SampleProcessor.process ([sample1], self.sample_process_options, self.output_sample_types, self.debug)
+                    except:
+                        raise Exception ("Exception occured in sample %s. Error: %s" % (sample1.filename, traceback.format_exc() ) )

+                    try:
+                        x2, = SampleProcessor.process ([sample2], self.sample_process_options, self.output_sample_types, self.debug)
+                    except:
+                        raise Exception ("Exception occured in sample %s. Error: %s" % (sample2.filename, traceback.format_exc() ) )
+
+                    x1_len = len(x1)
+                    if batches is None:
+                        batches = [ [] for _ in range(x1_len) ]
+                        batches += [ [] ]
+                        i_person_id1 = len(batches)-1
+
+                        batches += [ [] for _ in range(len(x2)) ]
+                        batches += [ [] ]
+                        i_person_id2 = len(batches)-1
+
+                    for i in range(x1_len):
+                        batches[i].append ( x1[i] )
+
+                    for i in range(len(x2)):
+                        batches[x1_len+1+i].append ( x2[i] )
+
+                    batches[i_person_id1].append ( np.array([sample1.person_id]) )
+                    batches[i_person_id2].append ( np.array([sample2.person_id]) )

            yield [ np.array(batch) for batch in batches]
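The new person_id_mode==3 branch picks a random person and then draws two images of that person, reusing the "shuffle bag" pattern the other modes already use: every person keeps a shuffled list of sample indices that is popped until empty, then refilled and reshuffled (the two draws can coincide right after a refill, exactly as in the generator). A small self-contained sketch of that sampling pattern (hypothetical class, not the generator itself):

import random

class ShuffleBag:
    # Yields items in random order, reshuffling only after every item was seen once.
    def __init__(self, items):
        self.items = list(items)
        self.bag = []

    def draw(self):
        if not self.bag:                  # bag exhausted: refill and reshuffle
            self.bag = self.items.copy()
            random.shuffle(self.bag)
        return self.bag.pop()

# One bag of sample indices per person, as in person_id_mode==3.
person_bags = {0: ShuffleBag(range(5)), 1: ShuffleBag(range(3))}
person_id = random.choice(list(person_bags))
pair = (person_bags[person_id].draw(), person_bags[person_id].draw())
print(person_id, pair)                    # two sample indices of the same person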

View file

@@ -71,7 +71,7 @@ class SampleGeneratorFaceTemporal(SampleGeneratorBase):
                for i in range( self.temporal_image_count ):
                    sample = samples[ idx+i*mult ]
                    try:
-                        temporal_samples += SampleProcessor.process (sample, self.sample_process_options, self.output_sample_types, self.debug)
+                        temporal_samples += SampleProcessor.process ([sample], self.sample_process_options, self.output_sample_types, self.debug)[0]
                    except:
                        raise Exception ("Exception occured in sample %s. Error: %s" % (sample.filename, traceback.format_exc() ) )

View file

@@ -66,7 +66,7 @@ class SampleGeneratorImageTemporal(SampleGeneratorBase):
                for i in range( self.temporal_image_count ):
                    sample = samples[ idx+i*mult ]
                    try:
-                        temporal_samples += SampleProcessor.process (sample, self.sample_process_options, self.output_sample_types, self.debug)
+                        temporal_samples += SampleProcessor.process ([sample], self.sample_process_options, self.output_sample_types, self.debug)[0]
                    except:
                        raise Exception ("Exception occured in sample %s. Error: %s" % (sample.filename, traceback.format_exc() ) )

View file

@@ -92,9 +92,13 @@ class SampleProcessor(object):
        }

    @staticmethod
-    def process (sample, sample_process_options, output_sample_types, debug, ct_sample=None):
+    def process (samples, sample_process_options, output_sample_types, debug, ct_sample=None):
        SPTF = SampleProcessor.Types

+        sample_rnd_seed = np.random.randint(0x80000000)
+
+        outputs = []
+        for sample in samples:
            sample_bgr = sample.load_bgr()
            ct_sample_bgr = None
            ct_sample_mask = None
@@ -105,13 +109,9 @@ class SampleProcessor(object):
            if debug and is_face_sample:
                LandmarksProcessor.draw_landmarks (sample_bgr, sample.landmarks, (0, 1, 0))

-            params = imagelib.gen_warp_params(sample_bgr, sample_process_options.random_flip, rotation_range=sample_process_options.rotation_range, scale_range=sample_process_options.scale_range, tx_range=sample_process_options.tx_range, ty_range=sample_process_options.ty_range )
+            params = imagelib.gen_warp_params(sample_bgr, sample_process_options.random_flip, rotation_range=sample_process_options.rotation_range, scale_range=sample_process_options.scale_range, tx_range=sample_process_options.tx_range, ty_range=sample_process_options.ty_range, rnd_seed=sample_rnd_seed )

-            cached_images = collections.defaultdict(dict)
-
-            sample_rnd_seed = np.random.randint(0x80000000)
-
-            outputs = []
+            outputs_sample = []
            for opts in output_sample_types:
                resolution = opts.get('resolution', 0)
@@ -124,7 +124,6 @@ class SampleProcessor(object):
                motion_blur = opts.get('motion_blur', None)
                gaussian_blur = opts.get('gaussian_blur', None)
-                random_hsv_shift = opts.get('random_hsv_shift', None)
                ct_mode = opts.get('ct_mode', 'None')
                normalize_tanh = opts.get('normalize_tanh', False)
@@ -265,18 +264,6 @@ class SampleProcessor(object):
                        img_bgr = imagelib.color_transfer_sot (img_bgr, ct_sample_bgr_resized)
                        img_bgr = np.clip( img_bgr, 0.0, 1.0)

-                    if random_hsv_shift:
-                        rnd_state = np.random.RandomState (sample_rnd_seed)
-                        hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
-                        h, s, v = cv2.split(hsv)
-                        h = (h + rnd_state.randint(360) ) % 360
-                        s = np.clip ( s + rnd_state.random()-0.5, 0, 1 )
-                        v = np.clip ( v + rnd_state.random()-0.5, 0, 1 )
-                        hsv = cv2.merge([h, s, v])
-                        img_bgr = np.clip( cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) , 0, 1 )
-
                    if normalize_std_dev:
                        img_bgr = (img_bgr - img_bgr.mean( (0,1)) ) / img_bgr.std( (0,1) )
                    elif normalize_vgg:
@@ -290,6 +277,16 @@ class SampleProcessor(object):
                    elif mode_type == SPTF.MODE_BGR_SHUFFLE:
                        rnd_state = np.random.RandomState (sample_rnd_seed)
                        img = np.take (img_bgr, rnd_state.permutation(img_bgr.shape[-1]), axis=-1)
+
+                    elif mode_type == SPTF.MODE_BGR_RANDOM_HSV_SHIFT:
+                        rnd_state = np.random.RandomState (sample_rnd_seed)
+                        hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
+                        h, s, v = cv2.split(hsv)
+                        h = (h + rnd_state.randint(360) ) % 360
+                        s = np.clip ( s + rnd_state.random()-0.5, 0, 1 )
+                        v = np.clip ( v + rnd_state.random()-0.5, 0, 1 )
+                        hsv = cv2.merge([h, s, v])
+                        img = np.clip( cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) , 0, 1 )
                    elif mode_type == SPTF.MODE_G:
                        img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)[...,None]
                    elif mode_type == SPTF.MODE_GGG:
@@ -303,19 +300,9 @@ class SampleProcessor(object):
                else:
                    img = np.clip (img, 0.0, 1.0)

-                outputs.append ( img )
+                outputs_sample.append ( img )

-        if debug:
-            result = []
-            for output in outputs:
-                if output.shape[2] < 4:
-                    result += [output,]
-                elif output.shape[2] == 4:
-                    result += [output[...,0:3]*output[...,3:4],]
-            return result
-        else:
-            return outputs
+            outputs += [outputs_sample]
+
+        return outputs

    """

View file

@@ -22,7 +22,7 @@ class ThisThreadGenerator(object):
        return next(self.generator_func)

class SubprocessGenerator(object):
-    def __init__(self, generator_func, user_param=None, prefetch=2):
+    def __init__(self, generator_func, user_param=None, prefetch=2, start_now=False):
        super().__init__()
        self.prefetch = prefetch
        self.generator_func = generator_func
@@ -30,6 +30,16 @@ class SubprocessGenerator(object):
        self.sc_queue = multiprocessing.Queue()
        self.cs_queue = multiprocessing.Queue()
        self.p = None
+        if start_now:
+            self._start()
+
+    def _start(self):
+        if self.p == None:
+            user_param = self.user_param
+            self.user_param = None
+            self.p = multiprocessing.Process(target=self.process_func, args=(user_param,) )
+            self.p.daemon = True
+            self.p.start()

    def process_func(self, user_param):
        self.generator_func = self.generator_func(user_param)
@@ -54,13 +64,7 @@ class SubprocessGenerator(object):
        return self_dict

    def __next__(self):
-        if self.p == None:
-            user_param = self.user_param
-            self.user_param = None
-            self.p = multiprocessing.Process(target=self.process_func, args=(user_param,) )
-            self.p.daemon = True
-            self.p.start()
+        self._start()
        gen_data = self.cs_queue.get()
        if gen_data is None:
            self.p.terminate()
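SubprocessGenerator's lazy worker start is factored out into _start(), and the new start_now flag launches the worker process at construction time instead of on the first __next__ call, so several generators can begin prefetching in parallel before training pulls the first batch. A minimal sketch of the same lazy-versus-eager start pattern (hypothetical names, not the class in the diff):

import multiprocessing

def _worker(q):
    i = 0
    while True:                                   # stand-in for process_func: produce items forever
        q.put(i)
        i += 1

class PrefetchingGenerator:
    def __init__(self, start_now=False):
        self.q = multiprocessing.Queue(maxsize=2) # bounded queue acts as the prefetch buffer
        self.p = None
        if start_now:
            self._start()                         # eager: worker starts prefetching immediately

    def _start(self):
        if self.p is None:
            self.p = multiprocessing.Process(target=_worker, args=(self.q,), daemon=True)
            self.p.start()

    def __next__(self):
        self._start()                             # lazy fallback: start on first pull
        return self.q.get()

if __name__ == '__main__':
    gen = PrefetchingGenerator(start_now=True)
    print(next(gen), next(gen))                   # -> 0 1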