chore: refactor code quality issues

Aksh Gupta 2021-03-30 07:16:52 +00:00
commit 745b1fdcb1
15 changed files with 31 additions and 23 deletions

.deepsource.toml (new file)

@@ -0,0 +1,10 @@
+version = 1
+
+exclude_patterns = ["samplelib/**"]
+
+[[analyzers]]
+name = "python"
+enabled = true
+
+[analyzers.meta]
+runtime_version = "3.x.x"


@@ -1206,7 +1206,7 @@ class MainWindow(QXMainWindow):
         self.image_paths_done = []
         self.image_paths = image_paths
         self.image_paths_has_ie_polys = image_paths_has_ie_polys
-        self.set_has_ie_polys_count ( len([ 1 for x in self.image_paths_has_ie_polys if self.image_paths_has_ie_polys[x] == True]) )
+        self.set_has_ie_polys_count ( len([ 1 for x in self.image_paths_has_ie_polys if self.image_paths_has_ie_polys[x] is True]) )
         self.loading_frame.hide()
         self.loading_frame = None
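Note on the "== True" -> "is True" changes in this commit: "==" tests value equality, so other values that merely compare equal (such as the integer 1) also match, while "is" tests identity against the bool singleton True. A minimal standalone sketch, not code from this repo:

    x = 1
    print(x == True)   # True  -- value equality: the int 1 compares equal to True
    print(x is True)   # False -- identity: 1 is not the bool singleton
    b = True
    print(b is True)   # True

Assuming the values in image_paths_has_ie_polys are plain booleans, as the name suggests, both spellings behave identically here; "is True" simply states the intent more strictly.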


@@ -24,7 +24,7 @@ def cv2_imread(filename, flags=cv2.IMREAD_UNCHANGED, loader_func=None, verbose=T
 def cv2_imwrite(filename, img, *args):
     ret, buf = cv2.imencode( Path(filename).suffix, img, *args)
-    if ret == True:
+    if ret is True:
         try:
             with open(filename, "wb") as stream:
                 stream.write( buf )


@@ -37,7 +37,7 @@ def gen_warp_params (w, flip, rotation_range=[-10,10], scale_range=[-0.5, 0.5],
     random_transform_mat = cv2.getRotationMatrix2D((w // 2, w // 2), rotation, scale)
     random_transform_mat[:, 2] += (tx*w, ty*w)
-    params = dict()
+    params = {}
     params['mapx'] = mapx
     params['mapy'] = mapy
     params['rmat'] = random_transform_mat
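Note on the dict() -> {} changes: the literal is created by a single bytecode instruction, while dict() needs a global name lookup plus a call, so {} is both the idiomatic and the marginally faster spelling; the same reasoning applies to [] versus list() later in this commit. A small illustration (timings vary by machine):

    import timeit

    # Both build an empty dict; the literal skips the name lookup and the call.
    print(timeit.timeit("{}", number=1_000_000))      # literal: consistently faster
    print(timeit.timeit("dict()", number=1_000_000))  # constructor: slower

    params = {}            # equivalent in behaviour to params = dict()
    params['mapx'] = None  # placeholder value, just to show normal use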


@@ -90,7 +90,7 @@ class Subprocessor(object):
     # disable pickling
     def __getstate__(self):
-        return dict()
+        return {}
     def __setstate__(self, d):
         self.__dict__.update(d)
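For context on the "# disable pickling" pattern that several of these hunks touch: returning an empty dict from __getstate__ means pickle serialises no instance state at all, so worker objects never try to drag unpicklable handles across process boundaries. A self-contained sketch of the idea, with an illustrative class rather than the repo's:

    import pickle

    class NoState:
        def __init__(self):
            self.big_resource = list(range(1000))  # stand-in for an unpicklable handle

        # disable pickling: hand pickle an empty state dict
        def __getstate__(self):
            return {}

        def __setstate__(self, d):
            self.__dict__.update(d)

    clone = pickle.loads(pickle.dumps(NoState()))
    print(hasattr(clone, "big_resource"))  # False -- the state was dropped

Swapping dict() for {} here is purely stylistic; the unpickled object comes back with an empty __dict__ either way.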


@@ -22,8 +22,7 @@ class ModelBase(nn.Saveable):
             for subname in layer.keys():
                 sublayer = layer[subname]
                 self._build_sub(sublayer, f"{name}_{subname}")
-        elif isinstance (layer, nn.LayerBase) or \
-             isinstance (layer, ModelBase):
+        elif isinstance (layer, (nn.LayerBase, ModelBase)):
             if layer.name is None:
                 layer.name = name
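Note on collapsing the chained isinstance checks: isinstance accepts a tuple of types, so "isinstance(x, A) or isinstance(x, B)" and "isinstance(x, (A, B))" return the same result; the tuple form is shorter and avoids the duplicated call. A standalone example with throwaway classes (LayerBase and ModelBase here are stand-ins, not the repo's classes):

    class LayerBase: pass
    class ModelBase: pass

    layer = ModelBase()
    old_style = isinstance(layer, LayerBase) or isinstance(layer, ModelBase)
    new_style = isinstance(layer, (LayerBase, ModelBase))
    print(old_style, new_style)  # True True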


@@ -16,8 +16,7 @@ def batch_set_value(tuples):
     feed_dict = {}
     for x, value in tuples:
-        if isinstance(value, nn.tf.Operation) or \
-           isinstance(value, nn.tf.Variable):
+        if isinstance(value, (nn.tf.Operation, nn.tf.Variable)):
             assign_ops.append(value)
         else:
             value = np.asarray(value, dtype=x.dtype.as_numpy_dtype)


@@ -48,7 +48,7 @@ class IndexHost():
     # disable pickling
     def __getstate__(self):
-        return dict()
+        return {}
     def __setstate__(self, d):
         self.__dict__.update(d)

@@ -137,7 +137,7 @@ class Index2DHost():
     # disable pickling
     def __getstate__(self):
-        return dict()
+        return {}
     def __setstate__(self, d):
         self.__dict__.update(d)

@@ -203,7 +203,7 @@ class ListHost():
     # disable pickling
     def __getstate__(self):
-        return dict()
+        return {}
     def __setstate__(self, d):
         self.__dict__.update(d)

@@ -278,7 +278,7 @@ class DictHost():
     # disable pickling
     def __getstate__(self):
-        return dict()
+        return {}
     def __setstate__(self, d):
         self.__dict__.update(d)


@@ -91,7 +91,7 @@ class QSubprocessor(object):
     # disable pickling
     def __getstate__(self):
-        return dict()
+        return {}
     def __setstate__(self, d):
         self.__dict__.update(d)


@@ -34,4 +34,4 @@ to_string_dict = { FaceType.HALF : 'half_face',
                     FaceType.MARK_ONLY :'mark_only',
                   }
-from_string_dict = { to_string_dict[x] : x for x in to_string_dict.keys() }
+from_string_dict = { to_string_dict[x] : x for x in to_string_dict }
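Note on dropping .keys(): iterating a dict yields its keys directly, so looping or building a comprehension over d.keys() is equivalent to iterating d itself. A quick check with illustrative values:

    to_string = {0: 'half_face', 1: 'full_face'}
    via_keys = { to_string[x]: x for x in to_string.keys() }
    direct   = { to_string[x]: x for x in to_string }
    print(via_keys == direct)  # True

The .keys() calls that this commit leaves in place (for example the valid_list= arguments further down) pass the keys around as a value rather than iterate them, so they are a different case.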


@@ -246,7 +246,7 @@ class S3FDExtractor(object):
         return bboxlist
     def refine_nms(self, dets, thresh):
-        keep = list()
+        keep = []
         if len(dets) == 0:
             return keep


@@ -531,7 +531,7 @@ class InteractiveMergerSubprocessor(Subprocessor):
             self.process_remain_frames = False
         return (self.is_interactive and self.is_interactive_quitting) or \
-               (not self.is_interactive and self.process_remain_frames == False)
+               (not self.is_interactive and self.process_remain_frames is False)
     #override
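A related nuance for the "== False" -> "is False" change: "x is False" matches only the bool singleton False, whereas "not x" would also match any other falsy value (None, 0, an empty list). Since process_remain_frames is assigned plain booleans here, as the context line above shows, the strict identity check is safe. A tiny standalone illustration:

    flag = None
    print(flag is False)  # False -- None is not the bool False
    print(not flag)       # True  -- None is merely falsy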


@@ -33,7 +33,7 @@ class MergerConfig(object):
     #overridable
     def ask_settings(self):
         s = """Choose sharpen mode: \n"""
-        for key in self.sharpen_dict.keys():
+        for key in self.sharpen_dict:
             s += f"""({key}) {self.sharpen_dict[key]}\n"""
         io.log_info(s)
         self.sharpen_mode = io.input_int ("", 0, valid_list=self.sharpen_dict.keys(), help_message="Enhance details by applying sharpen filter.")

@@ -79,7 +79,7 @@ mode_dict = {0:'original',
              5:'raw-rgb',
              6:'raw-predict'}
-mode_str_dict = { mode_dict[key] : key for key in mode_dict.keys() }
+mode_str_dict = { mode_dict[key] : key for key in mode_dict }
 mask_mode_dict = {1:'dst',
                   2:'learned-prd',

@@ -189,7 +189,7 @@ class MergerConfigMasked(MergerConfig):
     def ask_settings(self):
         s = """Choose mode: \n"""
-        for key in mode_dict.keys():
+        for key in mode_dict:
             s += f"""({key}) {mode_dict[key]}\n"""
         io.log_info(s)
         mode = io.input_int ("", mode_str_dict.get(self.default_mode, 1) )

@@ -204,7 +204,7 @@ class MergerConfigMasked(MergerConfig):
         self.hist_match_threshold = np.clip ( io.input_int("Hist match threshold", 255, add_info="0..255"), 0, 255)
         s = """Choose mask mode: \n"""
-        for key in mask_mode_dict.keys():
+        for key in mask_mode_dict:
             s += f"""({key}) {mask_mode_dict[key]}\n"""
         io.log_info(s)
         self.mask_mode = io.input_int ("", 1, valid_list=mask_mode_dict.keys() )


@@ -232,7 +232,7 @@ class QModel(ModelBase):
         self.set_training_data_generators ([
                 SampleGeneratorFace(training_data_src_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
-                    sample_process_options=SampleProcessor.Options(random_flip=True if self.pretrain else False),
+                    sample_process_options=SampleProcessor.Options(random_flip=bool(self.pretrain)),
                     output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':True, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                                             {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                                             {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}

@@ -240,7 +240,7 @@ class QModel(ModelBase):
                     generators_count=src_generators_count ),
                 SampleGeneratorFace(training_data_dst_path, debug=self.is_debug(), batch_size=self.get_batch_size(),
-                    sample_process_options=SampleProcessor.Options(random_flip=True if self.pretrain else False),
+                    sample_process_options=SampleProcessor.Options(random_flip=bool(self.pretrain)),
                     output_sample_types = [ {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':True, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                                             {'sample_type': SampleProcessor.SampleType.FACE_IMAGE,'warp':False, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.BGR, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution},
                                             {'sample_type': SampleProcessor.SampleType.FACE_MASK, 'warp':False, 'transform':True, 'channel_type' : SampleProcessor.ChannelType.G, 'face_mask_type' : SampleProcessor.FaceMaskType.FULL_FACE, 'face_type':self.face_type, 'data_format':nn.data_format, 'resolution': resolution}
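Note on random_flip=bool(self.pretrain): the expression "True if flag else False" already evaluates to a bool, so bool(flag) is an exact, shorter equivalent and the change is purely stylistic. A tiny standalone check:

    # The two spellings agree for booleans and for arbitrary truthy/falsy values.
    for flag in (True, False, 1, 0, "x", "", None):
        assert (True if flag else False) == bool(flag)
    print("equivalent for all tested values")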


@@ -177,7 +177,7 @@ Examples: df, liae, df-d, df-ud, liae-ud, ...
         self.gan_model_changed = (default_gan_patch_size != self.options['gan_patch_size']) or (default_gan_dims != self.options['gan_dims'])
-        self.pretrain_just_disabled = (default_pretrain == True and self.options['pretrain'] == False)
+        self.pretrain_just_disabled = (default_pretrain is True and self.options['pretrain'] == False)
     #override
     def on_initialize(self):