Mirror of https://github.com/clinton-hall/nzbToMedia.git (synced 2025-08-19 21:03:14 -07:00)

Fix tests and flake

commit 78f28f382e (parent db1cc6145e)
15 changed files with 22 additions and 26 deletions
@@ -202,13 +202,11 @@ FORKS: typing.Mapping[str, typing.Mapping] = {
 ALL_FORKS = {
     k: None
     for k in set(
-        list(
-            itertools.chain.from_iterable(
-                [
-                    FORKS[x].keys()
-                    for x in FORKS.keys()
-                ],
-            ),
+        itertools.chain.from_iterable(
+            [
+                FORKS[x].keys()
+                for x in FORKS.keys()
+            ],
         ),
     )
 }
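The dropped list(...) wrapper was redundant: set() consumes any iterable directly, so itertools.chain.from_iterable can feed it without building an intermediate list first. A minimal sketch of the equivalence, with a hypothetical two-fork mapping standing in for the real FORKS table:

    import itertools

    FORKS = {'fork_a': {'dir': None}, 'fork_b': {'dirName': None, 'failed': None}}  # hypothetical stand-in
    with_list = set(list(itertools.chain.from_iterable([FORKS[x].keys() for x in FORKS.keys()])))
    without_list = set(itertools.chain.from_iterable([FORKS[x].keys() for x in FORKS.keys()]))
    assert with_list == without_list == {'dir', 'dirName', 'failed'}
    ALL_FORKS = {k: None for k in without_list}  # same shape the module builds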
@@ -6,7 +6,6 @@ import requests
 
 import nzb2media
 from nzb2media.auto_process.common import ProcessResult
-from nzb2media.utils.common import flatten
 from nzb2media.utils.encoding import convert_to_ascii
 from nzb2media.utils.network import server_responding
 from nzb2media.utils.paths import remote_dir
@@ -7,7 +7,6 @@ import requests
 
 import nzb2media
 from nzb2media.auto_process.common import ProcessResult
-from nzb2media.utils.common import flatten
 from nzb2media.utils.encoding import convert_to_ascii
 from nzb2media.utils.network import server_responding
 from nzb2media.utils.paths import remote_dir
@@ -8,7 +8,6 @@ import requests
 
 import nzb2media
 from nzb2media.auto_process.common import ProcessResult
-from nzb2media.utils.common import flatten
 from nzb2media.utils.encoding import convert_to_ascii
 from nzb2media.utils.network import server_responding
 
@@ -15,7 +15,6 @@ from nzb2media.auto_process.common import completed_download_handling
 from nzb2media.plugins.subtitles import import_subs
 from nzb2media.plugins.subtitles import rename_subs
 from nzb2media.scene_exceptions import process_all_exceptions
-from nzb2media.utils.common import flatten
 from nzb2media.utils.encoding import convert_to_ascii
 from nzb2media.utils.files import list_media_files
 from nzb2media.utils.identification import find_imdbid
@@ -11,7 +11,6 @@ import nzb2media
 from nzb2media.auto_process.common import ProcessResult
 from nzb2media.auto_process.common import command_complete
 from nzb2media.scene_exceptions import process_all_exceptions
-from nzb2media.utils.common import flatten
 from nzb2media.utils.encoding import convert_to_ascii
 from nzb2media.utils.files import list_media_files
 from nzb2media.utils.network import server_responding
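The five hunks above all remove the same import, flatten from nzb2media.utils.common, which these modules evidently no longer reference; an import that is never used is what flake8 reports as F401 ('imported but unused').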
@@ -394,8 +394,7 @@ def process(
     else:
         s = requests.Session()
 
-    log.debug(f'Opening URL: {url} with params: {fork_params}', section,
-    )
+    log.debug(f'Opening URL: {url} with params: {fork_params}')
     if not apikey and username and password:
         login = f'{web_root}/login'
         login_params = {'username': username, 'password': password}
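The old log.debug call passed section as an extra positional argument after an already formatted f-string. Assuming log is a standard logging.Logger, extra positional arguments are %-style formatting arguments for the message and only make sense when the message still contains placeholders, so dropping the stray argument matches how the message is built here. A small sketch of the two idioms (names and values are illustrative):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger('example')

    url = 'http://localhost:8081/home/postprocess'      # illustrative values
    fork_params = {'quiet': 1, 'proc_type': 'manual'}

    log.debug(f'Opening URL: {url} with params: {fork_params}')     # message formatted up front, no extra args
    log.debug('Opening URL: %s with params: %s', url, fork_params)  # lazy %-style alternative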
@@ -569,7 +568,7 @@ def process(
     # f'{section}: Failed to post-process {input_name}'
     # )
 
-    url2 = nzb2media.utils.common.create_url(scheme, host, port, route)
+    url2 = nzb2media.utils.common.create_url(scheme, host, port, route2)
     if completed_download_handling(url2, headers, section=section):
         log.debug(f'The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {section}.')
         return ProcessResult(
@@ -199,7 +199,7 @@ def extract(file_path, output_destination):
     if res == 0:  # Both Linux and Windows return 0 for successful.
         log.info(f'EXTRACTOR: Extraction was successful for {file_path} to {output_destination}')
         success = 1
-    elif len(passwords) > 0 and not 'gunzip' in cmd:
+    elif len(passwords) > 0 and 'gunzip' not in cmd:
         log.info('EXTRACTOR: Attempting to extract with passwords')
         for password in passwords:
             if (
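The two conditions evaluate identically, since the membership test binds tighter than the negation, but the rewritten spelling is the one PEP 8 and flake8 (E713) prefer. A one-line check against a hypothetical extractor command:

    cmd = ['7z', 'x', '-y', 'archive.rar']  # hypothetical command list
    assert (not 'gunzip' in cmd) == ('gunzip' not in cmd)  # same result; the right-hand form is the E713-clean spelling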
@@ -161,7 +161,7 @@ class InitSickBeard:
         token = oauth_token['access_token']
         response = requests.get(
             url,
-            headers={f'Authorization': f'Bearer {token}'},
+            headers={'Authorization': f'Bearer {token}'},
             stream=True,
             verify=False,
         )
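The dictionary key f'Authorization' carried an f-prefix but no placeholders, so it was just the plain string literal; pyflakes flags that pattern as F541 ('f-string is missing placeholders'). The value keeps its prefix because it genuinely interpolates the token:

    token = 'abc123'  # hypothetical OAuth token
    headers = {'Authorization': f'Bearer {token}'}
    assert f'Authorization' == 'Authorization'  # the f-prefix adds nothing without placeholders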
@@ -236,7 +236,7 @@ class InitSickBeard:
             log.debug(f'Removing excess parameters: ' f'{sorted(excess_parameters)}')
             rem_params.extend(excess_parameters)
             return rem_params, True
-        except:
+        except Exception:
             log.error('Failed to identify optionalParameters')
             return rem_params, False
 
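Replacing the bare except: with except Exception: keeps the error handling but stops swallowing SystemExit and KeyboardInterrupt, which derive from BaseException rather than Exception. A minimal sketch of the narrowed handler, with a hypothetical stand-in for the failing lookup:

    import logging

    log = logging.getLogger('example')

    def fetch_optional_parameters():  # hypothetical stand-in for the API call that can fail
        raise ValueError('api unreachable')

    try:
        fetch_optional_parameters()
    except Exception:  # unlike a bare except:, Ctrl-C and interpreter shutdown still propagate
        log.error('Failed to identify optionalParameters')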
@@ -87,7 +87,7 @@ def rename_subs(path):
                 lan = Language.fromname(word.lower())
                 if lan:
                     break
-            except:  # if we didn't find a language, try next word.
+            except Exception:  # if we didn't find a language, try next word.
                 continue
         # rename the sub file as name.lan.ext
         if not lan:
@@ -26,7 +26,8 @@ def process_script():
 
 
 def process(args):
-    """
+    """Process job from SABnzb.
+
     SABnzbd arguments:
     1. The final directory of the job (full path)
     2. The original name of the NZB file
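The docstring now opens with a one-line summary followed by a blank line before the argument list, the shape that pydocstyle (the flake8-docstrings D2xx checks) expects for multi-line docstrings.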
@@ -922,7 +922,7 @@ def mount_iso(
     nzb2media.MOUNTED = (
         mount_point  # Allows us to verify this has been done and then cleanup.
     )
-    for root, dirs, files in os.walk(mount_point):
+    for root, _dirs, files in os.walk(mount_point):
         for file in files:
             full_path = os.path.join(root, file)
             if (
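Renaming the unused loop target from dirs to _dirs is the usual way to signal an intentionally unused variable to readers and linters; the walk itself is unchanged. A minimal sketch of the pattern:

    import os

    for root, _dirs, files in os.walk('/tmp'):  # underscore prefix marks the directory list as unused
        for file in files:
            full_path = os.path.join(root, file)  # only the file paths are of interest here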
@@ -1118,7 +1118,8 @@ def combine_mts(mts_path):
 def combine_cd(combine):
     new_files = []
     for item in {
-        re.match('(.+)[cC][dD][0-9].', item).groups()[0] for item in combine
+        re.match('(.+)[cC][dD][0-9].', ea_item).groups()[0]
+        for ea_item in combine
     }:
         concat = ''
         for n in range(99):
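Renaming the comprehension variable to ea_item stops it from reusing the name of the outer loop target item; in Python 3 a comprehension variable is scoped to the comprehension, so this is a readability fix rather than a behaviour change, and the over-long line is split at the same time.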
@@ -110,7 +110,7 @@ def parse_synods(args):
         task = [task for task in tasks if task['id'] == input_id][0]
         input_id = task['id']
         input_directory = task['additional']['detail']['destination']
-    except:
+    except Exception:
         log.error('unable to find download details in Synology DS')
     # Syno paths appear to be relative. Let's test to see if the returned path exists, and if not append to /volume1/
     if not os.path.isdir(input_directory):
@@ -535,7 +535,7 @@ class SourceUpdateManager(UpdateManager):
             with open(version_path, 'w') as ver_file:
                 ver_file.write(self._newest_commit_hash)
         except OSError as error:
-            log.error('Unable to write version file, update not complete: {msg}'.format(msg=error),)
+            log.error(f'Unable to write version file, update not complete: {error}')
             return False
 
         except Exception as error:
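The .format() call and the f-string render the same message, since both end up calling str() on the error; the f-string is simply the shorter spelling (and the stray trailing comma goes away). A quick check with a hypothetical error value:

    error = OSError('No space left on device')  # hypothetical error
    old = 'Unable to write version file, update not complete: {msg}'.format(msg=error)
    new = f'Unable to write version file, update not complete: {error}'
    assert old == new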
tox.ini (6 changes)
@@ -27,7 +27,7 @@ deps =
     pytest-cov
     -rrequirements.txt
 commands =
-    {posargs:pytest --cov --cov-report=term-missing --cov-branch tests}
+    {posargs:pytest -vvv --cov --cov-report=term-missing --cov-branch tests}
 
 [flake8]
 max-line-length = 79
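Adding -vvv to the default pytest invocation raises its verbosity, so individual tests and fuller failure detail show up in the tox log.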
@@ -47,8 +47,10 @@ exclude =
 ignore =
     ; -- flake8 --
     ; E501 line too long
+    ; E722 do not use bare 'except' (duplicates B001)
+    ; W503 line break before binary operator
     ; W505 doc line too long
-    E501, W505
+    E501, E722, W503, W505
 
     ; -- flake8-docstrings --
     ; D100 Missing docstring in public module
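The two new ignores match the inline comments: E722 is suppressed because flake8-bugbear's B001 already reports bare except clauses, and W503 is widely disabled because PEP 8 now prefers breaking lines before binary operators.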