From db1cc6145e7736202e8507bf5cb5b0b08c3836d1 Mon Sep 17 00:00:00 2001
From: Labrys of Knossos
Date: Sun, 18 Dec 2022 01:03:13 -0500
Subject: [PATCH 01/14] Add import tests

---
 tests/import_test.py | 172 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 172 insertions(+)
 create mode 100644 tests/import_test.py

diff --git a/tests/import_test.py b/tests/import_test.py
new file mode 100644
index 00000000..9b895f21
--- /dev/null
+++ b/tests/import_test.py
@@ -0,0 +1,172 @@
+def test_auto_process_imports():
+    import nzb2media.auto_process
+    assert nzb2media.auto_process
+
+    import nzb2media.auto_process.books
+    assert nzb2media.auto_process.books
+
+    import nzb2media.auto_process.comics
+    assert nzb2media.auto_process.comics
+
+    import nzb2media.auto_process.common
+    assert nzb2media.auto_process.common
+
+    import nzb2media.auto_process.games
+    assert nzb2media.auto_process.games
+
+    import nzb2media.auto_process.movies
+    assert nzb2media.auto_process.movies
+
+    import nzb2media.auto_process.music
+    assert nzb2media.auto_process.music
+
+    import nzb2media.auto_process.tv
+    assert nzb2media.auto_process.tv
+
+
+def test_import_extractor():
+    import nzb2media.extractor
+    assert nzb2media.extractor
+
+
+def test_import_managers():
+    import nzb2media.managers
+    assert nzb2media.managers
+
+    import nzb2media.managers.pymedusa
+    assert nzb2media.managers.pymedusa
+
+    import nzb2media.managers.sickbeard
+    assert nzb2media.managers.sickbeard
+
+
+def test_import_nzb():
+    import nzb2media.nzb
+    assert nzb2media.nzb
+
+    import nzb2media.nzb.configuration
+    assert nzb2media.nzb.configuration
+
+
+def test_import_plugins():
+    import nzb2media.plugins
+    assert nzb2media.plugins
+
+    import nzb2media.plugins.plex
+    assert nzb2media.plugins.plex
+
+    import nzb2media.plugins.subtitles
+    assert nzb2media.plugins.subtitles
+
+
+def test_import_processor():
+    import nzb2media.processor
+    assert nzb2media.processor
+
+    import nzb2media.processor.manual
+    assert nzb2media.processor.manual
+
+    import nzb2media.processor.nzb
+    assert nzb2media.processor.nzb
+
+    import nzb2media.processor.nzbget
+    assert nzb2media.processor.nzbget
+
+    import nzb2media.processor.sab
+    assert nzb2media.processor.sab
+
+
+def test_import_torrent():
+    import nzb2media.torrent
+    assert nzb2media.torrent
+
+    import nzb2media.torrent.configuration
+    assert nzb2media.torrent.configuration
+
+    import nzb2media.torrent.deluge
+    assert nzb2media.torrent.deluge
+
+    import nzb2media.torrent.qbittorrent
+    assert nzb2media.torrent.qbittorrent
+
+    import nzb2media.torrent.synology
+    assert nzb2media.torrent.synology
+
+    import nzb2media.torrent.transmission
+    assert nzb2media.torrent.transmission
+
+    import nzb2media.torrent.utorrent
+    assert nzb2media.torrent.utorrent
+
+
+def test_import_utils():
+    import nzb2media.utils
+    assert nzb2media.utils
+
+    import nzb2media.utils.common
+    assert nzb2media.utils.common
+
+    import nzb2media.utils.download_info
+    assert nzb2media.utils.download_info
+
+    import nzb2media.utils.encoding
+    assert nzb2media.utils.encoding
+
+    import nzb2media.utils.files
+    assert nzb2media.utils.files
+
+    import nzb2media.utils.identification
+    assert nzb2media.utils.identification
+
+    import nzb2media.utils.links
+    assert nzb2media.utils.links
+
+    import nzb2media.utils.naming
+    assert nzb2media.utils.naming
+
+    import nzb2media.utils.network
+    assert nzb2media.utils.network
+
+    import nzb2media.utils.nzb
+    assert nzb2media.utils.nzb
+
+    import nzb2media.utils.parsers
+    assert nzb2media.utils.parsers
+
+    import 
nzb2media.utils.paths + assert nzb2media.utils.paths + + import nzb2media.utils.processes + assert nzb2media.utils.processes + + import nzb2media.utils.torrent + assert nzb2media.utils.torrent + + +def test_import_nzb2media(): + import nzb2media + assert nzb2media + + import nzb2media.configuration + assert nzb2media.configuration + + import nzb2media.databases + assert nzb2media.databases + + import nzb2media.github_api + assert nzb2media.github_api + + import nzb2media.main_db + assert nzb2media.main_db + + import nzb2media.scene_exceptions + assert nzb2media.scene_exceptions + + import nzb2media.transcoder + assert nzb2media.transcoder + + import nzb2media.user_scripts + assert nzb2media.user_scripts + + import nzb2media.version_check + assert nzb2media.version_check From 78f28f382eaa1773bda30ace04abc568df4d8720 Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 18 Dec 2022 02:29:40 -0500 Subject: [PATCH 02/14] Fix tests and flake --- nzb2media/__init__.py | 12 +++++------- nzb2media/auto_process/books.py | 1 - nzb2media/auto_process/comics.py | 1 - nzb2media/auto_process/games.py | 1 - nzb2media/auto_process/movies.py | 1 - nzb2media/auto_process/music.py | 1 - nzb2media/auto_process/tv.py | 5 ++--- nzb2media/extractor/__init__.py | 2 +- nzb2media/managers/sickbeard.py | 4 ++-- nzb2media/plugins/subtitles.py | 2 +- nzb2media/processor/sab.py | 3 ++- nzb2media/transcoder.py | 5 +++-- nzb2media/utils/parsers.py | 2 +- nzb2media/version_check.py | 2 +- tox.ini | 6 ++++-- 15 files changed, 22 insertions(+), 26 deletions(-) diff --git a/nzb2media/__init__.py b/nzb2media/__init__.py index feaaf24c..6222d4ef 100644 --- a/nzb2media/__init__.py +++ b/nzb2media/__init__.py @@ -202,13 +202,11 @@ FORKS: typing.Mapping[str, typing.Mapping] = { ALL_FORKS = { k: None for k in set( - list( - itertools.chain.from_iterable( - [ - FORKS[x].keys() - for x in FORKS.keys() - ], - ), + itertools.chain.from_iterable( + [ + FORKS[x].keys() + for x in FORKS.keys() + ], ), ) } diff --git a/nzb2media/auto_process/books.py b/nzb2media/auto_process/books.py index f7b573b6..32221740 100644 --- a/nzb2media/auto_process/books.py +++ b/nzb2media/auto_process/books.py @@ -6,7 +6,6 @@ import requests import nzb2media from nzb2media.auto_process.common import ProcessResult -from nzb2media.utils.common import flatten from nzb2media.utils.encoding import convert_to_ascii from nzb2media.utils.network import server_responding from nzb2media.utils.paths import remote_dir diff --git a/nzb2media/auto_process/comics.py b/nzb2media/auto_process/comics.py index 2a9298a2..ddff35ea 100644 --- a/nzb2media/auto_process/comics.py +++ b/nzb2media/auto_process/comics.py @@ -7,7 +7,6 @@ import requests import nzb2media from nzb2media.auto_process.common import ProcessResult -from nzb2media.utils.common import flatten from nzb2media.utils.encoding import convert_to_ascii from nzb2media.utils.network import server_responding from nzb2media.utils.paths import remote_dir diff --git a/nzb2media/auto_process/games.py b/nzb2media/auto_process/games.py index 332a24cd..14427aef 100644 --- a/nzb2media/auto_process/games.py +++ b/nzb2media/auto_process/games.py @@ -8,7 +8,6 @@ import requests import nzb2media from nzb2media.auto_process.common import ProcessResult -from nzb2media.utils.common import flatten from nzb2media.utils.encoding import convert_to_ascii from nzb2media.utils.network import server_responding diff --git a/nzb2media/auto_process/movies.py b/nzb2media/auto_process/movies.py index 12028a18..7d875976 100644 --- 
a/nzb2media/auto_process/movies.py +++ b/nzb2media/auto_process/movies.py @@ -15,7 +15,6 @@ from nzb2media.auto_process.common import completed_download_handling from nzb2media.plugins.subtitles import import_subs from nzb2media.plugins.subtitles import rename_subs from nzb2media.scene_exceptions import process_all_exceptions -from nzb2media.utils.common import flatten from nzb2media.utils.encoding import convert_to_ascii from nzb2media.utils.files import list_media_files from nzb2media.utils.identification import find_imdbid diff --git a/nzb2media/auto_process/music.py b/nzb2media/auto_process/music.py index 2e31b535..67c85e08 100644 --- a/nzb2media/auto_process/music.py +++ b/nzb2media/auto_process/music.py @@ -11,7 +11,6 @@ import nzb2media from nzb2media.auto_process.common import ProcessResult from nzb2media.auto_process.common import command_complete from nzb2media.scene_exceptions import process_all_exceptions -from nzb2media.utils.common import flatten from nzb2media.utils.encoding import convert_to_ascii from nzb2media.utils.files import list_media_files from nzb2media.utils.network import server_responding diff --git a/nzb2media/auto_process/tv.py b/nzb2media/auto_process/tv.py index c2e3ea26..39c29b5d 100644 --- a/nzb2media/auto_process/tv.py +++ b/nzb2media/auto_process/tv.py @@ -394,8 +394,7 @@ def process( else: s = requests.Session() - log.debug(f'Opening URL: {url} with params: {fork_params}', section, - ) + log.debug(f'Opening URL: {url} with params: {fork_params}') if not apikey and username and password: login = f'{web_root}/login' login_params = {'username': username, 'password': password} @@ -569,7 +568,7 @@ def process( # f'{section}: Failed to post-process {input_name}' # ) - url2 = nzb2media.utils.common.create_url(scheme, host, port, route) + url2 = nzb2media.utils.common.create_url(scheme, host, port, route2) if completed_download_handling(url2, headers, section=section): log.debug(f'The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {section}.') return ProcessResult( diff --git a/nzb2media/extractor/__init__.py b/nzb2media/extractor/__init__.py index 6b1c7cb8..c2c25b70 100644 --- a/nzb2media/extractor/__init__.py +++ b/nzb2media/extractor/__init__.py @@ -199,7 +199,7 @@ def extract(file_path, output_destination): if res == 0: # Both Linux and Windows return 0 for successful. 
log.info(f'EXTRACTOR: Extraction was successful for {file_path} to {output_destination}') success = 1 - elif len(passwords) > 0 and not 'gunzip' in cmd: + elif len(passwords) > 0 and 'gunzip' not in cmd: log.info('EXTRACTOR: Attempting to extract with passwords') for password in passwords: if ( diff --git a/nzb2media/managers/sickbeard.py b/nzb2media/managers/sickbeard.py index 3aac8329..e877d39e 100644 --- a/nzb2media/managers/sickbeard.py +++ b/nzb2media/managers/sickbeard.py @@ -161,7 +161,7 @@ class InitSickBeard: token = oauth_token['access_token'] response = requests.get( url, - headers={f'Authorization': f'Bearer {token}'}, + headers={'Authorization': f'Bearer {token}'}, stream=True, verify=False, ) @@ -236,7 +236,7 @@ class InitSickBeard: log.debug(f'Removing excess parameters: ' f'{sorted(excess_parameters)}') rem_params.extend(excess_parameters) return rem_params, True - except: + except Exception: log.error('Failed to identify optionalParameters') return rem_params, False diff --git a/nzb2media/plugins/subtitles.py b/nzb2media/plugins/subtitles.py index 97c9278b..93703cf5 100644 --- a/nzb2media/plugins/subtitles.py +++ b/nzb2media/plugins/subtitles.py @@ -87,7 +87,7 @@ def rename_subs(path): lan = Language.fromname(word.lower()) if lan: break - except: # if we didn't find a language, try next word. + except Exception: # if we didn't find a language, try next word. continue # rename the sub file as name.lan.ext if not lan: diff --git a/nzb2media/processor/sab.py b/nzb2media/processor/sab.py index 6bee57a4..578677bc 100644 --- a/nzb2media/processor/sab.py +++ b/nzb2media/processor/sab.py @@ -26,7 +26,8 @@ def process_script(): def process(args): - """ + """Process job from SABnzb. + SABnzbd arguments: 1. The final directory of the job (full path) 2. The original name of the NZB file diff --git a/nzb2media/transcoder.py b/nzb2media/transcoder.py index 98559dcd..dbe8ba3a 100644 --- a/nzb2media/transcoder.py +++ b/nzb2media/transcoder.py @@ -922,7 +922,7 @@ def mount_iso( nzb2media.MOUNTED = ( mount_point # Allows us to verify this has been done and then cleanup. ) - for root, dirs, files in os.walk(mount_point): + for root, _dirs, files in os.walk(mount_point): for file in files: full_path = os.path.join(root, file) if ( @@ -1118,7 +1118,8 @@ def combine_mts(mts_path): def combine_cd(combine): new_files = [] for item in { - re.match('(.+)[cC][dD][0-9].', item).groups()[0] for item in combine + re.match('(.+)[cC][dD][0-9].', ea_item).groups()[0] + for ea_item in combine }: concat = '' for n in range(99): diff --git a/nzb2media/utils/parsers.py b/nzb2media/utils/parsers.py index f9edc490..3e237090 100644 --- a/nzb2media/utils/parsers.py +++ b/nzb2media/utils/parsers.py @@ -110,7 +110,7 @@ def parse_synods(args): task = [task for task in tasks if task['id'] == input_id][0] input_id = task['id'] input_directory = task['additional']['detail']['destination'] - except: + except Exception: log.error('unable to find download details in Synology DS') # Syno paths appear to be relative. 
Let's test to see if the returned path exists, and if not append to /volume1/ if not os.path.isdir(input_directory): diff --git a/nzb2media/version_check.py b/nzb2media/version_check.py index a13e5e05..15a36029 100644 --- a/nzb2media/version_check.py +++ b/nzb2media/version_check.py @@ -535,7 +535,7 @@ class SourceUpdateManager(UpdateManager): with open(version_path, 'w') as ver_file: ver_file.write(self._newest_commit_hash) except OSError as error: - log.error('Unable to write version file, update not complete: {msg}'.format(msg=error),) + log.error(f'Unable to write version file, update not complete: {error}') return False except Exception as error: diff --git a/tox.ini b/tox.ini index 92d36ed8..3a9471b1 100644 --- a/tox.ini +++ b/tox.ini @@ -27,7 +27,7 @@ deps = pytest-cov -rrequirements.txt commands = - {posargs:pytest --cov --cov-report=term-missing --cov-branch tests} + {posargs:pytest -vvv --cov --cov-report=term-missing --cov-branch tests} [flake8] max-line-length = 79 @@ -47,8 +47,10 @@ exclude = ignore = ; -- flake8 -- ; E501 line too long +; E722 do not use bare 'except' (duplicates B001) +; W503 line break before binary operator ; W505 doc line too long - E501, W505 + E501, E722, W503, W505 ; -- flake8-docstrings -- ; D100 Missing docstring in public module From 43ae9805e3621fe9fc0455e7a7bf5ab8626af4f3 Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 18 Dec 2022 02:31:52 -0500 Subject: [PATCH 03/14] Use f-strings --- nzb2media/version_check.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/nzb2media/version_check.py b/nzb2media/version_check.py index 15a36029..c96db2ca 100644 --- a/nzb2media/version_check.py +++ b/nzb2media/version_check.py @@ -116,7 +116,7 @@ class GitUpdateManager(UpdateManager): else: main_git = 'git' - log.debug('Checking if we can use git commands: {git} {cmd}'.format(git=main_git, cmd=test_cmd)) + log.debug(f'Checking if we can use git commands: {main_git} {test_cmd}') output, err, exit_status = self._run_git(main_git, test_cmd) if exit_status == 0: @@ -141,7 +141,7 @@ class GitUpdateManager(UpdateManager): log.debug('Trying known alternative git locations') for cur_git in alternative_git: - log.debug('Checking if we can use git commands: {git} {cmd}'.format(git=cur_git, cmd=test_cmd)) + log.debug(f'Checking if we can use git commands: {cur_git} {test_cmd}') output, err, exit_status = self._run_git(cur_git, test_cmd) if exit_status == 0: @@ -172,7 +172,7 @@ class GitUpdateManager(UpdateManager): cmd = f'{git_path} {args}' try: - log.debug('Executing {cmd} with your shell in {directory}'.format(cmd=cmd, directory=nzb2media.APP_ROOT)) + log.debug(f'Executing {cmd} with your shell in {nzb2media.APP_ROOT}') p = subprocess.Popen( cmd, stdin=subprocess.PIPE, @@ -203,7 +203,7 @@ class GitUpdateManager(UpdateManager): log.debug(f'{cmd} returned : {output}') else: if nzb2media.LOG_GIT: - log.debug('{cmd} returned : {output}, treat as error for now'.format(cmd=cmd, output=output)) + log.debug(f'{cmd} returned : {output}, treat as error for now') exit_status = 1 return output, err, exit_status @@ -297,11 +297,11 @@ class GitUpdateManager(UpdateManager): log.debug('git didn\'t return numbers for behind and ahead, not using it') return - log.debug('cur_commit = {current} % (newest_commit)= {new}, num_commits_behind = {x}, num_commits_ahead = {y}'.format(current=self._cur_commit_hash, new=self._newest_commit_hash, x=self._num_commits_behind, y=self._num_commits_ahead)) + log.debug(f'cur_commit = 
{self._cur_commit_hash} % (newest_commit)= {self._newest_commit_hash}, num_commits_behind = {self._num_commits_behind}, num_commits_ahead = {self._num_commits_ahead}') def set_newest_text(self): if self._num_commits_ahead: - log.error('Local branch is ahead of {branch}. Automatic update not possible.'.format(branch=self.branch)) + log.error(f'Local branch is ahead of {self.branch}. Automatic update not possible.') elif self._num_commits_behind: log.info('There is a newer version available (you\'re {x} commit{s} behind)'.format(x=self._num_commits_behind, s='s' if self._num_commits_behind > 1 else '')) else: @@ -434,7 +434,7 @@ class SourceUpdateManager(UpdateManager): # when _cur_commit_hash doesn't match anything _num_commits_behind == 100 self._num_commits_behind += 1 - log.debug('cur_commit = {current} % (newest_commit)= {new}, num_commits_behind = {x}'.format(current=self._cur_commit_hash, new=self._newest_commit_hash, x=self._num_commits_behind)) + log.debug(f'cur_commit = {self._cur_commit_hash} % (newest_commit)= {self._newest_commit_hash}, num_commits_behind = {self._num_commits_behind}') def set_newest_text(self): @@ -451,7 +451,7 @@ class SourceUpdateManager(UpdateManager): def update(self): """Download and install latest source tarball from github.""" tar_download_url = ( - 'https://github.com/{org}/{repo}/tarball/{branch}'.format(org=self.github_repo_user, repo=self.github_repo, branch=self.branch) + f'https://github.com/{self.github_repo_user}/{self.github_repo}/tarball/{self.branch}' ) version_path = os.path.join(nzb2media.APP_ROOT, 'version.txt') @@ -474,11 +474,11 @@ class SourceUpdateManager(UpdateManager): urlretrieve(tar_download_url, tar_download_path) if not os.path.isfile(tar_download_path): - log.error('Unable to retrieve new version from {url}, can\'t update'.format(url=tar_download_url)) + log.error(f'Unable to retrieve new version from {tar_download_url}, can\'t update') return False if not tarfile.is_tarfile(tar_download_path): - log.error('Retrieved version from {url} is corrupt, can\'t update'.format(url=tar_download_url)) + log.error(f'Retrieved version from {tar_download_url} is corrupt, can\'t update') return False # extract to sb-update dir @@ -503,7 +503,7 @@ class SourceUpdateManager(UpdateManager): content_dir = os.path.join(sb_update_dir, update_dir_contents[0]) # walk temp folder and move files to main folder - log.info('Moving files from {source} to {destination}'.format(source=content_dir, destination=nzb2media.APP_ROOT)) + log.info(f'Moving files from {content_dir} to {nzb2media.APP_ROOT}') for dirname, _, filenames in os.walk( content_dir, ): # @UnusedVariable @@ -521,7 +521,7 @@ class SourceUpdateManager(UpdateManager): os.remove(new_path) os.renames(old_path, new_path) except Exception as error: - log.debug('Unable to update {path}: {msg}'.format(path=new_path, msg=error)) + log.debug(f'Unable to update {new_path}: {error}') # Trash the updated file without moving in new path os.remove(old_path) continue From 6535c4f9b90b3be25e9c1bf132fa3e85c5938b19 Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 18 Dec 2022 02:33:17 -0500 Subject: [PATCH 04/14] Fix logging --- nzb2media/auto_process/music.py | 2 +- nzb2media/managers/pymedusa.py | 2 +- nzb2media/managers/sickbeard.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nzb2media/auto_process/music.py b/nzb2media/auto_process/music.py index 67c85e08..8681c10d 100644 --- a/nzb2media/auto_process/music.py +++ b/nzb2media/auto_process/music.py @@ -218,7 +218,7 @@ def 
process( and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name ): - log.postprocess(f'Deleting failed files and folder {dir_name}') + log.debug(f'Deleting failed files and folder {dir_name}') remove_dir(dir_name) # Return as failed to flag this in the downloader. return ProcessResult.failure( diff --git a/nzb2media/managers/pymedusa.py b/nzb2media/managers/pymedusa.py index b3746b15..cf475fff 100644 --- a/nzb2media/managers/pymedusa.py +++ b/nzb2media/managers/pymedusa.py @@ -185,7 +185,7 @@ class PyMedusaApiV2(SickBeard): # Log Medusa's PP logs here. if response.get('output'): for line in response['output']: - log.postprocess(line) + log.debug(line) # For now this will most likely always be True. # In the future we could return an exit state for when the PP in diff --git a/nzb2media/managers/sickbeard.py b/nzb2media/managers/sickbeard.py index e877d39e..3a7b2f77 100644 --- a/nzb2media/managers/sickbeard.py +++ b/nzb2media/managers/sickbeard.py @@ -605,7 +605,7 @@ class SickBeard: for line in response.iter_lines(): if line: line = line.decode('utf-8') - log.postprocess(line) + log.debug(line) # if 'Moving file from' in line: # input_name = os.path.split(line)[1] # if 'added to the queue' in line: From 87fa36eed22cc7cbcda72c40a6b8ad27d11e27ac Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 18 Dec 2022 02:48:04 -0500 Subject: [PATCH 05/14] Fix circular imports --- nzb2media/__init__.py | 45 +++++++++----------------------- nzb2media/auto_process/movies.py | 6 +++-- nzb2media/auto_process/music.py | 3 ++- nzb2media/auto_process/tv.py | 6 +++-- nzb2media/utils/processes.py | 9 +++---- 5 files changed, 26 insertions(+), 43 deletions(-) diff --git a/nzb2media/__init__.py b/nzb2media/__init__.py index 6222d4ef..0dc0941f 100644 --- a/nzb2media/__init__.py +++ b/nzb2media/__init__.py @@ -12,6 +12,19 @@ import sys import time import typing +from nzb2media import main_db +from nzb2media import version_check +from nzb2media import databases +from nzb2media.configuration import config +from nzb2media.nzb.configuration import configure_nzbs +from nzb2media.plugins.plex import configure_plex +from nzb2media.torrent.configuration import configure_torrent_class +from nzb2media.torrent.configuration import configure_torrents +from nzb2media.utils.files import make_dir +from nzb2media.utils.network import wake_up +from nzb2media.utils.processes import RunningProcess +from nzb2media.utils.processes import restart + log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) @@ -47,38 +60,6 @@ CONFIG_TV_FILE = APP_ROOT / 'autoProcessTv.cfg' TEST_FILE = APP_ROOT / 'tests' / 'test.mp4' MYAPP = None -from nzb2media import main_db -from nzb2media import version_check -from nzb2media import databases -from nzb2media import transcoder -from nzb2media.configuration import config -from nzb2media.nzb.configuration import configure_nzbs -from nzb2media.plugins.plex import configure_plex -from nzb2media.torrent.configuration import configure_torrent_class -from nzb2media.torrent.configuration import configure_torrents -from nzb2media.utils.common import clean_dir -from nzb2media.utils.common import flatten -from nzb2media.utils.common import get_dirs -from nzb2media.utils.download_info import get_download_info -from nzb2media.utils.download_info import update_download_info_status -from nzb2media.utils.files import copy_link -from nzb2media.utils.files import extract_files -from nzb2media.utils.files import list_media_files -from nzb2media.utils.files import make_dir -from 
nzb2media.utils.files import sanitize_name -from nzb2media.utils.identification import category_search -from nzb2media.utils.network import wake_up -from nzb2media.utils.parsers import parse_args -from nzb2media.utils.paths import rchmod -from nzb2media.utils.paths import remove_dir -from nzb2media.utils.paths import remove_read_only -from nzb2media.utils.processes import RunningProcess -from nzb2media.utils.processes import restart -from nzb2media.utils.torrent import pause_torrent -from nzb2media.utils.torrent import remove_torrent -from nzb2media.utils.torrent import resume_torrent - - __version__ = '12.1.11' # Client Agents diff --git a/nzb2media/auto_process/movies.py b/nzb2media/auto_process/movies.py index 7d875976..db73c7c4 100644 --- a/nzb2media/auto_process/movies.py +++ b/nzb2media/auto_process/movies.py @@ -16,11 +16,13 @@ from nzb2media.plugins.subtitles import import_subs from nzb2media.plugins.subtitles import rename_subs from nzb2media.scene_exceptions import process_all_exceptions from nzb2media.utils.encoding import convert_to_ascii +from nzb2media.utils.files import extract_files from nzb2media.utils.files import list_media_files from nzb2media.utils.identification import find_imdbid from nzb2media.utils.network import find_download from nzb2media.utils.network import server_responding from nzb2media.utils.nzb import report_nzb +from nzb2media.utils.paths import rchmod from nzb2media.utils.paths import remote_dir from nzb2media.utils.paths import remove_dir @@ -142,7 +144,7 @@ def process( and extract ): log.debug(f'Checking for archives to extract in directory: {dir_name}') - nzb2media.extract_files(dir_name) + extract_files(dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) good_files = 0 @@ -206,7 +208,7 @@ def process( log.debug(f'Config setting \'chmodDirectory\' currently set to {oct(chmod_directory)}') if chmod_directory: log.info(f'Attempting to set the octal permission of \'{oct(chmod_directory)}\' on directory \'{dir_name}\'') - nzb2media.rchmod(dir_name, chmod_directory) + rchmod(dir_name, chmod_directory) else: log.error(f'Transcoding failed for files in {dir_name}') return ProcessResult( diff --git a/nzb2media/auto_process/music.py b/nzb2media/auto_process/music.py index 8681c10d..af363f63 100644 --- a/nzb2media/auto_process/music.py +++ b/nzb2media/auto_process/music.py @@ -12,6 +12,7 @@ from nzb2media.auto_process.common import ProcessResult from nzb2media.auto_process.common import command_complete from nzb2media.scene_exceptions import process_all_exceptions from nzb2media.utils.encoding import convert_to_ascii +from nzb2media.utils.files import extract_files from nzb2media.utils.files import list_media_files from nzb2media.utils.network import server_responding from nzb2media.utils.paths import remote_dir @@ -92,7 +93,7 @@ def process( and extract ): log.debug(f'Checking for archives to extract in directory: {dir_name}') - nzb2media.extract_files(dir_name) + extract_files(dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) # if listMediaFiles(dir_name, media=False, audio=True, meta=False, archives=False) and status: diff --git a/nzb2media/auto_process/tv.py b/nzb2media/auto_process/tv.py index 39c29b5d..8d1d9de0 100644 --- a/nzb2media/auto_process/tv.py +++ b/nzb2media/auto_process/tv.py @@ -22,9 +22,11 @@ from nzb2media.plugins.subtitles import rename_subs from nzb2media.scene_exceptions import process_all_exceptions from nzb2media.utils.common import flatten from nzb2media.utils.encoding import convert_to_ascii 
+from nzb2media.utils.files import extract_files from nzb2media.utils.files import list_media_files from nzb2media.utils.network import server_responding from nzb2media.utils.nzb import report_nzb +from nzb2media.utils.paths import rchmod from nzb2media.utils.paths import remote_dir from nzb2media.utils.paths import remove_dir @@ -153,7 +155,7 @@ def process( and extract ): log.debug(f'Checking for archives to extract in directory: {dir_name}') - nzb2media.extract_files(dir_name) + extract_files(dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) if list_media_files( @@ -228,7 +230,7 @@ def process( log.debug(f'Config setting \'chmodDirectory\' currently set to {oct(chmod_directory)}') if chmod_directory: log.info(f'Attempting to set the octal permission of \'{oct(chmod_directory)}\' on directory \'{dir_name}\'') - nzb2media.rchmod(dir_name, chmod_directory) + rchmod(dir_name, chmod_directory) else: log.error(f'FAILED: Transcoding failed for files in {dir_name}') return ProcessResult.failure( diff --git a/nzb2media/utils/processes.py b/nzb2media/utils/processes.py index c840fc0f..1056f73f 100644 --- a/nzb2media/utils/processes.py +++ b/nzb2media/utils/processes.py @@ -8,9 +8,6 @@ import sys import typing import nzb2media -from nzb2media import APP_FILENAME -from nzb2media import SYS_ARGV -from nzb2media import version_check if os.name == 'nt': from win32event import CreateMutex @@ -103,16 +100,16 @@ else: def restart(): - install_type = version_check.CheckVersion().install_type + install_type = nzb2media.version_check.CheckVersion().install_type status = 0 popen_list = [] if install_type in ('git', 'source'): - popen_list = [sys.executable, APP_FILENAME] + popen_list = [sys.executable, nzb2media.APP_FILENAME] if popen_list: - popen_list += SYS_ARGV + popen_list += nzb2media.SYS_ARGV log.info(f'Restarting nzbToMedia with {popen_list}') p = subprocess.Popen(popen_list, cwd=os.getcwd()) p.wait() From 35298fc6d90f9af7d0bbde854844afe9351e768c Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 18 Dec 2022 03:30:28 -0500 Subject: [PATCH 06/14] Fix unnecessary not --- nzb2media/utils/torrent.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nzb2media/utils/torrent.py b/nzb2media/utils/torrent.py index 794b2c30..4897ef03 100644 --- a/nzb2media/utils/torrent.py +++ b/nzb2media/utils/torrent.py @@ -23,7 +23,7 @@ torrent_clients = { def create_torrent_class(client_agent): - if not nzb2media.APP_NAME == 'TorrentToMedia.py': + if nzb2media.APP_NAME != 'TorrentToMedia.py': return # Skip loading Torrent for NZBs. 
try: @@ -53,7 +53,7 @@ def pause_torrent(client_agent, input_hash, input_id, input_name): def resume_torrent(client_agent, input_hash, input_id, input_name): - if not nzb2media.TORRENT_RESUME == 1: + if nzb2media.TORRENT_RESUME != 1: return log.debug(f'Starting torrent {input_name} in {client_agent}') try: From 71a242ccc1131b7f60cd10c41fcfc1d24914a55a Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 18 Dec 2022 03:56:58 -0500 Subject: [PATCH 07/14] Refactor subprocess.Popen calls --- nzb2media/__init__.py | 194 ++++++++++---------------------- nzb2media/extractor/__init__.py | 11 +- nzb2media/scene_exceptions.py | 11 +- nzb2media/transcoder.py | 93 ++++++--------- nzb2media/user_scripts.py | 4 +- nzb2media/utils/processes.py | 6 +- nzb2media/version_check.py | 47 ++++---- 7 files changed, 127 insertions(+), 239 deletions(-) diff --git a/nzb2media/__init__.py b/nzb2media/__init__.py index 0dc0941f..f9a6bc7e 100644 --- a/nzb2media/__init__.py +++ b/nzb2media/__init__.py @@ -11,6 +11,7 @@ import subprocess import sys import time import typing +from subprocess import PIPE, DEVNULL from nzb2media import main_db from nzb2media import version_check @@ -35,6 +36,16 @@ except ImportError: sys.exit('Please install pywin32') +def which(name): + proc = subprocess.Popen(['which', name], stdout=PIPE) + try: + proc_out, proc_err = proc.communicate() + except Exception: + return '' + else: + return proc_out.strip().decode() + + def module_path(module=__file__): try: path = pathlib.Path(module.__file__) @@ -288,7 +299,7 @@ MOUNTED = None GETSUBS = False TRANSCODE = None CONCAT = None -FFMPEG_PATH = None +FFMPEG_PATH = '' SYS_PATH = None DUPLICATE = None IGNOREEXTENSIONS = [] @@ -532,40 +543,36 @@ def configure_remote_paths(): def configure_niceness(): global NICENESS - - with open(os.devnull, 'w') as devnull: + try: + proc = subprocess.Popen(['nice'], stdout=DEVNULL, stderr=DEVNULL) + proc.communicate() + niceness = CFG['Posix']['niceness'] + if ( + len(niceness.split(',')) > 1 + ): # Allow passing of absolute command, not just value. + NICENESS.extend(niceness.split(',')) + else: + NICENESS.extend(['nice', f'-n{int(niceness)}']) + except Exception: + pass + try: + proc = subprocess.Popen(['ionice'], stdout=DEVNULL, stderr=DEVNULL) + proc.communicate() try: - subprocess.Popen( - ['nice'], stdout=devnull, stderr=devnull, - ).communicate() - niceness = CFG['Posix']['niceness'] - if ( - len(niceness.split(',')) > 1 - ): # Allow passing of absolute command, not just value. 
- NICENESS.extend(niceness.split(',')) + ionice = CFG['Posix']['ionice_class'] + NICENESS.extend(['ionice', f'-c{int(ionice)}']) + except Exception: + pass + try: + if 'ionice' in NICENESS: + ionice = CFG['Posix']['ionice_classdata'] + NICENESS.extend([f'-n{int(ionice)}']) else: - NICENESS.extend(['nice', f'-n{int(niceness)}']) - except Exception: - pass - try: - subprocess.Popen( - ['ionice'], stdout=devnull, stderr=devnull, - ).communicate() - try: - ionice = CFG['Posix']['ionice_class'] - NICENESS.extend(['ionice', f'-c{int(ionice)}']) - except Exception: - pass - try: - if 'ionice' in NICENESS: - ionice = CFG['Posix']['ionice_classdata'] - NICENESS.extend([f'-n{int(ionice)}']) - else: - NICENESS.extend(['ionice', f'-n{int(ionice)}']) - except Exception: - pass + NICENESS.extend(['ionice', f'-n{int(ionice)}']) except Exception: pass + except Exception: + pass def configure_containers(): @@ -1413,123 +1420,36 @@ def configure_utility_locations(): else: if SYS_PATH: os.environ['PATH'] += ':' + SYS_PATH - try: - SEVENZIP = ( - subprocess.Popen(['which', '7z'], stdout=subprocess.PIPE) - .communicate()[0] - .strip() - .decode() - ) - except Exception: - pass + + SEVENZIP = which('7z') or which('7zr') or which('7za') if not SEVENZIP: - try: - SEVENZIP = ( - subprocess.Popen(['which', '7zr'], stdout=subprocess.PIPE) - .communicate()[0] - .strip() - .decode() - ) - except Exception: - pass - if not SEVENZIP: - try: - SEVENZIP = ( - subprocess.Popen(['which', '7za'], stdout=subprocess.PIPE) - .communicate()[0] - .strip() - .decode() - ) - except Exception: - pass - if not SEVENZIP: - SEVENZIP = None log.warning('Failed to locate 7zip. Transcoding of disk images and extraction of .7z files will not be possible!') - try: - PAR2CMD = ( - subprocess.Popen(['which', 'par2'], stdout=subprocess.PIPE) - .communicate()[0] - .strip() - .decode() - ) - except Exception: - pass + + PAR2CMD = which('par2') if not PAR2CMD: PAR2CMD = None log.warning('Failed to locate par2. Repair and rename using par files will not be possible!') - if os.path.isfile(os.path.join(FFMPEG_PATH, 'ffmpeg')) or os.access( - os.path.join(FFMPEG_PATH, 'ffmpeg'), - os.X_OK, - ): - FFMPEG = os.path.join(FFMPEG_PATH, 'ffmpeg') - elif os.path.isfile(os.path.join(FFMPEG_PATH, 'avconv')) or os.access( - os.path.join(FFMPEG_PATH, 'avconv'), - os.X_OK, - ): - FFMPEG = os.path.join(FFMPEG_PATH, 'avconv') + + ffmpeg_bin = os.path.join(FFMPEG_PATH, 'ffmpeg') + avconv_bin = os.path.join(FFMPEG_PATH, 'avconv') + if os.path.isfile(ffmpeg_bin) or os.access(ffmpeg_bin, os.X_OK): + FFMPEG = ffmpeg_bin + elif os.path.isfile(avconv_bin) or os.access(avconv_bin, os.X_OK): + FFMPEG = avconv_bin else: - try: - FFMPEG = ( - subprocess.Popen( - ['which', 'ffmpeg'], stdout=subprocess.PIPE, - ) - .communicate()[0] - .strip() - .decode() - ) - except Exception: - pass - if not FFMPEG: - try: - FFMPEG = ( - subprocess.Popen( - ['which', 'avconv'], stdout=subprocess.PIPE, - ) - .communicate()[0] - .strip() - .decode() - ) - except Exception: - pass + FFMPEG = which('ffmpeg') or which('avconv') if not FFMPEG: FFMPEG = None log.warning('Failed to locate ffmpeg. 
Transcoding disabled!') log.warning('Install ffmpeg with x264 support to enable this feature ...') - - if os.path.isfile(os.path.join(FFMPEG_PATH, 'ffprobe')) or os.access( - os.path.join(FFMPEG_PATH, 'ffprobe'), - os.X_OK, - ): - FFPROBE = os.path.join(FFMPEG_PATH, 'ffprobe') - elif os.path.isfile(os.path.join(FFMPEG_PATH, 'avprobe')) or os.access( - os.path.join(FFMPEG_PATH, 'avprobe'), - os.X_OK, - ): - FFPROBE = os.path.join(FFMPEG_PATH, 'avprobe') + ffprobe_bin = os.path.join(FFMPEG_PATH, 'ffprobe') + avprobe_bin = os.path.join(FFMPEG_PATH, 'avprobe') + if os.path.isfile(ffprobe_bin) or os.access(ffprobe_bin, os.X_OK): + FFPROBE = ffprobe_bin + elif os.path.isfile(avprobe_bin) or os.access(avprobe_bin, os.X_OK): + FFPROBE = avprobe_bin else: - try: - FFPROBE = ( - subprocess.Popen( - ['which', 'ffprobe'], stdout=subprocess.PIPE, - ) - .communicate()[0] - .strip() - .decode() - ) - except Exception: - pass - if not FFPROBE: - try: - FFPROBE = ( - subprocess.Popen( - ['which', 'avprobe'], stdout=subprocess.PIPE, - ) - .communicate()[0] - .strip() - .decode() - ) - except Exception: - pass + FFPROBE = which('ffprobe') or which('avprobe') if not FFPROBE: FFPROBE = None if CHECK_MEDIA: diff --git a/nzb2media/extractor/__init__.py b/nzb2media/extractor/__init__.py index c2c25b70..1a3156c6 100644 --- a/nzb2media/extractor/__init__.py +++ b/nzb2media/extractor/__init__.py @@ -192,10 +192,9 @@ def extract(file_path, output_destination): cmd2 = cmd if 'gunzip' not in cmd: # gunzip doesn't support password cmd2.append('-p-') # don't prompt for password. - p = Popen( + res = Popen( cmd2, stdout=devnull, stderr=devnull, startupinfo=info, - ) # should extract files fine. - res = p.wait() + ).wait() # should extract files fine. if res == 0: # Both Linux and Windows return 0 for successful. log.info(f'EXTRACTOR: Extraction was successful for {file_path} to {output_destination}') success = 1 @@ -210,10 +209,10 @@ def extract(file_path, output_destination): # append password here. passcmd = f'-p{password}' cmd2.append(passcmd) - p = Popen( + proc = Popen( cmd2, stdout=devnull, stderr=devnull, startupinfo=info, - ) # should extract files fine. - res = p.wait() + ) + res = proc.wait() # should extract files fine. if (res >= 0 and platform == 'Windows') or res == 0: log.info(f'EXTRACTOR: Extraction was successful for {file_path} to {output_destination} using password: {password}') success = 1 diff --git a/nzb2media/scene_exceptions.py b/nzb2media/scene_exceptions.py index 8bc7b49f..ad909592 100644 --- a/nzb2media/scene_exceptions.py +++ b/nzb2media/scene_exceptions.py @@ -2,10 +2,10 @@ from __future__ import annotations import logging import os -import platform import re import shlex import subprocess +from subprocess import DEVNULL import nzb2media from nzb2media.utils.files import list_media_files @@ -212,10 +212,6 @@ def par2(dirname): if nzb2media.PAR2CMD and parfile: pwd = os.getcwd() # Get our Present Working Directory os.chdir(dirname) # set directory to run par on. 
- if platform.system() == 'Windows': - bitbucket = open('NUL') - else: - bitbucket = open('/dev/null') log.info(f'Running par2 on file {parfile}.') command = [nzb2media.PAR2CMD, 'r', parfile, '*'] cmd = '' @@ -223,9 +219,7 @@ def par2(dirname): cmd = f'{cmd} {item}' log.debug(f'calling command:{cmd}') try: - proc = subprocess.Popen( - command, stdout=bitbucket, stderr=bitbucket, - ) + proc = subprocess.Popen(command, stdout=DEVNULL, stderr=DEVNULL) proc.communicate() result = proc.returncode except Exception: @@ -233,7 +227,6 @@ def par2(dirname): if result == 0: log.info('par2 file processing succeeded') os.chdir(pwd) - bitbucket.close() # dict for custom groups diff --git a/nzb2media/transcoder.py b/nzb2media/transcoder.py index dbe8ba3a..a1f4ce29 100644 --- a/nzb2media/transcoder.py +++ b/nzb2media/transcoder.py @@ -11,6 +11,7 @@ import shutil import subprocess import sys import time +from subprocess import PIPE, DEVNULL from babelfish import Language @@ -100,22 +101,20 @@ def is_video_good(video: pathlib.Path, status, require_lan=None): return False -def zip_out(file, img, bitbucket): - procin = None +def zip_out(file, img): + proc = None if os.path.isfile(file): cmd = ['cat', file] else: cmd = [nzb2media.SEVENZIP, '-so', 'e', img, file] try: - procin = subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=bitbucket, - ) + proc = subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) except Exception: log.error(f'Extracting [{file}] has failed') - return procin + return proc -def get_video_details(videofile, img=None, bitbucket=None): +def get_video_details(videofile, img=None): video_details = {} result = 1 file = videofile @@ -138,13 +137,11 @@ def get_video_details(videofile, img=None, bitbucket=None): ] print_cmd(command) if img: - procin = zip_out(file, img, bitbucket) - proc = subprocess.Popen( - command, stdout=subprocess.PIPE, stdin=procin.stdout, - ) + procin = zip_out(file, img) + proc = subprocess.Popen(command, stdout=PIPE, stdin=procin.stdout) procin.stdout.close() else: - proc = subprocess.Popen(command, stdout=subprocess.PIPE) + proc = subprocess.Popen(command, stdout=PIPE) out, err = proc.communicate() result = proc.returncode video_details = json.loads(out.decode()) @@ -162,13 +159,11 @@ def get_video_details(videofile, img=None, bitbucket=None): ] print_cmd(command) if img: - procin = zip_out(file, img, bitbucket) - proc = subprocess.Popen( - command, stdout=subprocess.PIPE, stdin=procin.stdout, - ) + procin = zip_out(file, img) + proc = subprocess.Popen(command, stdout=PIPE, stdin=procin.stdout) procin.stdout.close() else: - proc = subprocess.Popen(command, stdout=subprocess.PIPE) + proc = subprocess.Popen(command, stdout=PIPE) out, err = proc.communicate() result = proc.returncode video_details = json.loads(out.decode()) @@ -200,7 +195,7 @@ def check_vid_file(video_details, result): return False -def build_commands(file, new_dir, movie_name, bitbucket): +def build_commands(file, new_dir, movie_name): if isinstance(file, str): input_file = file if 'concat:' in file: @@ -228,16 +223,14 @@ def build_commands(file, new_dir, movie_name, bitbucket): new_file = [] rem_vid = [] for vid in data['files']: - video_details, result = get_video_details(vid, img, bitbucket) + video_details, result = get_video_details(vid, img) if not check_vid_file( video_details, result, ): # lets not transcode menu or other clips that don't have audio and video. 
rem_vid.append(vid) data['files'] = [f for f in data['files'] if f not in rem_vid] new_file = {img: {'name': data['name'], 'files': data['files']}} - video_details, result = get_video_details( - data['files'][0], img, bitbucket, - ) + video_details, result = get_video_details(data['files'][0], img) input_file = '-' file = '-' @@ -752,7 +745,7 @@ def get_subs(file): return subfiles -def extract_subs(file, newfile_path, bitbucket): +def extract_subs(file, newfile_path): video_details, result = get_video_details(file) if not video_details: return @@ -815,9 +808,9 @@ def extract_subs(file, newfile_path, bitbucket): result = 1 # set result to failed in case call fails. try: proc = subprocess.Popen( - command, stdout=bitbucket, stderr=bitbucket, + command, stdout=DEVNULL, stderr=DEVNULL, ) - out, err = proc.communicate() + proc_out, proc_error = proc.communicate() result = proc.returncode except Exception: log.error('Extracting subtitle has failed') @@ -832,7 +825,7 @@ def extract_subs(file, newfile_path, bitbucket): log.error('Extracting subtitles has failed') -def process_list(it, new_dir, bitbucket): +def process_list(it, new_dir): rem_list = [] new_list = [] combine = [] @@ -846,7 +839,7 @@ def process_list(it, new_dir, bitbucket): and ext not in nzb2media.IGNOREEXTENSIONS ): log.debug(f'Attempting to rip disk image: {item}') - new_list.extend(rip_iso(item, new_dir, bitbucket)) + new_list.extend(rip_iso(item, new_dir)) rem_list.append(item) elif ( re.match('.+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]', item) @@ -907,9 +900,7 @@ def process_list(it, new_dir, bitbucket): return it, rem_list, new_list, success -def mount_iso( - item, new_dir, bitbucket, -): # Currently only supports Linux Mount when permissions allow. +def mount_iso(item, new_dir): # Currently only supports Linux Mount when permissions allow. if platform.system() == 'Windows': log.error(f'No mounting options available under Windows for image file {item}') return [] @@ -917,7 +908,7 @@ def mount_iso( make_dir(mount_point) cmd = ['mount', '-o', 'loop', item, mount_point] print_cmd(cmd) - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket) + proc = subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) out, err = proc.communicate() nzb2media.MOUNTED = ( mount_point # Allows us to verify this has been done and then cleanup. @@ -951,16 +942,15 @@ def mount_iso( return ['failure'] # If we got here, nothing matched our criteria -def rip_iso(item, new_dir, bitbucket): +def rip_iso(item, new_dir): new_files = [] failure_dir = 'failure' # Mount the ISO in your OS and call combineVTS. if not nzb2media.SEVENZIP: log.debug(f'No 7zip installed. Attempting to mount image file {item}') try: - new_files = mount_iso( - item, new_dir, bitbucket, - ) # Currently only works for Linux. + # Currently only works for Linux. + new_files = mount_iso(item, new_dir) except Exception: log.error(f'Failed to mount and extract from image file {item}') new_files = [failure_dir] @@ -969,7 +959,7 @@ def rip_iso(item, new_dir, bitbucket): try: log.debug(f'Attempting to extract .vob or .mts from image file {item}') print_cmd(cmd) - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket) + proc = subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) out, err = proc.communicate() file_match_gen = ( re.match( @@ -1040,7 +1030,7 @@ def rip_iso(item, new_dir, bitbucket): new_files.append({item: {'name': name, 'files': combined}}) if not new_files: log.error(f'No VIDEO_TS or BDMV/SOURCE folder found in image file. 
Attempting to mount and scan {item}') - new_files = mount_iso(item, new_dir, bitbucket) + new_files = mount_iso(item, new_dir) except Exception: log.error(f'Failed to extract from image file {item}') new_files = [failure_dir] @@ -1159,19 +1149,12 @@ def transcode_directory(dir_name): make_dir(new_dir) else: new_dir = dir_name - if platform.system() == 'Windows': - bitbucket = open('NUL') - else: - bitbucket = open('/dev/null') movie_name = os.path.splitext(os.path.split(dir_name)[1])[0] file_list = nzb2media.list_media_files( dir_name, media=True, audio=False, meta=False, archives=False, ) - file_list, rem_list, new_list, success = process_list( - file_list, new_dir, bitbucket, - ) + file_list, rem_list, new_list, success = process_list(file_list, new_dir) if not success: - bitbucket.close() return 1, dir_name for file in file_list: @@ -1180,12 +1163,12 @@ def transcode_directory(dir_name): and os.path.splitext(file)[1] in nzb2media.IGNOREEXTENSIONS ): continue - command, file = build_commands(file, new_dir, movie_name, bitbucket) + command, file = build_commands(file, new_dir, movie_name) newfile_path = command[-1] # transcoding files may remove the original file, so make sure to extract subtitles first if nzb2media.SEXTRACT and isinstance(file, str): - extract_subs(file, newfile_path, bitbucket) + extract_subs(file, newfile_path) try: # Try to remove the file that we're transcoding to just in case. (ffmpeg will return an error if it already exists for some reason) os.remove(newfile_path) @@ -1202,19 +1185,12 @@ def transcode_directory(dir_name): result = 1 # set result to failed in case call fails. try: if isinstance(file, str): - proc = subprocess.Popen( - command, stdout=bitbucket, stderr=subprocess.PIPE, - ) + proc = subprocess.Popen(command, stdout=DEVNULL, stderr=PIPE) else: img, data = next(file.items()) - proc = subprocess.Popen( - command, - stdout=bitbucket, - stderr=subprocess.PIPE, - stdin=subprocess.PIPE, - ) + proc = subprocess.Popen(command, stdout=DEVNULL, stderr=PIPE, stdin=PIPE) for vob in data['files']: - procin = zip_out(vob, img, bitbucket) + procin = zip_out(vob, img) if procin: log.debug(f'Feeding in file: {vob} to Transcoder') shutil.copyfileobj(procin.stdout, proc.stdin) @@ -1258,7 +1234,7 @@ def transcode_directory(dir_name): time.sleep(5) # play it safe and avoid failing to unmount. cmd = ['umount', '-l', nzb2media.MOUNTED] print_cmd(cmd) - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket) + proc = subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) out, err = proc.communicate() time.sleep(5) os.rmdir(nzb2media.MOUNTED) @@ -1278,5 +1254,4 @@ def transcode_directory(dir_name): not nzb2media.PROCESSOUTPUT and nzb2media.DUPLICATE ): # We postprocess the original files to CP/SB new_dir = dir_name - bitbucket.close() return final_result, new_dir diff --git a/nzb2media/user_scripts.py b/nzb2media/user_scripts.py index ddb057a0..d761ddd0 100644 --- a/nzb2media/user_scripts.py +++ b/nzb2media/user_scripts.py @@ -118,8 +118,8 @@ def external_script(output_destination, torrent_name, torrent_label, settings): cmd = f'{cmd} {item}' log.info(f'Running script {cmd} on file {file_path}.') try: - p = Popen(command) - res = p.wait() + proc = Popen(command) + res = proc.wait() if ( str(res) in nzb2media.USER_SCRIPT_SUCCESSCODES ): # Linux returns 0 for successful. 
diff --git a/nzb2media/utils/processes.py b/nzb2media/utils/processes.py index 1056f73f..7682deae 100644 --- a/nzb2media/utils/processes.py +++ b/nzb2media/utils/processes.py @@ -111,8 +111,8 @@ def restart(): if popen_list: popen_list += nzb2media.SYS_ARGV log.info(f'Restarting nzbToMedia with {popen_list}') - p = subprocess.Popen(popen_list, cwd=os.getcwd()) - p.wait() - status = p.returncode + proc = subprocess.Popen(popen_list, cwd=os.getcwd()) + proc.wait() + status = proc.returncode os._exit(status) diff --git a/nzb2media/version_check.py b/nzb2media/version_check.py index c96db2ca..be7efeb8 100644 --- a/nzb2media/version_check.py +++ b/nzb2media/version_check.py @@ -11,6 +11,7 @@ import stat import subprocess import tarfile import traceback +from subprocess import PIPE, STDOUT from urllib.request import urlretrieve import nzb2media @@ -161,52 +162,52 @@ class GitUpdateManager(UpdateManager): def _run_git(self, git_path, args): - output = None - err = None + proc_out = None + proc_err = None if not git_path: log.debug('No git specified, can\'t use git commands') - exit_status = 1 - return output, err, exit_status + proc_status = 1 + return proc_out, proc_err, proc_status cmd = f'{git_path} {args}' try: log.debug(f'Executing {cmd} with your shell in {nzb2media.APP_ROOT}') - p = subprocess.Popen( + proc = subprocess.Popen( cmd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, + stdin=PIPE, + stdout=PIPE, + stderr=STDOUT, shell=True, cwd=nzb2media.APP_ROOT, ) - output, err = p.communicate() - exit_status = p.returncode + proc_out, proc_err = proc.communicate() + proc_status = proc.returncode - output = output.decode('utf-8') + proc_out = proc_out.decode('utf-8') - if output: - output = output.strip() + if proc_out: + proc_out = proc_out.strip() if nzb2media.LOG_GIT: - log.debug(f'git output: {output}') + log.debug(f'git output: {proc_out}') except OSError: log.error(f'Command {cmd} didn\'t work') - exit_status = 1 + proc_status = 1 - exit_status = 128 if ('fatal:' in output) or err else exit_status - if exit_status == 0: + proc_status = 128 if ('fatal:' in proc_out) or proc_err else proc_status + if proc_status == 0: log.debug(f'{cmd} : returned successful') - exit_status = 0 - elif nzb2media.LOG_GIT and exit_status in (1, 128): - log.debug(f'{cmd} returned : {output}') + proc_status = 0 + elif nzb2media.LOG_GIT and proc_status in (1, 128): + log.debug(f'{cmd} returned : {proc_out}') else: if nzb2media.LOG_GIT: - log.debug(f'{cmd} returned : {output}, treat as error for now') - exit_status = 1 + log.debug(f'{cmd} returned : {proc_out}, treat as error for now') + proc_status = 1 - return output, err, exit_status + return proc_out, proc_err, proc_status def _find_installed_version(self): """ From a4f593fc9c13ca8df1b20e3f6953a85ceed41121 Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 18 Dec 2022 06:45:01 -0500 Subject: [PATCH 08/14] Refactor Remove unnecessary else statements Fix variable names --- nzb2media/__init__.py | 8 +-- nzb2media/auto_process/books.py | 15 +++--- nzb2media/auto_process/comics.py | 27 +++++----- nzb2media/auto_process/common.py | 38 +++++++------- nzb2media/auto_process/games.py | 23 ++++----- nzb2media/auto_process/movies.py | 73 +++++++++++++------------- nzb2media/auto_process/music.py | 63 ++++++++++++----------- nzb2media/auto_process/tv.py | 79 ++++++++++++++-------------- nzb2media/configuration.py | 18 +++---- nzb2media/extractor/__init__.py | 19 ++++--- nzb2media/main_db.py | 28 +++++----- 
nzb2media/managers/pymedusa.py | 6 +-- nzb2media/managers/sickbeard.py | 12 ++--- nzb2media/processor/nzb.py | 3 +- nzb2media/scene_exceptions.py | 14 ++--- nzb2media/transcoder.py | 85 +++++++++++++++---------------- nzb2media/utils/encoding.py | 35 +++++++------ nzb2media/utils/files.py | 30 +++++------ nzb2media/utils/identification.py | 22 ++++---- nzb2media/utils/links.py | 16 +++--- nzb2media/utils/network.py | 4 +- nzb2media/utils/nzb.py | 8 +-- nzb2media/utils/paths.py | 22 ++++---- nzb2media/utils/processes.py | 35 +++++++------ nzb2media/version_check.py | 42 +++++++-------- 25 files changed, 354 insertions(+), 371 deletions(-) diff --git a/nzb2media/__init__.py b/nzb2media/__init__.py index f9a6bc7e..c401449e 100644 --- a/nzb2media/__init__.py +++ b/nzb2media/__init__.py @@ -16,7 +16,7 @@ from subprocess import PIPE, DEVNULL from nzb2media import main_db from nzb2media import version_check from nzb2media import databases -from nzb2media.configuration import config +from nzb2media.configuration import Config from nzb2media.nzb.configuration import configure_nzbs from nzb2media.plugins.plex import configure_plex from nzb2media.torrent.configuration import configure_torrent_class @@ -410,7 +410,7 @@ def configure_migration(): global CFG # run migrate to convert old cfg to new style cfg plus fix any cfg missing values/options. - if not config.migrate(): + if not Config.migrate(): log.error(f'Unable to migrate config file {CONFIG_FILE}, exiting ...') if 'NZBOP_SCRIPTDIR' in os.environ: pass # We will try and read config from Environment. @@ -419,11 +419,11 @@ def configure_migration(): # run migrate to convert NzbGet data from old cfg style to new cfg style if 'NZBOP_SCRIPTDIR' in os.environ: - CFG = config.addnzbget() + CFG = Config.addnzbget() else: # load newly migrated config log.info(f'Loading config from [{CONFIG_FILE}]') - CFG = config() + CFG = Config() def configure_logging_part_2(): diff --git a/nzb2media/auto_process/books.py b/nzb2media/auto_process/books.py index 32221740..af0db050 100644 --- a/nzb2media/auto_process/books.py +++ b/nzb2media/auto_process/books.py @@ -84,18 +84,15 @@ def process( f'{section}: Failed to post-process - Server returned status ' f'{response.status_code}', ) - elif response.text == 'OK': + if response.text == 'OK': log.debug( f'SUCCESS: ForceProcess for {dir_name} has been started in LazyLibrarian', ) return ProcessResult.success( f'{section}: Successfully post-processed {input_name}', ) - else: - log.error( - f'FAILED: ForceProcess of {dir_name} has Failed in LazyLibrarian', - ) - return ProcessResult.failure( - f'{section}: Failed to post-process - Returned log from {section} ' - f'was not as expected.', - ) + log.error(f'FAILED: ForceProcess of {dir_name} has Failed in LazyLibrarian') + return ProcessResult.failure( + f'{section}: Failed to post-process - Returned log from {section} ' + f'was not as expected.', + ) diff --git a/nzb2media/auto_process/comics.py b/nzb2media/auto_process/comics.py index ddff35ea..b3546349 100644 --- a/nzb2media/auto_process/comics.py +++ b/nzb2media/auto_process/comics.py @@ -77,7 +77,7 @@ def process( log.debug(f'Opening URL: {url}') try: - r = requests.post( + response = requests.post( url, params=params, stream=True, verify=False, timeout=(30, 300), ) except requests.ConnectionError: @@ -86,18 +86,18 @@ def process( f'{section}: Failed to post-process - Unable to connect to ' f'{section}', ) - if r.status_code not in [ + if response.status_code not in [ requests.codes.ok, requests.codes.created, 
         requests.codes.accepted,
     ]:
-        log.error(f'Server returned status {r.status_code}')
+        log.error(f'Server returned status {response.status_code}')
         return ProcessResult.failure(
             f'{section}: Failed to post-process - Server returned status '
-            f'{r.status_code}',
+            f'{response.status_code}',
         )
-    for line in r.text.split('\n'):
+    for line in response.text.split('\n'):
         if line:
             log.debug(line)
         if 'Post Processing SUCCESSFUL' in line:
@@ -108,12 +108,11 @@
         return ProcessResult.success(
             f'{section}: Successfully post-processed {input_name}',
         )
-    else:
-        log.warning(
-            'The issue does not appear to have successfully processed. '
-            'Please check your Logs',
-        )
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - Returned log from '
-            f'{section} was not as expected.',
-        )
+    log.warning(
+        'The issue does not appear to have successfully processed. '
+        'Please check your Logs',
+    )
+    return ProcessResult.failure(
+        f'{section}: Failed to post-process - Returned log from '
+        f'{section} was not as expected.',
+    )
diff --git a/nzb2media/auto_process/common.py b/nzb2media/auto_process/common.py
index ae57ad90..ced991ed 100644
--- a/nzb2media/auto_process/common.py
+++ b/nzb2media/auto_process/common.py
@@ -34,7 +34,7 @@ class ProcessResult(typing.NamedTuple):

 def command_complete(url, params, headers, section):
     try:
-        r = requests.get(
+        response = requests.get(
             url,
             params=params,
             headers=headers,
@@ -45,26 +45,25 @@
     except requests.ConnectionError:
         log.error(f'Unable to open URL: {url}')
         return None
-    if r.status_code not in [
+    if response.status_code not in [
         requests.codes.ok,
         requests.codes.created,
         requests.codes.accepted,
     ]:
-        log.error(f'Server returned status {r.status_code}')
+        log.error(f'Server returned status {response.status_code}')
+        return None
+    try:
+        return response.json()['status']
+    except (ValueError, KeyError):
+        # ValueError catches simplejson's JSONDecodeError and
+        # json's ValueError
+        log.error(f'{section} did not return expected json data.')
         return None
-    else:
-        try:
-            return r.json()['status']
-        except (ValueError, KeyError):
-            # ValueError catches simplejson's JSONDecodeError and
-            # json's ValueError
-            log.error(f'{section} did not return expected json data.')
-            return None


 def completed_download_handling(url2, headers, section='MAIN'):
     try:
-        r = requests.get(
+        response = requests.get(
             url2,
             params={},
             headers=headers,
@@ -75,16 +74,15 @@
     except requests.ConnectionError:
         log.error(f'Unable to open URL: {url2}')
         return False
-    if r.status_code not in [
+    if response.status_code not in [
         requests.codes.ok,
         requests.codes.created,
         requests.codes.accepted,
     ]:
-        log.error(f'Server returned status {r.status_code}')
+        log.error(f'Server returned status {response.status_code}')
+        return False
+    try:
+        return response.json().get('enableCompletedDownloadHandling', False)
+    except ValueError:
+        # ValueError catches simplejson's JSONDecodeError and json's ValueError
         return False
-    else:
-        try:
-            return r.json().get('enableCompletedDownloadHandling', False)
-        except ValueError:
-            # ValueError catches simplejson's JSONDecodeError and json's ValueError
-            return False
diff --git a/nzb2media/auto_process/games.py b/nzb2media/auto_process/games.py
index 14427aef..f96169cf 100644
--- a/nzb2media/auto_process/games.py
+++ b/nzb2media/auto_process/games.py
@@ -72,7 +72,7 @@ def process(
     log.debug(f'Opening URL: {url}')

     try:
-        r = requests.get(url, params=params, verify=False, timeout=(30, 300))
+        response = requests.get(url, params=params, verify=False, timeout=(30, 300))
     except requests.ConnectionError:
         log.error('Unable to open URL')
         return ProcessResult.failure(
             f'{section}: Failed to post-process - Unable to connect to '
             f'{section}',
         )
-    result = r.json()
+    result = response.json()
     log.debug(result)
     if library:
         log.debug(f'moving files to library: {library}')
         try:
             shutil.move(dir_name, os.path.join(library, input_name))
         except Exception:
             log.error(f'Unable to move {dir_name} to {os.path.join(library, input_name)}')
             return ProcessResult.failure(
                 f'{section}: Failed to post-process - Unable to move files',
             )
     else:
         log.error('No library specified to move files to. Please edit your configuration.')
         return ProcessResult.failure(
             f'{section}: Failed to post-process - No library defined in '
             f'{section}',
         )
-    if r.status_code not in [
+    if response.status_code not in [
         requests.codes.ok,
         requests.codes.created,
         requests.codes.accepted,
     ]:
-        log.error(f'Server returned status {r.status_code}')
+        log.error(f'Server returned status {response.status_code}')
         return ProcessResult.failure(
             f'{section}: Failed to post-process - Server returned status '
-            f'{r.status_code}',
+            f'{response.status_code}',
         )
-    elif result['success']:
+    if result['success']:
         log.debug(f'SUCCESS: Status for {gamez_id} has been set to {download_status} in Gamez')
         return ProcessResult.success(
             f'{section}: Successfully post-processed {input_name}',
         )
-    else:
-        log.error(f'FAILED: Status for {gamez_id} has NOT been updated in Gamez')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - Returned log from {section} '
-            f'was not as expected.',
-        )
+    log.error(f'FAILED: Status for {gamez_id} has NOT been updated in Gamez')
+    return ProcessResult.failure(
+        f'{section}: Failed to post-process - Returned log from {section} '
+        f'was not as expected.',
+    )
diff --git a/nzb2media/auto_process/movies.py b/nzb2media/auto_process/movies.py
index db73c7c4..147ea977 100644
--- a/nzb2media/auto_process/movies.py
+++ b/nzb2media/auto_process/movies.py
@@ -320,7 +320,7 @@ def process(
                 message=f'{section}: Failed to post-process - Server returned status {response.status_code}',
                 status_code=1,
             )
-        elif section == 'CouchPotato' and result['success']:
+        if section == 'CouchPotato' and result['success']:
             log.debug(f'SUCCESS: Finished {method} scan for folder {dir_name}')
             if method == 'manage':
                 return ProcessResult(
@@ -342,11 +342,10 @@
                     message=f'{section}: Successfully post-processed {input_name}',
                     status_code=status,
                 )
-            else:
-                return ProcessResult(
-                    message=f'{section}: Failed to post-process - changed status to {update_movie_status}',
-                    status_code=1,
-                )
+            return ProcessResult(
+                message=f'{section}: Failed to post-process - changed status to {update_movie_status}',
+                status_code=1,
+            )
         else:
             log.error(f'FAILED: {method} scan was unable to finish for folder {dir_name}. exiting!')
             return ProcessResult(
@@ -366,7 +365,7 @@
             status_code=1,  # Return as failed to flag this in the downloader.
         )  # Return failed flag, but log the event as successful.
- elif section == 'Watcher3': + if section == 'Watcher3': log.debug(f'Sending failed download to {section} for CDH processing') path = remote_dir(dir_name) if remote_path else dir_name if input_name and os.path.isfile( @@ -437,7 +436,7 @@ def process( status_code=1, message=f'{section}: Failed to post-process - Server returned status {response.status_code}', ) - elif result['success']: + if result['success']: log.debug(f'SUCCESS: {input_name} has been set to ignored ...') else: log.warning(f'FAILED: Unable to set {input_name} to ignored!') @@ -476,17 +475,17 @@ def process( f'{section}: Failed to post-process - Server returned status ' f'{response.status_code}', ) - elif result['success']: + + if result['success']: log.debug('SUCCESS: Snatched the next highest release ...') return ProcessResult.success( f'{section}: Successfully snatched next highest release', ) - else: - log.debug('SUCCESS: Unable to find a new release to snatch now. CP will keep searching!') - return ProcessResult.success( - f'{section}: No new release found now. ' - f'{section} will keep searching', - ) + log.debug('SUCCESS: Unable to find a new release to snatch now. CP will keep searching!') + return ProcessResult.success( + f'{section}: No new release found now. ' + f'{section} will keep searching', + ) # Added a release that was not in the wanted list so confirm rename # successful by finding this movie media.list. @@ -539,7 +538,7 @@ def process( return ProcessResult.success( f'{section}: Successfully post-processed {input_name}', ) - elif command_status in ['failed']: + if command_status in ['failed']: log.debug('The Scan command has failed. Renaming was not successful.') # return ProcessResult( # message='{0}: Failed to post-process {1}'.format(section, input_name), @@ -552,7 +551,7 @@ def process( f'{section}: Successfully post-processed {input_name}', ) - elif not list_media_files( + if not list_media_files( dir_name, media=True, audio=False, meta=False, archives=True, ): log.debug(f'SUCCESS: Input Directory [{dir_name}] has no remaining media files. 
This has been fully processed.') @@ -598,17 +597,17 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None): log.debug(f'Opening URL: {url} with PARAMS: {params}') try: - r = requests.get(url, params=params, verify=False, timeout=(30, 60)) + response = requests.get(url, params=params, verify=False, timeout=(30, 60)) except requests.ConnectionError: log.error(f'Unable to open URL {url}') return results try: - result = r.json() + result = response.json() except ValueError: # ValueError catches simplejson's JSONDecodeError and json's ValueError log.error('CouchPotato returned the following non-json data') - for line in r.iter_lines(): + for line in response.iter_lines(): log.error(line) return results @@ -623,8 +622,8 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None): # Gather release info and return it back, no need to narrow results if release_id: try: - cur_id = result[section]['_id'] - results[cur_id] = result[section] + key = result[section]['_id'] + results[key] = result[section] return results except Exception: pass @@ -651,38 +650,38 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None): ): continue - cur_id = release['_id'] - results[cur_id] = release - results[cur_id]['title'] = movie['title'] + key = release['_id'] + results[key] = release + results[key]['title'] = movie['title'] except Exception: continue # Narrow results by removing old releases by comparing their last_edit field if len(results) > 1: rem_id = set() - for id1, x1 in results.items(): - for x2 in results.values(): + for key, val1 in results.items(): + for val2 in results.values(): try: - if x2['last_edit'] > x1['last_edit']: - rem_id.add(id1) + if val2['last_edit'] > val1['last_edit']: + rem_id.add(key) except Exception: continue - for id in rem_id: - results.pop(id) + for ea_id in rem_id: + results.pop(ea_id) # Search downloads on clients for a match to try and narrow our results down to 1 if len(results) > 1: rem_id = set() - for cur_id, x in results.items(): + for key, val1 in results.items(): try: if not find_download( - str(x['download_info']['downloader']).lower(), - x['download_info']['id'], + str(val1['download_info']['downloader']).lower(), + val1['download_info']['id'], ): - rem_id.add(cur_id) + rem_id.add(key) except Exception: continue - for id in rem_id: - results.pop(id) + for ea_id in rem_id: + results.pop(ea_id) return results diff --git a/nzb2media/auto_process/music.py b/nzb2media/auto_process/music.py index af363f63..afdf19a6 100644 --- a/nzb2media/auto_process/music.py +++ b/nzb2media/auto_process/music.py @@ -134,7 +134,7 @@ def process( f'{section}: Failed to post-process - No change in wanted status', ) - elif status == 0 and section == 'Lidarr': + if status == 0 and section == 'Lidarr': route = f'{web_root}/api/v1/command' url = nzb2media.utils.common.create_url(scheme, host, port, route) headers = {'X-Api-Key': apikey} @@ -146,7 +146,7 @@ def process( data = {'name': 'Rename', 'path': dir_name} try: log.debug(f'Opening URL: {url} with data: {data}') - r = requests.post( + response = requests.post( url, data=json.dumps(data), headers=headers, @@ -162,7 +162,7 @@ def process( ) try: - res = r.json() + res = response.json() scan_id = int(res['id']) log.debug(f'Scan started with id: {scan_id}') except Exception as error: @@ -171,28 +171,31 @@ def process( f'{section}: Failed to post-process - Unable to start scan', ) - n = 0 + num = 0 params = {} url = f'{url}/{scan_id}' - while n < 6: # set up wait_for minutes to see if 
command completes.. + while num < 6: # set up wait_for minutes to see if command completes.. time.sleep(10 * wait_for) command_status = command_complete(url, params, headers, section) if command_status and command_status in ['completed', 'failed']: break - n += 1 + num += 1 if command_status: log.debug(f'The Scan command return status: {command_status}') + if not os.path.exists(dir_name): log.debug(f'The directory {dir_name} has been removed. Renaming was successful.') return ProcessResult.success( f'{section}: Successfully post-processed {input_name}', ) - elif command_status and command_status in ['completed']: + + if command_status and command_status in ['completed']: log.debug('The Scan command has completed successfully. Renaming was successful.') return ProcessResult.success( f'{section}: Successfully post-processed {input_name}', ) - elif command_status and command_status in ['failed']: + + if command_status and command_status in ['failed']: log.debug('The Scan command has failed. Renaming was not successful.') # return ProcessResult.failure( # f'{section}: Failed to post-process {input_name}' @@ -212,20 +215,19 @@ def process( return ProcessResult.failure( f'{section}: Download Failed. Sending back to {section}', ) - else: - log.warning('FAILED DOWNLOAD DETECTED') - if ( - delete_failed - and os.path.isdir(dir_name) - and not os.path.dirname(dir_name) == dir_name - ): - log.debug(f'Deleting failed files and folder {dir_name}') - remove_dir(dir_name) - # Return as failed to flag this in the downloader. - return ProcessResult.failure( - f'{section}: Failed to post-process. {section} does not ' - f'support failed downloads', - ) + log.warning('FAILED DOWNLOAD DETECTED') + if ( + delete_failed + and os.path.isdir(dir_name) + and not os.path.dirname(dir_name) == dir_name + ): + log.debug(f'Deleting failed files and folder {dir_name}') + remove_dir(dir_name) + # Return as failed to flag this in the downloader. + return ProcessResult.failure( + f'{section}: Failed to post-process. 
{section} does not ' + f'support failed downloads', + ) return ProcessResult.failure() @@ -241,13 +243,13 @@ def get_status(url, apikey, dir_name): log.debug(f'Opening URL: {url} with PARAMS: {params}') try: - r = requests.get(url, params=params, verify=False, timeout=(30, 120)) + response = requests.get(url, params=params, verify=False, timeout=(30, 120)) except requests.RequestException: log.error('Unable to open URL') return None try: - result = r.json() + result = response.json() except ValueError: # ValueError catches simplejson's JSONDecodeError and json's ValueError return None @@ -267,7 +269,7 @@ def force_process( log.debug(f'Opening URL: {url} with PARAMS: {params}') try: - r = requests.get(url, params=params, verify=False, timeout=(30, 300)) + response = requests.get(url, params=params, verify=False, timeout=(30, 300)) except requests.ConnectionError: log.error(f'Unable to open URL {url}') return ProcessResult.failure( @@ -275,18 +277,19 @@ def force_process( f'{section}', ) - log.debug(f'Result: {r.text}') + log.debug(f'Result: {response.text}') - if r.status_code not in [ + if response.status_code not in [ requests.codes.ok, requests.codes.created, requests.codes.accepted, ]: - log.error(f'Server returned status {r.status_code}') + log.error(f'Server returned status {response.status_code}') return ProcessResult.failure( - f'{section}: Failed to post-process - Server returned status {r.status_code}', + f'{section}: Failed to post-process - Server returned status {response.status_code}', ) - elif r.text == 'OK': + + if response.text == 'OK': log.debug(f'SUCCESS: Post-Processing started for {input_name} in folder {dir_name} ...') else: log.error(f'FAILED: Post-Processing has NOT started for {input_name} in folder {dir_name}. exiting!') diff --git a/nzb2media/auto_process/tv.py b/nzb2media/auto_process/tv.py index 8d1d9de0..49ee0041 100644 --- a/nzb2media/auto_process/tv.py +++ b/nzb2media/auto_process/tv.py @@ -393,33 +393,32 @@ def process( if section == 'SickBeard': if init_sickbeard.fork_obj: return init_sickbeard.fork_obj.api_call() - else: - s = requests.Session() + session = requests.Session() - log.debug(f'Opening URL: {url} with params: {fork_params}') - if not apikey and username and password: - login = f'{web_root}/login' - login_params = {'username': username, 'password': password} - response = s.get(login, verify=False, timeout=(30, 60)) - if response.status_code in [401, 403] and response.cookies.get('_xsrf'): - login_params['_xsrf'] = response.cookies.get('_xsrf') - s.post( - login, - data=login_params, - stream=True, - verify=False, - timeout=(30, 60), - ) - response = s.get( - url, - auth=(username, password), - params=fork_params, + log.debug(f'Opening URL: {url} with params: {fork_params}') + if not apikey and username and password: + login = f'{web_root}/login' + login_params = {'username': username, 'password': password} + response = session.get(login, verify=False, timeout=(30, 60)) + if response.status_code in [401, 403] and response.cookies.get('_xsrf'): + login_params['_xsrf'] = response.cookies.get('_xsrf') + session.post( + login, + data=login_params, stream=True, verify=False, - timeout=(30, 1800), + timeout=(30, 60), ) + response = session.get( + url, + auth=(username, password), + params=fork_params, + stream=True, + verify=False, + timeout=(30, 1800), + ) elif section == 'SiCKRAGE': - s = requests.Session() + session = requests.Session() if api_version >= 2 and sso_username and sso_password: oauth = OAuth2Session( @@ -433,7 +432,7 @@ def process( 
username=sso_username, password=sso_password, ) - s.headers.update( + session.headers.update( {'Authorization': 'Bearer ' + oauth_token['access_token']}, ) @@ -454,7 +453,7 @@ def process( else: params = fork_params - response = s.get( + response = session.get( url, params=params, stream=True, @@ -542,29 +541,33 @@ def process( return ProcessResult.success( f'{section}: Successfully post-processed {input_name}', ) - elif section == 'NzbDrone' and started: - n = 0 + + if section == 'NzbDrone' and started: + num = 0 params = {} url = f'{url}/{scan_id}' - while n < 6: # set up wait_for minutes to see if command completes.. + while num < 6: # set up wait_for minutes to see if command completes.. time.sleep(10 * wait_for) command_status = command_complete(url, params, headers, section) if command_status and command_status in ['completed', 'failed']: break - n += 1 + num += 1 if command_status: log.debug(f'The Scan command return status: {command_status}') + if not os.path.exists(dir_name): log.debug(f'The directory {dir_name} has been removed. Renaming was successful.') return ProcessResult.success( f'{section}: Successfully post-processed {input_name}', ) - elif command_status and command_status in ['completed']: + + if command_status and command_status in ['completed']: log.debug('The Scan command has completed successfully. Renaming was successful.') return ProcessResult.success( f'{section}: Successfully post-processed {input_name}', ) - elif command_status and command_status in ['failed']: + + if command_status and command_status in ['failed']: log.debug('The Scan command has failed. Renaming was not successful.') # return ProcessResult.failure( # f'{section}: Failed to post-process {input_name}' @@ -578,14 +581,12 @@ def process( f'Passing back to {section}', status_code=status, ) - else: - log.warning('The Scan command did not return a valid status. Renaming was not successful.') - return ProcessResult.failure( - f'{section}: Failed to post-process {input_name}', - ) - else: - # We did not receive Success confirmation. + log.warning('The Scan command did not return a valid status. Renaming was not successful.') return ProcessResult.failure( - f'{section}: Failed to post-process - Returned log from {section} ' - f'was not as expected.', + f'{section}: Failed to post-process {input_name}', ) + # We did not receive Success confirmation. 
+ return ProcessResult.failure( + f'{section}: Failed to post-process - Returned log from {section} ' + f'was not as expected.', + ) diff --git a/nzb2media/configuration.py b/nzb2media/configuration.py index f435acf7..a2d64ac5 100644 --- a/nzb2media/configuration.py +++ b/nzb2media/configuration.py @@ -101,15 +101,15 @@ class ConfigObj(configobj.ConfigObj, Section): self.interpolation = False @staticmethod - def find_key(node, kv): + def find_key(node, value): if isinstance(node, list): for i in node: - yield from ConfigObj.find_key(i, kv) + yield from ConfigObj.find_key(i, value) elif isinstance(node, dict): - if kv in node: - yield node[kv] + if value in node: + yield node[value] for j in node.values(): - yield from ConfigObj.find_key(j, kv) + yield from ConfigObj.find_key(j, value) @staticmethod def migrate(): @@ -121,7 +121,7 @@ class ConfigObj(configobj.ConfigObj, Section): # check for autoProcessMedia.cfg and create if it does not exist if not nzb2media.CONFIG_FILE.is_file(): shutil.copyfile(nzb2media.CONFIG_SPEC_FILE, nzb2media.CONFIG_FILE) - CFG_OLD = config(nzb2media.CONFIG_FILE) + CFG_OLD = Config(nzb2media.CONFIG_FILE) except Exception as error: log.error(f'Error {error} when copying to .cfg') @@ -129,7 +129,7 @@ class ConfigObj(configobj.ConfigObj, Section): # check for autoProcessMedia.cfg.spec and create if it does not exist if not nzb2media.CONFIG_SPEC_FILE.is_file(): shutil.copyfile(nzb2media.CONFIG_FILE, nzb2media.CONFIG_SPEC_FILE) - CFG_NEW = config(nzb2media.CONFIG_SPEC_FILE) + CFG_NEW = Config(nzb2media.CONFIG_SPEC_FILE) except Exception as error: log.error(f'Error {error} when copying to .spec') @@ -307,7 +307,7 @@ class ConfigObj(configobj.ConfigObj, Section): @staticmethod def addnzbget(): # load configs into memory - cfg_new = config() + cfg_new = Config() try: if ( @@ -1117,4 +1117,4 @@ class ConfigObj(configobj.ConfigObj, Section): configobj.Section = Section configobj.ConfigObj = ConfigObj -config = ConfigObj +Config = ConfigObj diff --git a/nzb2media/extractor/__init__.py b/nzb2media/extractor/__init__.py index 1a3156c6..569a4ae9 100644 --- a/nzb2media/extractor/__init__.py +++ b/nzb2media/extractor/__init__.py @@ -94,27 +94,27 @@ def extract(file_path, output_destination): stdout=devnull, stderr=devnull, ): # note, returns 0 if exists, or 1 if doesn't exist. - for k, v in extract_commands.items(): - if cmd in v[0]: + for key, val in extract_commands.items(): + if cmd in val[0]: if not call( ['which', '7zr'], stdout=devnull, stderr=devnull, ): # we do have '7zr' - extract_commands[k] = ['7zr', 'x', '-y'] + extract_commands[key] = ['7zr', 'x', '-y'] elif not call( ['which', '7z'], stdout=devnull, stderr=devnull, ): # we do have '7z' - extract_commands[k] = ['7z', 'x', '-y'] + extract_commands[key] = ['7z', 'x', '-y'] elif not call( ['which', '7za'], stdout=devnull, stderr=devnull, ): # we do have '7za' - extract_commands[k] = ['7za', 'x', '-y'] + extract_commands[key] = ['7za', 'x', '-y'] else: - log.error(f'EXTRACTOR: {cmd} not found, disabling support for {k}') - del extract_commands[k] + log.error(f'EXTRACTOR: {cmd} not found, disabling support for {key}') + del extract_commands[key] devnull.close() else: log.warning('EXTRACTOR: Cannot determine which tool to use when called from Transmission') @@ -246,6 +246,5 @@ def extract(file_path, output_destination): except Exception: pass return True - else: - log.error(f'EXTRACTOR: Extraction failed for {file_path}. Result was {res}') - return False + log.error(f'EXTRACTOR: Extraction failed for {file_path}. 
Result was {res}') + return False diff --git a/nzb2media/main_db.py b/nzb2media/main_db.py index e1969ced..e3d410ab 100644 --- a/nzb2media/main_db.py +++ b/nzb2media/main_db.py @@ -37,14 +37,13 @@ class DBConnection: result = None try: result = self.select('SELECT db_version FROM db_version') - except sqlite3.OperationalError as e: - if 'no such table: db_version' in e.args[0]: + except sqlite3.OperationalError as error: + if 'no such table: db_version' in error.args[0]: return 0 if result: return int(result[0]['db_version']) - else: - return 0 + return 0 def fetch(self, query, args=None): if query is None: @@ -94,16 +93,16 @@ class DBConnection: while attempt < 5: try: - for qu in querylist: - if len(qu) == 1: + for query in querylist: + if len(query) == 1: if log_transaction: - log.debug(qu[0]) - sql_result.append(self.connection.execute(qu[0])) - elif len(qu) > 1: + log.debug(query[0]) + sql_result.append(self.connection.execute(query[0])) + elif len(query) > 1: if log_transaction: - log.debug(f'{qu[0]} with args {qu[1]}') + log.debug(f'{query[0]} with args {query[1]}') sql_result.append( - self.connection.execute(qu[0], qu[1]), + self.connection.execute(query[0], query[1]), ) self.connection.commit() log.debug(f'Transaction with {len(querylist)} query\'s executed') @@ -252,8 +251,8 @@ def _process_upgrade(connection, upgrade_class): else: log.debug(f'{upgrade_class.__name__} upgrade not required') - for upgradeSubClass in upgrade_class.__subclasses__(): - _process_upgrade(connection, upgradeSubClass) + for upgrade_sub_class in upgrade_class.__subclasses__(): + _process_upgrade(connection, upgrade_sub_class) # Base migration class. All future DB changes should be subclassed from this class @@ -283,8 +282,7 @@ class SchemaUpgrade: result = self.connection.select('SELECT db_version FROM db_version') if result: return int(result[-1]['db_version']) - else: - return 0 + return 0 def inc_db_version(self): new_version = self.check_db_version() + 1 diff --git a/nzb2media/managers/pymedusa.py b/nzb2media/managers/pymedusa.py index cf475fff..d70fb403 100644 --- a/nzb2media/managers/pymedusa.py +++ b/nzb2media/managers/pymedusa.py @@ -168,19 +168,19 @@ class PyMedusaApiV2(SickBeard): return ProcessResult.failure() wait_for = int(self.sb_init.config.get('wait_for', 2)) - n = 0 + num = 0 response = {} queue_item_identifier = jdata['queueItem']['identifier'] url = f'{self.url}/{queue_item_identifier}' - while n < 12: # set up wait_for minutes to see if command completes.. + while num < 12: # set up wait_for minutes to see if command completes.. time.sleep(5 * wait_for) response = self._get_identifier_status(url) if response and response.get('success'): break if 'error' in response: break - n += 1 + num += 1 # Log Medusa's PP logs here. 
if response.get('output'): diff --git a/nzb2media/managers/sickbeard.py b/nzb2media/managers/sickbeard.py index 3a7b2f77..3f931d4e 100644 --- a/nzb2media/managers/sickbeard.py +++ b/nzb2media/managers/sickbeard.py @@ -52,9 +52,9 @@ class InitSickBeard: 'stheno': 'Stheno', } _val = cfg.get('fork', 'auto') - f1 = replace.get(_val, _val) + fork_name = replace.get(_val, _val) try: - self.fork = f1, nzb2media.FORKS[f1] + self.fork = fork_name, nzb2media.FORKS[fork_name] except KeyError: self.fork = 'auto' self.protocol = 'https://' if self.ssl else 'http://' @@ -83,9 +83,9 @@ class InitSickBeard: 'stheno': 'Stheno', } _val = cfg.get('fork', 'auto') - f1 = replace.get(_val.lower(), _val) + fork_name = replace.get(_val.lower(), _val) try: - self.fork = f1, nzb2media.FORKS[f1] + self.fork = fork_name, nzb2media.FORKS[fork_name] except KeyError: self.fork = 'auto' protocol = 'https://' if self.ssl else 'http://' @@ -209,9 +209,9 @@ class InitSickBeard: return self.fork, self.fork_params @staticmethod - def _api_check(r, params, rem_params): + def _api_check(response, params, rem_params): try: - json_data = r.json() + json_data = response.json() except ValueError: log.error('Failed to get JSON data from response') log.debug('Response received') diff --git a/nzb2media/processor/nzb.py b/nzb2media/processor/nzb.py index e0ac31fe..ab060380 100644 --- a/nzb2media/processor/nzb.py +++ b/nzb2media/processor/nzb.py @@ -76,8 +76,7 @@ def process( message='', status_code=-1, ) - else: - usercat = 'ALL' + usercat = 'ALL' if len(section) > 1: log.error(f'Category:[{input_category}] is not unique, {section.keys()} are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.') return ProcessResult( diff --git a/nzb2media/scene_exceptions.py b/nzb2media/scene_exceptions.py index ad909592..7cef618d 100644 --- a/nzb2media/scene_exceptions.py +++ b/nzb2media/scene_exceptions.py @@ -147,17 +147,17 @@ def reverse_filename(filename, dirname, name): head, file_extension = os.path.splitext(os.path.basename(filename)) na_parts = season_pattern.search(head) if na_parts is not None: - word_p = word_pattern.findall(na_parts.group(2)) - if word_p: + match = word_pattern.findall(na_parts.group(2)) + if match: new_words = '' - for wp in word_p: - if wp[0] == '.': + for group in match: + if group[0] == '.': new_words += '.' - new_words += re.sub(r'\W', '', wp) + new_words += re.sub(r'\W', '', group) else: new_words = na_parts.group(2) - for cr in char_replace: - new_words = re.sub(cr[0], cr[1], new_words) + for each_char in char_replace: + new_words = re.sub(each_char[0], each_char[1], new_words) newname = new_words[::-1] + na_parts.group(1)[::-1] else: newname = head[::-1].title() diff --git a/nzb2media/transcoder.py b/nzb2media/transcoder.py index a1f4ce29..7fc439a1 100644 --- a/nzb2media/transcoder.py +++ b/nzb2media/transcoder.py @@ -59,8 +59,7 @@ def is_video_good(video: pathlib.Path, status, require_lan=None): # if the download was 'failed', assume bad. # If it was successful, assume good. 
return False - else: - return True + return True log.info(f'Checking [{video.name}] for corruption, please stand by ...') video_details, result = get_video_details(video) @@ -96,9 +95,8 @@ def is_video_good(video: pathlib.Path, status, require_lan=None): if len(video_streams) > 0 and len(valid_audio) > 0: log.info(f'SUCCESS: [{video.name}] has no corruption.') return True - else: - log.info(f'FAILED: [{video.name}] has {len(video_streams)} video streams and {len(audio_streams)} audio streams. Assume corruption.') - return False + log.info(f'FAILED: [{video.name}] has {len(video_streams)} video streams and {len(audio_streams)} audio streams. Assume corruption.') + return False def zip_out(file, img): @@ -191,8 +189,7 @@ def check_vid_file(video_details, result): ] if len(video_streams) > 0 and len(audio_streams) > 0: return True - else: - return False + return False def build_commands(file, new_dir, movie_name): @@ -328,7 +325,7 @@ def build_commands(file, new_dir, movie_name): for video in video_streams: codec = video['codec_name'] - fr = video.get('avg_frame_rate', 0) + frame_rate = video.get('avg_frame_rate', 0) width = video.get('width', 0) height = video.get('height', 0) scale = nzb2media.VRESOLUTION @@ -337,7 +334,7 @@ def build_commands(file, new_dir, movie_name): else: video_cmd.extend(['-c:v', nzb2media.VCODEC]) if nzb2media.VFRAMERATE and not ( - nzb2media.VFRAMERATE * 0.999 <= fr <= nzb2media.VFRAMERATE * 1.001 + nzb2media.VFRAMERATE * 0.999 <= frame_rate <= nzb2media.VFRAMERATE * 1.001 ): video_cmd.extend(['-r', str(nzb2media.VFRAMERATE)]) if scale: @@ -612,7 +609,7 @@ def build_commands(file, new_dir, movie_name): s_mapped = [] burnt = 0 - n = 0 + num = 0 for lan in nzb2media.SLANGUAGES: try: subs1 = [ @@ -702,14 +699,14 @@ def build_commands(file, new_dir, movie_name): if metlan: meta_cmd.extend( [ - f'-metadata:s:s:{len(s_mapped) + n}', + f'-metadata:s:s:{len(s_mapped) + num}', f'language={metlan.alpha3}', ], ) - n += 1 - map_cmd.extend(['-map', f'{n}:0']) + num += 1 + map_cmd.extend(['-map', f'{num}:0']) - if not nzb2media.ALLOWSUBS or (not s_mapped and not n): + if not nzb2media.ALLOWSUBS or (not s_mapped and not num): sub_cmd.extend(['-sn']) else: if nzb2media.SCODEC: @@ -774,19 +771,19 @@ def extract_subs(file, newfile_path): and item['codec_name'] != 'pgssub' ] num = len(sub_streams) - for n in range(num): - sub = sub_streams[n] + for ea_num in range(num): + sub = sub_streams[ea_num] idx = sub['index'] lan = sub.get('tags', {}).get('language', 'unk') if num == 1: output_file = os.path.join(subdir, f'{name}.srt') if os.path.isfile(output_file): - output_file = os.path.join(subdir, f'{name}.{n}.srt') + output_file = os.path.join(subdir, f'{name}.{ea_num}.srt') else: output_file = os.path.join(subdir, f'{name}.{lan}.srt') if os.path.isfile(output_file): - output_file = os.path.join(subdir, f'{name}.{lan}.{n}.srt') + output_file = os.path.join(subdir, f'{name}.{lan}.{ea_num}.srt') command = [ nzb2media.FFMPEG, @@ -825,14 +822,14 @@ def extract_subs(file, newfile_path): log.error('Extracting subtitles has failed') -def process_list(it, new_dir): +def process_list(iterable, new_dir): rem_list = [] new_list = [] combine = [] vts_path = None mts_path = None success = True - for item in it: + for item in iterable: ext = os.path.splitext(item)[1].lower() if ( ext in ['.iso', '.bin', '.img'] @@ -889,15 +886,15 @@ def process_list(it, new_dir): success = False break if success and new_list: - it.extend(new_list) + iterable.extend(new_list) for item in rem_list: - it.remove(item) + 
iterable.remove(item) log.debug(f'Successfully extracted .vob file {new_list[0]} from disk image') elif new_list and not success: new_list = [] rem_list = [] log.error('Failed extracting .vob files from disk image. Stopping transcoding.') - return it, rem_list, new_list, success + return iterable, rem_list, new_list, success def mount_iso(item, new_dir): # Currently only supports Linux Mount when permissions allow. @@ -926,9 +923,9 @@ def mount_iso(item, new_dir): # Currently only supports Linux Mount when permis except Exception: vts_path = os.path.split(full_path)[0] return combine_vts(vts_path) - elif ( - re.match('.+BDMV[/\\]STREAM[/\\][0-9]+[0-9].[Mm]', full_path) - and '.mts' not in nzb2media.IGNOREEXTENSIONS + if ( + re.match('.+BDMV[/\\]STREAM[/\\][0-9]+[0-9].[Mm]', full_path) + and '.mts' not in nzb2media.IGNOREEXTENSIONS ): log.debug(f'Found MTS image file: {full_path}') try: @@ -974,14 +971,14 @@ def rip_iso(item, new_dir): ] combined = [] if file_list: # handle DVD - for n in range(99): + for title_set in range(99): concat = [] - m = 1 + part = 1 while True: - vts_name = f'VIDEO_TS{os.sep}VTS_{n + 1:02d}_{m:d}.VOB' + vts_name = f'VIDEO_TS{os.sep}VTS_{title_set + 1:02d}_{part:d}.VOB' if vts_name in file_list: concat.append(vts_name) - m += 1 + part += 1 else: break if not concat: @@ -991,7 +988,7 @@ def rip_iso(item, new_dir): continue name = '{name}.cd{x}'.format( name=os.path.splitext(os.path.split(item)[1])[0], - x=n + 1, + x=title_set + 1, ) new_files.append({item: {'name': name, 'files': concat}}) else: # check BlueRay for BDMV/STREAM/XXXX.MTS @@ -1012,17 +1009,17 @@ def rip_iso(item, new_dir): mts_list.sort( key=lambda f: int(''.join(filter(str.isdigit, f))), ) - n = 0 + title_set = 0 for mts_name in mts_list: concat = [] - n += 1 + title_set += 1 concat.append(mts_name) if nzb2media.CONCAT: combined.extend(concat) continue name = '{name}.cd{x}'.format( name=os.path.splitext(os.path.split(item)[1])[0], - x=n, + x=title_set, ) new_files.append({item: {'name': name, 'files': concat}}) if nzb2media.CONCAT and combined: @@ -1045,14 +1042,14 @@ def combine_vts(vts_path): name = os.path.basename(os.path.dirname(name)) else: name = os.path.basename(name) - for n in range(99): + for title_set in range(99): concat = [] - m = 1 + part = 1 while True: - vts_name = f'VTS_{n + 1:02d}_{m:d}.VOB' + vts_name = f'VTS_{title_set + 1:02d}_{part:d}.VOB' if os.path.isfile(os.path.join(vts_path, vts_name)): concat.append(os.path.join(vts_path, vts_name)) - m += 1 + part += 1 else: break if not concat: @@ -1062,7 +1059,7 @@ def combine_vts(vts_path): continue name = '{name}.cd{x}'.format( name=name, - x=n + 1, + x=title_set + 1, ) new_files.append({vts_path: {'name': name, 'files': concat}}) if nzb2media.CONCAT: @@ -1078,7 +1075,7 @@ def combine_mts(mts_path): name = os.path.basename(os.path.dirname(name)) else: name = os.path.basename(name) - n = 0 + num = 0 mts_list = [ f for f in os.listdir(mts_path) @@ -1096,10 +1093,10 @@ def combine_mts(mts_path): continue name = '{name}.cd{x}'.format( name=name, - x=n + 1, + x=num + 1, ) new_files.append({mts_path: {'name': name, 'files': concat}}) - n += 1 + num += 1 if nzb2media.CONCAT: new_files.append({mts_path: {'name': name, 'files': combined}}) return new_files @@ -1112,11 +1109,11 @@ def combine_cd(combine): for ea_item in combine }: concat = '' - for n in range(99): + for num in range(99): files = [ file for file in combine - if n + 1 + if num + 1 == int(re.match('.+[cC][dD]([0-9]+).', file).groups()[0]) and item in file ] diff --git 
a/nzb2media/utils/encoding.py b/nzb2media/utils/encoding.py index 4ee4b04e..bd384930 100644 --- a/nzb2media/utils/encoding.py +++ b/nzb2media/utils/encoding.py @@ -15,37 +15,40 @@ def char_replace(name_in): # UTF-8: 1st hex code 0xC2-0xC3 followed by a 2nd hex code 0xA1-0xFF # ISO-8859-15: 0xA6-0xFF # The function will detect if Name contains a special character - # If there is special character, detects if it is a UTF-8, CP850 or ISO-8859-15 encoding + # If there is special character, detects if it is a UTF-8, CP850 or + # ISO-8859-15 encoding encoded = False encoding = None if isinstance(name_in, str): return encoded, name_in name = bytes(name_in) - for Idx in range(len(name)): + for idx, character in enumerate(name): # print('Trying to intuit the encoding') # /!\ detection is done 2char by 2char for UTF-8 special character - if (len(name) != 1) & (Idx < (len(name) - 1)): + try: + next_character = name[idx + 1] + except IndexError: + # Detect CP850 + if (character >= 0x80) & (character <= 0xA5): + encoding = 'cp850' + break + # Detect ISO-8859-15 + elif (character >= 0xA6) & (character <= 0xFF): + encoding = 'iso-8859-15' + break + else: # Detect UTF-8 - if ((name[Idx] == 0xC2) | (name[Idx] == 0xC3)) & ( - (name[Idx + 1] >= 0xA0) & (name[Idx + 1] <= 0xFF) + if ((character == 0xC2) | (character == 0xC3)) & ( + (next_character >= 0xA0) & (next_character <= 0xFF) ): encoding = 'utf-8' break # Detect CP850 - elif (name[Idx] >= 0x80) & (name[Idx] <= 0xA5): + elif (character >= 0x80) & (character <= 0xA5): encoding = 'cp850' break # Detect ISO-8859-15 - elif (name[Idx] >= 0xA6) & (name[Idx] <= 0xFF): - encoding = 'iso-8859-15' - break - else: - # Detect CP850 - if (name[Idx] >= 0x80) & (name[Idx] <= 0xA5): - encoding = 'cp850' - break - # Detect ISO-8859-15 - elif (name[Idx] >= 0xA6) & (name[Idx] <= 0xFF): + elif (character >= 0xA6) & (character <= 0xFF): encoding = 'iso-8859-15' break if encoding: diff --git a/nzb2media/utils/files.py b/nzb2media/utils/files.py index 5bf82255..5a3d9033 100644 --- a/nzb2media/utils/files.py +++ b/nzb2media/utils/files.py @@ -28,21 +28,21 @@ def move_file(filename, path, link): file_ext = os.path.splitext(filename)[1] try: if file_ext in nzb2media.AUDIO_CONTAINER: - f = mediafile.MediaFile(filename) + guess = mediafile.MediaFile(filename) # get artist and album info - artist = f.artist - album = f.album + artist = guess.artist + album = guess.album # create new path new_path = os.path.join( path, f'{sanitize_name(artist)} - {sanitize_name(album)}', ) elif file_ext in nzb2media.MEDIA_CONTAINER: - f = guessit.guessit(filename) + guess = guessit.guessit(filename) # get title - title = f.get('series') or f.get('title') + title = guess.get('series') or guess.get('title') if not title: title = os.path.splitext(os.path.basename(filename))[0] @@ -217,11 +217,11 @@ def extract_files(src, dst=None, keep_archive=None): extracted_folder = [] extracted_archive = [] - for inputFile in list_media_files( + for input_file in list_media_files( src, media=False, audio=False, meta=False, archives=True, ): - dir_path = os.path.dirname(inputFile) - full_file_name = os.path.basename(inputFile) + dir_path = os.path.dirname(input_file) + full_file_name = os.path.basename(input_file) archive_name = os.path.splitext(full_file_name)[0] archive_name = re.sub(r'part[0-9]+', '', archive_name) @@ -229,29 +229,29 @@ def extract_files(src, dst=None, keep_archive=None): continue # no need to extract this, but keep going to look for other archives and sub directories. 
try: - if extractor.extract(inputFile, dst or dir_path): + if extractor.extract(input_file, dst or dir_path): extracted_folder.append(dir_path) extracted_archive.append(archive_name) except Exception: log.error(f'Extraction failed for: {full_file_name}') for folder in extracted_folder: - for inputFile in list_media_files( + for input_file in list_media_files( folder, media=False, audio=False, meta=False, archives=True, ): - full_file_name = os.path.basename(inputFile) + full_file_name = os.path.basename(input_file) archive_name = os.path.splitext(full_file_name)[0] archive_name = re.sub(r'part[0-9]+', '', archive_name) if archive_name not in extracted_archive or keep_archive: continue # don't remove if we haven't extracted this archive, or if we want to preserve them. log.info(f'Removing extracted archive {full_file_name} from folder {folder} ...') try: - if not os.access(inputFile, os.W_OK): - os.chmod(inputFile, stat.S_IWUSR) - os.remove(inputFile) + if not os.access(input_file, os.W_OK): + os.chmod(input_file, stat.S_IWUSR) + os.remove(input_file) time.sleep(1) except Exception as error: - log.error(f'Unable to remove file {inputFile} due to: {error}') + log.error(f'Unable to remove file {input_file} due to: {error}') def backup_versioned_file(old_file, version): diff --git a/nzb2media/utils/identification.py b/nzb2media/utils/identification.py index 1ec4282d..e275a3c4 100644 --- a/nzb2media/utils/identification.py +++ b/nzb2media/utils/identification.py @@ -19,16 +19,16 @@ def find_imdbid(dir_name, input_name, omdb_api_key): # find imdbid in dirName log.info('Searching folder and file names for imdbID ...') - m = re.search(r'\b(tt\d{7,8})\b', dir_name + input_name) - if m: - imdbid = m.group(1) + match = re.search(r'\b(tt\d{7,8})\b', dir_name + input_name) + if match: + imdbid = match.group(1) log.info(f'Found imdbID [{imdbid}]') return imdbid if os.path.isdir(dir_name): for file in os.listdir(dir_name): - m = re.search(r'\b(tt\d{7,8})\b', file) - if m: - imdbid = m.group(1) + match = re.search(r'\b(tt\d{7,8})\b', file) + if match: + imdbid = match.group(1) log.info(f'Found imdbID [{imdbid}] via file name') return imdbid if 'NZBPR__DNZB_MOREINFO' in os.environ: @@ -37,9 +37,9 @@ def find_imdbid(dir_name, input_name, omdb_api_key): regex = re.compile( r'^http://www.imdb.com/title/(tt[0-9]+)/$', re.IGNORECASE, ) - m = regex.match(dnzb_more_info) - if m: - imdbid = m.group(1) + match = regex.match(dnzb_more_info) + if match: + imdbid = match.group(1) log.info(f'Found imdbID [{imdbid}] from DNZB-MoreInfo') return imdbid log.info('Searching IMDB for imdbID ...') @@ -67,7 +67,7 @@ def find_imdbid(dir_name, input_name, omdb_api_key): log.debug(f'Opening URL: {url}') try: - r = requests.get( + response = requests.get( url, params={'apikey': omdb_api_key, 'y': year, 't': title}, verify=False, @@ -78,7 +78,7 @@ def find_imdbid(dir_name, input_name, omdb_api_key): return try: - results = r.json() + results = response.json() except Exception: log.error('No json data returned from omdbapi.com') diff --git a/nzb2media/utils/links.py b/nzb2media/utils/links.py index 5d05d8e7..9c4de104 100644 --- a/nzb2media/utils/links.py +++ b/nzb2media/utils/links.py @@ -29,14 +29,10 @@ def copy_link(src, target_link, use_link): if src != target_link and os.path.exists(target_link): log.info('MEDIAFILE already exists in the TARGET folder, skipping ...') return True - elif ( - src == target_link - and os.path.isfile(target_link) - and os.path.isfile(src) - ): + if src == target_link and os.path.isfile(target_link) 
and os.path.isfile(src): log.info('SOURCE AND TARGET files are the same, skipping ...') return True - elif src == os.path.dirname(target_link): + if src == os.path.dirname(target_link): log.info('SOURCE AND TARGET folders are the same, skipping ...') return True @@ -50,20 +46,20 @@ def copy_link(src, target_link, use_link): log.info('Directory junction linking SOURCE FOLDER -> TARGET FOLDER') linktastic.dirlink(src, target_link) return True - elif use_link == 'hard': + if use_link == 'hard': log.info('Hard linking SOURCE MEDIAFILE -> TARGET FOLDER') linktastic.link(src, target_link) return True - elif use_link == 'sym': + if use_link == 'sym': log.info('Sym linking SOURCE MEDIAFILE -> TARGET FOLDER') linktastic.symlink(src, target_link) return True - elif use_link == 'move-sym': + if use_link == 'move-sym': log.info('Sym linking SOURCE MEDIAFILE -> TARGET FOLDER') shutil.move(src, target_link) linktastic.symlink(target_link, src) return True - elif use_link == 'move': + if use_link == 'move': log.info('Moving SOURCE MEDIAFILE -> TARGET FOLDER') shutil.move(src, target_link) return True diff --git a/nzb2media/utils/network.py b/nzb2media/utils/network.py index 50252d46..d2056376 100644 --- a/nzb2media/utils/network.py +++ b/nzb2media/utils/network.py @@ -115,14 +115,14 @@ def find_download(client_agent, download_id): 'value': download_id, } try: - r = requests.get( + response = requests.get( url, params=params, verify=False, timeout=(30, 120), ) except requests.ConnectionError: log.error('Unable to open URL') return False # failure - result = r.json() + result = response.json() if result['files']: return True return False diff --git a/nzb2media/utils/nzb.py b/nzb2media/utils/nzb.py index 582cb086..92167314 100644 --- a/nzb2media/utils/nzb.py +++ b/nzb2media/utils/nzb.py @@ -26,12 +26,12 @@ def get_nzoid(input_name): 'output': 'json', } try: - r = requests.get(url, params=params, verify=False, timeout=(30, 120)) + response = requests.get(url, params=params, verify=False, timeout=(30, 120)) except requests.ConnectionError: log.error('Unable to open URL') return nzoid # failure try: - result = r.json() + result = response.json() clean_name = os.path.splitext(os.path.split(input_name)[1])[0] slots.extend( [ @@ -43,12 +43,12 @@ def get_nzoid(input_name): log.warning('Data from SABnzbd queue could not be parsed') params['mode'] = 'history' try: - r = requests.get(url, params=params, verify=False, timeout=(30, 120)) + response = requests.get(url, params=params, verify=False, timeout=(30, 120)) except requests.ConnectionError: log.error('Unable to open URL') return nzoid # failure try: - result = r.json() + result = response.json() clean_name = os.path.splitext(os.path.split(input_name)[1])[0] slots.extend( [ diff --git a/nzb2media/utils/paths.py b/nzb2media/utils/paths.py index 6ecb1efc..d16b274f 100644 --- a/nzb2media/utils/paths.py +++ b/nzb2media/utils/paths.py @@ -84,8 +84,8 @@ def remove_empty_folders(path, remove_root=True): log.debug(f'Checking for empty folders in:{path}') files = os.listdir(path) if len(files): - for f in files: - fullpath = os.path.join(path, f) + for each_file in files: + fullpath = os.path.join(path, each_file) if os.path.isdir(fullpath): remove_empty_folders(fullpath) @@ -111,16 +111,16 @@ def remove_read_only(filename): def flatten_dir(destination, files): log.info(f'FLATTEN: Flattening directory: {destination}') - for outputFile in files: - dir_path = os.path.dirname(outputFile) - file_name = os.path.basename(outputFile) + for output_file in files: + dir_path = 
os.path.dirname(output_file) + file_name = os.path.basename(output_file) if dir_path == destination: continue target = os.path.join(destination, file_name) try: - shutil.move(outputFile, target) + shutil.move(output_file, target) except Exception: - log.error(f'Could not flatten {outputFile}') + log.error(f'Could not flatten {output_file}') remove_empty_folders(destination) # Cleanup empty directories @@ -152,7 +152,7 @@ def rchmod(path, mod): return # Skip files for root, dirs, files in os.walk(path): - for d in dirs: - os.chmod(os.path.join(root, d), mod) - for f in files: - os.chmod(os.path.join(root, f), mod) + for each_dir in dirs: + os.chmod(os.path.join(root, each_dir), mod) + for each_file in files: + os.chmod(os.path.join(root, each_file), mod) diff --git a/nzb2media/utils/processes.py b/nzb2media/utils/processes.py index 7682deae..752b0afb 100644 --- a/nzb2media/utils/processes.py +++ b/nzb2media/utils/processes.py @@ -24,23 +24,22 @@ class WindowsProcess: # {D0E858DF-985E-4907-B7FB-8D732C3FC3B9} _path_str = os.fspath(nzb2media.PID_FILE).replace('\\', '/') self.mutexname = f'nzbtomedia_{_path_str}' - self.CreateMutex = CreateMutex - self.CloseHandle = CloseHandle - self.GetLastError = GetLastError - self.ERROR_ALREADY_EXISTS = ERROR_ALREADY_EXISTS + self.create_mutex = CreateMutex + self.close_handle = CloseHandle + self.get_last_error = GetLastError + self.error_already_exists = ERROR_ALREADY_EXISTS def alreadyrunning(self): - self.mutex = self.CreateMutex(None, 0, self.mutexname) - self.lasterror = self.GetLastError() - if self.lasterror == self.ERROR_ALREADY_EXISTS: - self.CloseHandle(self.mutex) + self.mutex = self.create_mutex(None, 0, self.mutexname) + self.lasterror = self.get_last_error() + if self.lasterror == self.error_already_exists: + self.close_handle(self.mutex) return True - else: - return False + return False def __del__(self): if self.mutex: - self.CloseHandle(self.mutex) + self.close_handle(self.mutex) class PosixProcess: @@ -54,16 +53,16 @@ class PosixProcess: self.lock_socket.bind(f'\0{self.pidpath}') self.lasterror = False return self.lasterror - except OSError as e: - if 'Address already in use' in str(e): + except OSError as error: + if 'Address already in use' in str(error): self.lasterror = True return self.lasterror except AttributeError: pass - if os.path.exists(self.pidpath): + if self.pidpath.exists(): # Make sure it is not a 'stale' pidFile try: - pid = int(open(self.pidpath).read().strip()) + pid = int(self.pidpath.read_text().strip()) except Exception: pid = None # Check list of running pids, if not running it is stale so overwrite @@ -79,9 +78,9 @@ class PosixProcess: self.lasterror = False if not self.lasterror: - # Write my pid into pidFile to keep multiple copies of program from running - with self.pidpath.open(mode='w') as fp: - fp.write(os.getpid()) + # Write my pid into pidFile to keep multiple copies of program + # from running + self.pidpath.write_text(os.getpid()) return self.lasterror def __del__(self): diff --git a/nzb2media/version_check.py b/nzb2media/version_check.py index be7efeb8..cf95e8b2 100644 --- a/nzb2media/version_check.py +++ b/nzb2media/version_check.py @@ -123,8 +123,7 @@ class GitUpdateManager(UpdateManager): if exit_status == 0: log.debug(f'Using: {main_git}') return main_git - else: - log.debug(f'Not using: {main_git}') + log.debug(f'Not using: {main_git}') # trying alternatives @@ -148,8 +147,7 @@ class GitUpdateManager(UpdateManager): if exit_status == 0: log.debug(f'Using: {cur_git}') return cur_git - else: - 
log.debug(f'Not using: {cur_git}') + log.debug(f'Not using: {cur_git}') # Still haven't found a working git log.debug( @@ -230,8 +228,7 @@ class GitUpdateManager(UpdateManager): if self._cur_commit_hash: nzb2media.NZBTOMEDIA_VERSION = self._cur_commit_hash return True - else: - return False + return False def _find_git_branch(self): nzb2media.NZBTOMEDIA_BRANCH = self.get_github_branch() @@ -277,8 +274,7 @@ class GitUpdateManager(UpdateManager): if not re.match('^[a-z0-9]+$', cur_commit_hash): log.debug('Output doesn\'t look like a hash, not using it') return - else: - self._newest_commit_hash = cur_commit_hash + self._newest_commit_hash = cur_commit_hash else: log.debug('git didn\'t return newest commit hash') return @@ -315,15 +311,15 @@ class GitUpdateManager(UpdateManager): if not self._cur_commit_hash: return True - else: - try: - self._check_github_for_update() - except Exception as error: - log.error(f'Unable to contact github, can\'t check for update: {error!r}') - return False - if self._num_commits_behind > 0: - return True + try: + self._check_github_for_update() + except Exception as error: + log.error(f'Unable to contact github, can\'t check for update: {error!r}') + return False + + if self._num_commits_behind > 0: + return True return False @@ -363,8 +359,8 @@ class SourceUpdateManager(UpdateManager): return try: - with open(version_file) as fp: - self._cur_commit_hash = fp.read().strip(' \n\r') + with open(version_file) as fin: + self._cur_commit_hash = fin.read().strip(' \n\r') except OSError as error: log.debug(f'Unable to open \'version.txt\': {error}') @@ -401,14 +397,14 @@ class SourceUpdateManager(UpdateManager): self._num_commits_behind = 0 self._newest_commit_hash = None - gh = github.GitHub( + repository = github.GitHub( self.github_repo_user, self.github_repo, self.branch, ) # try to get newest commit hash and commits behind directly by # comparing branch and current commit if self._cur_commit_hash: - branch_compared = gh.compare( + branch_compared = repository.compare( base=self.branch, head=self._cur_commit_hash, ) @@ -423,13 +419,13 @@ class SourceUpdateManager(UpdateManager): # fall back and iterate over last 100 (items per page in gh_api) commits if not self._newest_commit_hash: - for curCommit in gh.commits(): + for cur_commit in repository.commits(): if not self._newest_commit_hash: - self._newest_commit_hash = curCommit['sha'] + self._newest_commit_hash = cur_commit['sha'] if not self._cur_commit_hash: break - if curCommit['sha'] == self._cur_commit_hash: + if cur_commit['sha'] == self._cur_commit_hash: break # when _cur_commit_hash doesn't match anything _num_commits_behind == 100 From 1938fcc66aab8f4473edb760444129d1a6b8f8de Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 18 Dec 2022 12:55:16 -0500 Subject: [PATCH 09/14] Use with for resource access --- nzb2media/__init__.py | 22 +++++----- nzb2media/extractor/__init__.py | 65 ++++++++++------------------- nzb2media/scene_exceptions.py | 12 ++++-- nzb2media/transcoder.py | 73 +++++++++++++++++---------------- nzb2media/user_scripts.py | 18 ++++---- nzb2media/utils/processes.py | 6 +-- nzb2media/version_check.py | 32 +++++++-------- 7 files changed, 104 insertions(+), 124 deletions(-) diff --git a/nzb2media/__init__.py b/nzb2media/__init__.py index c401449e..b605e7cf 100644 --- a/nzb2media/__init__.py +++ b/nzb2media/__init__.py @@ -37,13 +37,13 @@ except ImportError: def which(name): - proc = subprocess.Popen(['which', name], stdout=PIPE) - try: - proc_out, proc_err = proc.communicate() - 
except Exception: - return '' - else: - return proc_out.strip().decode() + with subprocess.Popen(['which', name], stdout=PIPE) as proc: + try: + proc_out, proc_err = proc.communicate() + except Exception: + return '' + else: + return proc_out.strip().decode() def module_path(module=__file__): @@ -544,8 +544,8 @@ def configure_remote_paths(): def configure_niceness(): global NICENESS try: - proc = subprocess.Popen(['nice'], stdout=DEVNULL, stderr=DEVNULL) - proc.communicate() + with subprocess.Popen(['nice'], stdout=DEVNULL, stderr=DEVNULL) as proc: + proc.communicate() niceness = CFG['Posix']['niceness'] if ( len(niceness.split(',')) > 1 @@ -556,8 +556,8 @@ def configure_niceness(): except Exception: pass try: - proc = subprocess.Popen(['ionice'], stdout=DEVNULL, stderr=DEVNULL) - proc.communicate() + with subprocess.Popen(['ionice'], stdout=DEVNULL, stderr=DEVNULL) as proc: + proc.communicate() try: ionice = CFG['Posix']['ionice_class'] NICENESS.extend(['ionice', f'-c{int(ionice)}']) diff --git a/nzb2media/extractor/__init__.py b/nzb2media/extractor/__init__.py index 569a4ae9..5f42aacd 100644 --- a/nzb2media/extractor/__init__.py +++ b/nzb2media/extractor/__init__.py @@ -8,6 +8,7 @@ import stat import subprocess from subprocess import call from subprocess import Popen +from subprocess import DEVNULL from time import sleep import nzb2media @@ -87,35 +88,23 @@ def extract(file_path, output_destination): } # Test command exists and if not, remove if not os.getenv('TR_TORRENT_DIR'): - devnull = open(os.devnull, 'w') for cmd in required_cmds: - if call( - ['which', cmd], - stdout=devnull, - stderr=devnull, - ): # note, returns 0 if exists, or 1 if doesn't exist. + if call(['which', cmd], stdout=DEVNULL, stderr=DEVNULL): + # note, returns 0 if exists, or 1 if doesn't exist. for key, val in extract_commands.items(): if cmd in val[0]: - if not call( - ['which', '7zr'], - stdout=devnull, - stderr=devnull, - ): # we do have '7zr' + if not call(['which', '7zr'], stdout=DEVNULL, stderr=DEVNULL): + # we do have '7zr' extract_commands[key] = ['7zr', 'x', '-y'] - elif not call( - ['which', '7z'], stdout=devnull, stderr=devnull, - ): # we do have '7z' + elif not call(['which', '7z'], stdout=DEVNULL, stderr=DEVNULL): + # we do have '7z' extract_commands[key] = ['7z', 'x', '-y'] - elif not call( - ['which', '7za'], - stdout=devnull, - stderr=devnull, - ): # we do have '7za' + elif not call(['which', '7za'], stdout=DEVNULL, stderr=DEVNULL): + # we do have '7za' extract_commands[key] = ['7za', 'x', '-y'] else: log.error(f'EXTRACTOR: {cmd} not found, disabling support for {key}') del extract_commands[key] - devnull.close() else: log.warning('EXTRACTOR: Cannot determine which tool to use when called from Transmission') @@ -130,19 +119,10 @@ def extract(file_path, output_destination): cmd = extract_commands[f'.tar{ext[1]}'] else: # Try gunzip cmd = extract_commands[ext[1]] - elif ext[1] in ('.1', '.01', '.001') and os.path.splitext(ext[0])[1] in ( - '.rar', - '.zip', - '.7z', - ): + elif ext[1] in ('.1', '.01', '.001') and os.path.splitext(ext[0])[1] in ('.rar', '.zip', '.7z'): cmd = extract_commands[os.path.splitext(ext[0])[1]] - elif ext[1] in ( - '.cb7', - '.cba', - '.cbr', - '.cbt', - '.cbz', - ): # don't extract these comic book archives. + elif ext[1] in ('.cb7', '.cba', '.cbr', '.cbt', '.cbz'): + # don't extract these comic book archives. 
return False else: if ext[1] in extract_commands: @@ -157,10 +137,11 @@ def extract(file_path, output_destination): if nzb2media.PASSWORDS_FILE and os.path.isfile( os.path.normpath(nzb2media.PASSWORDS_FILE), ): - passwords = [ - line.strip() - for line in open(os.path.normpath(nzb2media.PASSWORDS_FILE)) - ] + with open(os.path.normpath(nzb2media.PASSWORDS_FILE)) as fin: + passwords = [ + line.strip() + for line in fin + ] else: passwords = [] @@ -179,7 +160,6 @@ def extract(file_path, output_destination): os.chdir( output_destination, ) # Not all unpack commands accept full paths, so just extract into this directory - devnull = open(os.devnull, 'w') try: # now works same for nt and *nix info = None @@ -192,9 +172,8 @@ def extract(file_path, output_destination): cmd2 = cmd if 'gunzip' not in cmd: # gunzip doesn't support password cmd2.append('-p-') # don't prompt for password. - res = Popen( - cmd2, stdout=devnull, stderr=devnull, startupinfo=info, - ).wait() # should extract files fine. + with Popen(cmd2, stdout=DEVNULL, stderr=DEVNULL, startupinfo=info) as proc: + res = proc.wait() # should extract files fine. if res == 0: # Both Linux and Windows return 0 for successful. log.info(f'EXTRACTOR: Extraction was successful for {file_path} to {output_destination}') success = 1 @@ -209,10 +188,8 @@ def extract(file_path, output_destination): # append password here. passcmd = f'-p{password}' cmd2.append(passcmd) - proc = Popen( - cmd2, stdout=devnull, stderr=devnull, startupinfo=info, - ) - res = proc.wait() # should extract files fine. + with Popen(cmd2, stdout=DEVNULL, stderr=DEVNULL, startupinfo=info) as proc: + res = proc.wait() # should extract files fine. if (res >= 0 and platform == 'Windows') or res == 0: log.info(f'EXTRACTOR: Extraction was successful for {file_path} to {output_destination} using password: {password}') success = 1 diff --git a/nzb2media/scene_exceptions.py b/nzb2media/scene_exceptions.py index 7cef618d..24d3cb16 100644 --- a/nzb2media/scene_exceptions.py +++ b/nzb2media/scene_exceptions.py @@ -177,7 +177,11 @@ def rename_script(dirname): dirname = directory break if rename_file: - rename_lines = [line.strip() for line in open(rename_file)] + with open(rename_file) as fin: + rename_lines = [ + line.strip() + for line in fin + ] for line in rename_lines: if re.search('^(mv|Move)', line, re.IGNORECASE): cmd = shlex.split(line)[1:] @@ -219,9 +223,9 @@ def par2(dirname): cmd = f'{cmd} {item}' log.debug(f'calling command:{cmd}') try: - proc = subprocess.Popen(command, stdout=DEVNULL, stderr=DEVNULL) - proc.communicate() - result = proc.returncode + with subprocess.Popen(command, stdout=DEVNULL, stderr=DEVNULL) as proc: + proc.communicate() + result = proc.returncode except Exception: log.error(f'par2 file processing for {parfile} has failed') if result == 0: diff --git a/nzb2media/transcoder.py b/nzb2media/transcoder.py index 7fc439a1..04e80b76 100644 --- a/nzb2media/transcoder.py +++ b/nzb2media/transcoder.py @@ -100,16 +100,16 @@ def is_video_good(video: pathlib.Path, status, require_lan=None): def zip_out(file, img): - proc = None if os.path.isfile(file): cmd = ['cat', file] else: cmd = [nzb2media.SEVENZIP, '-so', 'e', img, file] try: - proc = subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) + with subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) as proc: + return proc except Exception: log.error(f'Extracting [{file}] has failed') - return proc + return None def get_video_details(videofile, img=None): @@ -136,13 +136,15 @@ def get_video_details(videofile, img=None): 
print_cmd(command) if img: procin = zip_out(file, img) - proc = subprocess.Popen(command, stdout=PIPE, stdin=procin.stdout) + with subprocess.Popen(command, stdout=PIPE, stdin=procin.stdout) as proc: + proc_out, proc_err = proc.communicate() + result = proc.returncode procin.stdout.close() else: - proc = subprocess.Popen(command, stdout=PIPE) - out, err = proc.communicate() - result = proc.returncode - video_details = json.loads(out.decode()) + with subprocess.Popen(command, stdout=PIPE) as proc: + proc_out, proc_err = proc.communicate() + result = proc.returncode + video_details = json.loads(proc_out.decode()) except Exception: try: # try this again without -show error in case of ffmpeg limitation command = [ @@ -158,13 +160,15 @@ def get_video_details(videofile, img=None): print_cmd(command) if img: procin = zip_out(file, img) - proc = subprocess.Popen(command, stdout=PIPE, stdin=procin.stdout) + with subprocess.Popen(command, stdout=PIPE, stdin=procin.stdout) as proc: + proc_out, proc_err = proc.communicate() + result = proc.returncode procin.stdout.close() else: - proc = subprocess.Popen(command, stdout=PIPE) - out, err = proc.communicate() - result = proc.returncode - video_details = json.loads(out.decode()) + with subprocess.Popen(command, stdout=PIPE) as proc: + proc_out, proc_err = proc.communicate() + result = proc.returncode + video_details = json.loads(proc_out.decode()) except Exception: log.error(f'Checking [{file}] has failed') return video_details, result @@ -804,11 +808,9 @@ def extract_subs(file, newfile_path): print_cmd(command) result = 1 # set result to failed in case call fails. try: - proc = subprocess.Popen( - command, stdout=DEVNULL, stderr=DEVNULL, - ) - proc_out, proc_error = proc.communicate() - result = proc.returncode + with subprocess.Popen(command, stdout=DEVNULL, stderr=DEVNULL) as proc: + proc_out, proc_error = proc.communicate() + result = proc.returncode except Exception: log.error('Extracting subtitle has failed') @@ -905,8 +907,8 @@ def mount_iso(item, new_dir): # Currently only supports Linux Mount when permis make_dir(mount_point) cmd = ['mount', '-o', 'loop', item, mount_point] print_cmd(cmd) - proc = subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) - out, err = proc.communicate() + with subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) as proc: + proc_out, proc_err = proc.communicate() nzb2media.MOUNTED = ( mount_point # Allows us to verify this has been done and then cleanup. ) @@ -956,13 +958,13 @@ def rip_iso(item, new_dir): try: log.debug(f'Attempting to extract .vob or .mts from image file {item}') print_cmd(cmd) - proc = subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) - out, err = proc.communicate() + with subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) as proc: + proc_out, proc_err = proc.communicate() file_match_gen = ( re.match( r'.+(VIDEO_TS[/\\]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb])', line, ) - for line in out.decode().splitlines() + for line in proc_out.decode().splitlines() ) file_list = [ file_match.groups()[0] @@ -994,7 +996,7 @@ def rip_iso(item, new_dir): else: # check BlueRay for BDMV/STREAM/XXXX.MTS mts_list_gen = ( re.match(r'.+(BDMV[/\\]STREAM[/\\][0-9]+[0-9].[Mm]).', line) - for line in out.decode().splitlines() + for line in proc_out.decode().splitlines() ) mts_list = [ file_match.groups()[0] @@ -1182,17 +1184,18 @@ def transcode_directory(dir_name): result = 1 # set result to failed in case call fails. 
try: if isinstance(file, str): - proc = subprocess.Popen(command, stdout=DEVNULL, stderr=PIPE) + with subprocess.Popen(command, stdout=DEVNULL, stderr=PIPE) as proc: + out, err = proc.communicate() else: img, data = next(file.items()) - proc = subprocess.Popen(command, stdout=DEVNULL, stderr=PIPE, stdin=PIPE) - for vob in data['files']: - procin = zip_out(vob, img) - if procin: - log.debug(f'Feeding in file: {vob} to Transcoder') - shutil.copyfileobj(procin.stdout, proc.stdin) - procin.stdout.close() - out, err = proc.communicate() + with subprocess.Popen(command, stdout=DEVNULL, stderr=PIPE, stdin=PIPE) as proc: + for vob in data['files']: + procin = zip_out(vob, img) + if procin: + log.debug(f'Feeding in file: {vob} to Transcoder') + shutil.copyfileobj(procin.stdout, proc.stdin) + procin.stdout.close() + out, err = proc.communicate() if err: log.error(f'Transcoder returned:{err} has failed') result = proc.returncode @@ -1231,8 +1234,8 @@ def transcode_directory(dir_name): time.sleep(5) # play it safe and avoid failing to unmount. cmd = ['umount', '-l', nzb2media.MOUNTED] print_cmd(cmd) - proc = subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) - out, err = proc.communicate() + with subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) as proc: + proc_out, proc_err = proc.communicate() time.sleep(5) os.rmdir(nzb2media.MOUNTED) nzb2media.MOUNTED = None diff --git a/nzb2media/user_scripts.py b/nzb2media/user_scripts.py index d761ddd0..92a2c0bf 100644 --- a/nzb2media/user_scripts.py +++ b/nzb2media/user_scripts.py @@ -118,20 +118,20 @@ def external_script(output_destination, torrent_name, torrent_label, settings): cmd = f'{cmd} {item}' log.info(f'Running script {cmd} on file {file_path}.') try: - proc = Popen(command) - res = proc.wait() - if ( - str(res) in nzb2media.USER_SCRIPT_SUCCESSCODES - ): # Linux returns 0 for successful. + with Popen(command) as proc: + res = proc.wait() + except Exception: + log.error(f'UserScript {command[0]} has failed') + result = 1 + else: + if str(res) in nzb2media.USER_SCRIPT_SUCCESSCODES: + # Linux returns 0 for successful. 
log.info(f'UserScript {command[0]} was successfull') result = 0 else: log.error(f'UserScript {command[0]} has failed with return code: {res}') log.info(f'If the UserScript completed successfully you should add {res} to the user_script_successCodes') - result = int(1) - except Exception: - log.error(f'UserScript {command[0]} has failed') - result = int(1) + result = 1 final_result += result num_files_new = 0 diff --git a/nzb2media/utils/processes.py b/nzb2media/utils/processes.py index 752b0afb..cf4756cc 100644 --- a/nzb2media/utils/processes.py +++ b/nzb2media/utils/processes.py @@ -110,8 +110,8 @@ def restart(): if popen_list: popen_list += nzb2media.SYS_ARGV log.info(f'Restarting nzbToMedia with {popen_list}') - proc = subprocess.Popen(popen_list, cwd=os.getcwd()) - proc.wait() - status = proc.returncode + with subprocess.Popen(popen_list, cwd=os.getcwd()) as proc: + proc.wait() + status = proc.returncode os._exit(status) diff --git a/nzb2media/version_check.py b/nzb2media/version_check.py index cf95e8b2..fed1e778 100644 --- a/nzb2media/version_check.py +++ b/nzb2media/version_check.py @@ -160,52 +160,49 @@ class GitUpdateManager(UpdateManager): def _run_git(self, git_path, args): - proc_out = None + result = None proc_err = None if not git_path: log.debug('No git specified, can\'t use git commands') proc_status = 1 - return proc_out, proc_err, proc_status + return result, proc_err, proc_status cmd = f'{git_path} {args}' try: log.debug(f'Executing {cmd} with your shell in {nzb2media.APP_ROOT}') - proc = subprocess.Popen( + with subprocess.Popen( cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT, shell=True, cwd=nzb2media.APP_ROOT, - ) - proc_out, proc_err = proc.communicate() - proc_status = proc.returncode + ) as proc: + proc_out, proc_err = proc.communicate() + proc_status = proc.returncode - proc_out = proc_out.decode('utf-8') - - if proc_out: - proc_out = proc_out.strip() if nzb2media.LOG_GIT: - log.debug(f'git output: {proc_out}') + msg = proc_out.decode('utf-8').strip() + log.debug(f'git output: {msg}') except OSError: log.error(f'Command {cmd} didn\'t work') proc_status = 1 - proc_status = 128 if ('fatal:' in proc_out) or proc_err else proc_status + proc_status = 128 if ('fatal:' in result) or proc_err else proc_status if proc_status == 0: log.debug(f'{cmd} : returned successful') proc_status = 0 elif nzb2media.LOG_GIT and proc_status in (1, 128): - log.debug(f'{cmd} returned : {proc_out}') + log.debug(f'{cmd} returned : {result}') else: if nzb2media.LOG_GIT: - log.debug(f'{cmd} returned : {proc_out}, treat as error for now') + log.debug(f'{cmd} returned : {result}, treat as error for now') proc_status = 1 - return proc_out, proc_err, proc_status + return result, proc_err, proc_status def _find_installed_version(self): """ @@ -480,9 +477,8 @@ class SourceUpdateManager(UpdateManager): # extract to sb-update dir log.info(f'Extracting file {tar_download_path}') - tar = tarfile.open(tar_download_path) - tar.extractall(sb_update_dir) - tar.close() + with tarfile.open(tar_download_path) as tar: + tar.extractall(sb_update_dir) # delete .tar.gz log.info(f'Deleting file {tar_download_path}') From bea41c3f2ebecf03e3a00d3cea726d72f272663c Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 18 Dec 2022 15:09:51 -0500 Subject: [PATCH 10/14] Ignore line-length limits to better identify areas to be refactored. 
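
A note on the recurring change in the preceding patches of this series: bare
subprocess.Popen calls are moved under a context manager so the child process
is reaped and its pipes are closed even when communicate() raises. Below is a
minimal, illustrative sketch of that pattern in isolation; run_quiet is a
placeholder name rather than a helper from this repository, and the ['nice']
command simply mirrors the probe used in configure_niceness.

    import subprocess
    from subprocess import DEVNULL, PIPE

    def run_quiet(cmd):
        """Run *cmd* quietly and return (exit status, captured stdout)."""
        # Popen.__exit__ waits on the child and closes its pipes, even if
        # communicate() raises, so no file descriptors are leaked.
        with subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) as proc:
            out, _ = proc.communicate()
            return proc.returncode, out

    # usage: status, output = run_quiet(['nice'])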
--- nzb2media/__init__.py | 818 +--------------------- nzb2media/auto_process/books.py | 64 +- nzb2media/auto_process/comics.py | 71 +- nzb2media/auto_process/common.py | 30 +- nzb2media/auto_process/games.py | 73 +- nzb2media/auto_process/movies.py | 358 ++-------- nzb2media/auto_process/music.py | 190 +---- nzb2media/auto_process/tv.py | 296 ++------ nzb2media/configuration.py | 732 ++----------------- nzb2media/databases.py | 24 +- nzb2media/extractor/__init__.py | 98 +-- nzb2media/extractor/bin/AMD64/license.txt | 39 +- nzb2media/extractor/bin/invisible.vbs | 4 - nzb2media/extractor/bin/x86/license.txt | 39 +- nzb2media/github_api.py | 21 +- nzb2media/main_db.py | 85 +-- nzb2media/managers/pymedusa.py | 100 +-- nzb2media/managers/sickbeard.py | 353 ++-------- nzb2media/nzb/configuration.py | 6 +- nzb2media/plugins/plex.py | 17 +- nzb2media/plugins/subtitles.py | 52 +- nzb2media/processor/manual.py | 34 +- nzb2media/processor/nzb.py | 83 +-- nzb2media/processor/nzbget.py | 18 +- nzb2media/processor/sab.py | 22 +- nzb2media/scene_exceptions.py | 78 +-- nzb2media/torrent/configuration.py | 21 +- nzb2media/torrent/deluge.py | 1 - nzb2media/torrent/qbittorrent.py | 1 - nzb2media/torrent/synology.py | 1 - nzb2media/torrent/transmission.py | 1 - nzb2media/torrent/utorrent.py | 1 - nzb2media/transcoder.py | 561 +++------------ nzb2media/user_scripts.py | 62 +- nzb2media/utils/common.py | 51 +- nzb2media/utils/download_info.py | 1 - nzb2media/utils/encoding.py | 25 +- nzb2media/utils/files.py | 104 +-- nzb2media/utils/identification.py | 81 +-- nzb2media/utils/links.py | 11 +- nzb2media/utils/naming.py | 9 +- nzb2media/utils/network.py | 17 +- nzb2media/utils/nzb.py | 20 +- nzb2media/utils/parsers.py | 33 +- nzb2media/utils/paths.py | 14 +- nzb2media/utils/processes.py | 6 - nzb2media/utils/torrent.py | 10 +- nzb2media/version_check.py | 157 +---- 48 files changed, 630 insertions(+), 4263 deletions(-) diff --git a/nzb2media/__init__.py b/nzb2media/__init__.py index b605e7cf..55b9c90b 100644 --- a/nzb2media/__init__.py +++ b/nzb2media/__init__.py @@ -13,9 +13,9 @@ import time import typing from subprocess import PIPE, DEVNULL +from nzb2media import databases from nzb2media import main_db from nzb2media import version_check -from nzb2media import databases from nzb2media.configuration import Config from nzb2media.nzb.configuration import configure_nzbs from nzb2media.plugins.plex import configure_plex @@ -28,7 +28,6 @@ from nzb2media.utils.processes import restart log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) - try: import win32event except ImportError: @@ -56,7 +55,6 @@ def module_path(module=__file__): SOURCE_ROOT = module_path() APP_ROOT = SOURCE_ROOT.parent - # init preliminaries SYS_ARGV = sys.argv[1:] APP_FILENAME = pathlib.Path(sys.argv[0]) @@ -70,22 +68,10 @@ CONFIG_MOVIE_FILE = APP_ROOT / 'autoProcessMovie.cfg' CONFIG_TV_FILE = APP_ROOT / 'autoProcessTv.cfg' TEST_FILE = APP_ROOT / 'tests' / 'test.mp4' MYAPP = None - __version__ = '12.1.11' - # Client Agents NZB_CLIENTS = ['sabnzbd', 'nzbget', 'manual'] -TORRENT_CLIENTS = [ - 'transmission', - 'deluge', - 'utorrent', - 'rtorrent', - 'qbittorrent', - 'other', - 'manual', -] - - +TORRENT_CLIENTS = ['transmission', 'deluge', 'utorrent', 'rtorrent', 'qbittorrent', 'other', 'manual'] # sickbeard fork/branch constants FORK_DEFAULT = 'default' FORK_FAILED = 'failed' @@ -99,122 +85,16 @@ FORK_MEDUSA_APIV2 = 'Medusa-apiv2' FORK_SICKGEAR = 'SickGear' FORK_SICKGEAR_API = 'SickGear-api' FORK_STHENO = 'Stheno' - -FORKS: 
typing.Mapping[str, typing.Mapping] = { - FORK_DEFAULT: {'dir': None}, - FORK_FAILED: {'dirName': None, 'failed': None}, - FORK_FAILED_TORRENT: {'dir': None, 'failed': None, 'process_method': None}, - FORK_SICKCHILL: { - 'proc_dir': None, - 'failed': None, - 'process_method': None, - 'force': None, - 'delete_on': None, - 'force_next': None, - }, - FORK_SICKCHILL_API: { - 'path': None, - 'proc_dir': None, - 'failed': None, - 'process_method': None, - 'force': None, - 'force_replace': None, - 'return_data': None, - 'type': None, - 'delete': None, - 'force_next': None, - 'is_priority': None, - 'cmd': 'postprocess', - }, - FORK_SICKBEARD_API: { - 'path': None, - 'failed': None, - 'process_method': None, - 'force_replace': None, - 'return_data': None, - 'type': None, - 'delete': None, - 'force_next': None, - 'cmd': 'postprocess', - }, - FORK_MEDUSA: { - 'proc_dir': None, - 'failed': None, - 'process_method': None, - 'force': None, - 'delete_on': None, - 'ignore_subs': None, - }, - FORK_MEDUSA_API: { - 'path': None, - 'failed': None, - 'process_method': None, - 'force_replace': None, - 'return_data': None, - 'type': None, - 'delete_files': None, - 'is_priority': None, - 'cmd': 'postprocess', - }, - FORK_MEDUSA_APIV2: { - 'proc_dir': None, - 'resource': None, - 'failed': None, - 'process_method': None, - 'force': None, - 'type': None, - 'delete_on': None, - 'is_priority': None, - }, - FORK_SICKGEAR: { - 'dir': None, - 'failed': None, - 'process_method': None, - 'force': None, - }, - FORK_SICKGEAR_API: { - 'path': None, - 'process_method': None, - 'force_replace': None, - 'return_data': None, - 'type': None, - 'is_priority': None, - 'failed': None, - 'cmd': 'sg.postprocess', - }, - FORK_STHENO: { - 'proc_dir': None, - 'failed': None, - 'process_method': None, - 'force': None, - 'delete_on': None, - 'ignore_subs': None, - }, -} -ALL_FORKS = { - k: None - for k in set( - itertools.chain.from_iterable( - [ - FORKS[x].keys() - for x in FORKS.keys() - ], - ), - ) -} - +FORKS: typing.Mapping[str, typing.Mapping] = {FORK_DEFAULT: {'dir': None}, FORK_FAILED: {'dirName': None, 'failed': None}, FORK_FAILED_TORRENT: {'dir': None, 'failed': None, 'process_method': None}, FORK_SICKCHILL: {'proc_dir': None, 'failed': None, 'process_method': None, 'force': None, 'delete_on': None, 'force_next': None}, FORK_SICKCHILL_API: {'path': None, 'proc_dir': None, 'failed': None, 'process_method': None, 'force': None, 'force_replace': None, 'return_data': None, 'type': None, 'delete': None, 'force_next': None, 'is_priority': None, 'cmd': 'postprocess'}, FORK_SICKBEARD_API: {'path': None, 'failed': None, 'process_method': None, 'force_replace': None, 'return_data': None, 'type': None, 'delete': None, 'force_next': None, 'cmd': 'postprocess'}, FORK_MEDUSA: {'proc_dir': None, 'failed': None, 'process_method': None, 'force': None, 'delete_on': None, 'ignore_subs': None}, FORK_MEDUSA_API: {'path': None, 'failed': None, 'process_method': None, 'force_replace': None, 'return_data': None, 'type': None, 'delete_files': None, 'is_priority': None, 'cmd': 'postprocess'}, FORK_MEDUSA_APIV2: {'proc_dir': None, 'resource': None, 'failed': None, 'process_method': None, 'force': None, 'type': None, 'delete_on': None, 'is_priority': None}, FORK_SICKGEAR: {'dir': None, 'failed': None, 'process_method': None, 'force': None}, FORK_SICKGEAR_API: {'path': None, 'process_method': None, 'force_replace': None, 'return_data': None, 'type': None, 'is_priority': None, 'failed': None, 'cmd': 'sg.postprocess'}, FORK_STHENO: {'proc_dir': None, 'failed': 
None, 'process_method': None, 'force': None, 'delete_on': None, 'ignore_subs': None}} +ALL_FORKS = {k: None for k in set(itertools.chain.from_iterable([FORKS[x].keys() for x in FORKS.keys()]))} # SiCKRAGE OAuth2 SICKRAGE_OAUTH_CLIENT_ID = 'nzbtomedia' -SICKRAGE_OAUTH_TOKEN_URL = ( - 'https://auth.sickrage.ca/realms/sickrage/protocol/openid-connect/token' -) - +SICKRAGE_OAUTH_TOKEN_URL = 'https://auth.sickrage.ca/realms/sickrage/protocol/openid-connect/token' # NZBGet Exit Codes NZBGET_POSTPROCESS_PAR_CHECK = 92 NZBGET_POSTPROCESS_SUCCESS = 93 NZBGET_POSTPROCESS_ERROR = 94 NZBGET_POSTPROCESS_NONE = 95 - CFG = None LOG_DEBUG = None LOG_DB = None @@ -222,7 +102,6 @@ LOG_ENV = None LOG_GIT = None SYS_ENCODING = None FAILED = False - AUTO_UPDATE = None NZBTOMEDIA_VERSION = __version__ NEWEST_VERSION = None @@ -235,13 +114,11 @@ GIT_REPO = None FORCE_CLEAN = None SAFE_MODE = None NOEXTRACTFAILED = None - NZB_CLIENT_AGENT = None SABNZBD_HOST = None SABNZBD_PORT = None SABNZBD_APIKEY = None NZB_DEFAULT_DIRECTORY = None - TORRENT_CLIENT_AGENT = None TORRENT_CLASS = None USE_LINK = None @@ -252,49 +129,39 @@ TORRENT_CHMOD_DIRECTORY = None TORRENT_DEFAULT_DIRECTORY = None TORRENT_RESUME = None TORRENT_RESUME_ON_FAILURE = None - REMOTE_PATHS = [] - UTORRENT_WEB_UI = None UTORRENT_USER = None UTORRENT_PASSWORD = None - TRANSMISSION_HOST = None TRANSMISSION_PORT = None TRANSMISSION_USER = None TRANSMISSION_PASSWORD = None - SYNO_HOST = None SYNO_PORT = None SYNO_USER = None SYNO_PASSWORD = None - DELUGE_HOST = None DELUGE_PORT = None DELUGE_USER = None DELUGE_PASSWORD = None - QBITTORRENT_HOST = None QBITTORRENT_PORT = None QBITTORRENT_USER = None QBITTORRENT_PASSWORD = None - PLEX_SSL = None PLEX_HOST = None PLEX_PORT = None PLEX_TOKEN = None PLEX_SECTION: list[str] = [] - EXT_CONTAINER: list[str] = [] COMPRESSED_CONTAINER = [] MEDIA_CONTAINER = [] AUDIO_CONTAINER = [] META_CONTAINER = [] - SECTIONS: list[str] = [] CATEGORIES: list[str] = [] FORK_SET: list[str] = [] - MOUNTED = None GETSUBS = False TRANSCODE = None @@ -350,11 +217,9 @@ CHECK_MEDIA = None REQUIRE_LAN = None NICENESS = [] HWACCEL = False - PASSWORDS_FILE = None DOWNLOAD_INFO = None GROUPS = None - USER_SCRIPT_MEDIAEXTENSIONS = None USER_SCRIPT = None USER_SCRIPT_PARAM = None @@ -362,25 +227,21 @@ USER_SCRIPT_SUCCESSCODES = None USER_SCRIPT_CLEAN = None USER_DELAY = None USER_SCRIPT_RUNONCE = None - __INITIALIZED__ = False def configure_logging(): global LOG_FILE global LOG_DIR - if 'NTM_LOGFILE' in os.environ: LOG_FILE = os.environ['NTM_LOGFILE'] LOG_DIR = os.path.split(LOG_FILE)[0] - if not make_dir(LOG_DIR): print('No log folder, logging to screen only') def configure_process(): global MYAPP - MYAPP = RunningProcess() while MYAPP.alreadyrunning(): print('Waiting for existing session to end') @@ -389,26 +250,19 @@ def configure_process(): def configure_locale(): global SYS_ENCODING - try: locale.setlocale(locale.LC_ALL, '') SYS_ENCODING = locale.getpreferredencoding() except (locale.Error, OSError): pass - # For OSes that are poorly configured I'll just randomly force UTF-8 - if not SYS_ENCODING or SYS_ENCODING in ( - 'ANSI_X3.4-1968', - 'US-ASCII', - 'ASCII', - ): + if not SYS_ENCODING or SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'): SYS_ENCODING = 'UTF-8' def configure_migration(): global CONFIG_FILE global CFG - # run migrate to convert old cfg to new style cfg plus fix any cfg missing values/options. 
if not Config.migrate(): log.error(f'Unable to migrate config file {CONFIG_FILE}, exiting ...') @@ -416,11 +270,9 @@ def configure_migration(): pass # We will try and read config from Environment. else: sys.exit(-1) - # run migrate to convert NzbGet data from old cfg style to new cfg style if 'NZBOP_SCRIPTDIR' in os.environ: CFG = Config.addnzbget() - else: # load newly migrated config log.info(f'Loading config from [{CONFIG_FILE}]') CFG = Config() @@ -431,13 +283,11 @@ def configure_logging_part_2(): global LOG_DEBUG global LOG_ENV global LOG_GIT - # Enable/Disable DEBUG Logging LOG_DB = int(CFG['General']['log_db']) LOG_DEBUG = int(CFG['General']['log_debug']) LOG_ENV = int(CFG['General']['log_env']) LOG_GIT = int(CFG['General']['log_git']) - if LOG_ENV: for item in os.environ: log.info(f'{item}: {os.environ[item]}') @@ -456,7 +306,6 @@ def configure_general(): global REQUIRE_LAN global SAFE_MODE global NOEXTRACTFAILED - # Set Version and GIT variables VERSION_NOTIFY = int(CFG['General']['version_notify']) GIT_REPO = 'nzbToMedia' @@ -467,11 +316,7 @@ def configure_general(): FFMPEG_PATH = CFG['General']['ffmpeg_path'] SYS_PATH = CFG['General']['sys_path'] CHECK_MEDIA = int(CFG['General']['check_media']) - REQUIRE_LAN = ( - None - if not CFG['General']['require_lan'] - else CFG['General']['require_lan'].split(',') - ) + REQUIRE_LAN = None if not CFG['General']['require_lan'] else CFG['General']['require_lan'].split(',') SAFE_MODE = int(CFG['General']['safe_mode']) NOEXTRACTFAILED = int(CFG['General']['no_extract_failed']) @@ -479,10 +324,8 @@ def configure_general(): def configure_updates(): global AUTO_UPDATE global MYAPP - AUTO_UPDATE = int(CFG['General']['auto_update']) version_checker = version_check.CheckVersion() - # Check for updates via GitHUB if version_checker.check_for_new_version() and AUTO_UPDATE: log.info('Auto-Updating nzbToMedia, Please wait ...') @@ -495,7 +338,6 @@ def configure_updates(): restart() else: log.error('Update failed, not restarting. Check your log for more information.') - # Set Current Version log.info(f'nzbToMedia Version:{NZBTOMEDIA_VERSION} Branch:{GIT_BRANCH} ({platform.system()} {platform.release()})') @@ -507,38 +349,24 @@ def configure_wake_on_lan(): def configure_groups(): global GROUPS - GROUPS = CFG['Custom']['remove_group'] - if isinstance(GROUPS, str): GROUPS = GROUPS.split(',') - if GROUPS == ['']: GROUPS = None def configure_remote_paths(): global REMOTE_PATHS - REMOTE_PATHS = CFG['Network']['mount_points'] or [] - if REMOTE_PATHS: if isinstance(REMOTE_PATHS, list): - REMOTE_PATHS = ','.join( - REMOTE_PATHS, - ) # fix in case this imported as list. - - REMOTE_PATHS = ( - # /volume1/Public/,E:\|/volume2/share/,\\NAS\ - tuple(item.split(',')) - for item in REMOTE_PATHS.split('|') - ) - - REMOTE_PATHS = [ - # strip trailing and leading whitespaces - (local.strip(), remote.strip()) - for local, remote in REMOTE_PATHS - ] + # fix in case this imported as list. + REMOTE_PATHS = ','.join(REMOTE_PATHS) + # /volume1/Public/,E:\|/volume2/share/,\\NAS\ + REMOTE_PATHS = (tuple(item.split(',')) for item in REMOTE_PATHS.split('|')) + # strip trailing and leading whitespaces + REMOTE_PATHS = [(local.strip(), remote.strip()) for local, remote in REMOTE_PATHS] def configure_niceness(): @@ -547,9 +375,7 @@ def configure_niceness(): with subprocess.Popen(['nice'], stdout=DEVNULL, stderr=DEVNULL) as proc: proc.communicate() niceness = CFG['Posix']['niceness'] - if ( - len(niceness.split(',')) > 1 - ): # Allow passing of absolute command, not just value. 
+ if len(niceness.split(',')) > 1: # Allow passing of absolute command, not just value. NICENESS.extend(niceness.split(',')) else: NICENESS.extend(['nice', f'-n{int(niceness)}']) @@ -580,29 +406,17 @@ def configure_containers(): global MEDIA_CONTAINER global AUDIO_CONTAINER global META_CONTAINER - - COMPRESSED_CONTAINER = [ - re.compile(r'.r\d{2}$', re.I), - re.compile(r'.part\d+.rar$', re.I), - re.compile('.rar$', re.I), - ] - COMPRESSED_CONTAINER += [ - re.compile(f'{ext}$', re.I) - for ext in CFG['Extensions']['compressedExtensions'] - ] + COMPRESSED_CONTAINER = [re.compile(r'.r\d{2}$', re.I), re.compile(r'.part\d+.rar$', re.I), re.compile('.rar$', re.I)] + COMPRESSED_CONTAINER += [re.compile(f'{ext}$', re.I) for ext in CFG['Extensions']['compressedExtensions']] MEDIA_CONTAINER = CFG['Extensions']['mediaExtensions'] AUDIO_CONTAINER = CFG['Extensions']['audioExtensions'] META_CONTAINER = CFG['Extensions']['metaExtensions'] # .nfo,.sub,.srt - if isinstance(COMPRESSED_CONTAINER, str): COMPRESSED_CONTAINER = COMPRESSED_CONTAINER.split(',') - if isinstance(MEDIA_CONTAINER, str): MEDIA_CONTAINER = MEDIA_CONTAINER.split(',') - if isinstance(AUDIO_CONTAINER, str): AUDIO_CONTAINER = AUDIO_CONTAINER.split(',') - if isinstance(META_CONTAINER, str): META_CONTAINER = META_CONTAINER.split(',') @@ -653,7 +467,6 @@ def configure_transcoder(): global HWACCEL global ALLOWSUBS global DEFAULTS - MOUNTED = None GETSUBS = int(CFG['Transcoder']['getSubs']) TRANSCODE = int(CFG['Transcoder']['transcode']) @@ -715,11 +528,7 @@ def configure_transcoder(): except Exception: pass try: - VBITRATE = int( - (CFG['Transcoder']['outputVideoBitrate'].strip()).replace( - 'k', '000', - ), - ) + VBITRATE = int((CFG['Transcoder']['outputVideoBitrate'].strip()).replace('k', '000')) except Exception: pass VRESOLUTION = CFG['Transcoder']['outputVideoResolution'] @@ -734,11 +543,7 @@ def configure_transcoder(): except Exception: pass try: - ABITRATE = int( - (CFG['Transcoder']['outputAudioBitrate'].strip()).replace( - 'k', '000', - ), - ) + ABITRATE = int((CFG['Transcoder']['outputAudioBitrate'].strip()).replace('k', '000')) except Exception: pass ACODEC2 = CFG['Transcoder']['outputAudioTrack2Codec'].strip() @@ -748,17 +553,11 @@ def configure_transcoder(): if ACODEC2_ALLOW == ['']: ACODEC2_ALLOW = [] try: - ACHANNELS2 = int( - CFG['Transcoder']['outputAudioTrack2Channels'].strip(), - ) + ACHANNELS2 = int(CFG['Transcoder']['outputAudioTrack2Channels'].strip()) except Exception: pass try: - ABITRATE2 = int( - (CFG['Transcoder']['outputAudioTrack2Bitrate'].strip()).replace( - 'k', '000', - ), - ) + ABITRATE2 = int((CFG['Transcoder']['outputAudioTrack2Bitrate'].strip()).replace('k', '000')) except Exception: pass ACODEC3 = CFG['Transcoder']['outputAudioOtherCodec'].strip() @@ -772,529 +571,17 @@ def configure_transcoder(): except Exception: pass try: - ABITRATE3 = int( - (CFG['Transcoder']['outputAudioOtherBitrate'].strip()).replace( - 'k', '000', - ), - ) + ABITRATE3 = int((CFG['Transcoder']['outputAudioOtherBitrate'].strip()).replace('k', '000')) except Exception: pass SCODEC = CFG['Transcoder']['outputSubtitleCodec'].strip() BURN = int(CFG['Transcoder']['burnInSubtitle'].strip()) DEFAULTS = CFG['Transcoder']['outputDefault'].strip() HWACCEL = int(CFG['Transcoder']['hwAccel']) - allow_subs = ['.mkv', '.mp4', '.m4v', 'asf', 'wma', 'wmv'] - codec_alias = { - 'libx264': ['libx264', 'h264', 'h.264', 'AVC', 'MPEG-4'], - 'libmp3lame': ['libmp3lame', 'mp3'], - 'libfaac': ['libfaac', 'aac', 'faac'], - } - transcode_defaults = { - 'iPad': 
{ - 'VEXTENSION': '.mp4', - 'VCODEC': 'libx264', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': None, - 'VCRF': None, - 'VLEVEL': None, - 'VRESOLUTION': None, - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - ], - 'ACODEC': 'aac', - 'ACODEC_ALLOW': ['libfaac'], - 'ABITRATE': None, - 'ACHANNELS': 2, - 'ACODEC2': 'ac3', - 'ACODEC2_ALLOW': ['ac3'], - 'ABITRATE2': None, - 'ACHANNELS2': 6, - 'ACODEC3': None, - 'ACODEC3_ALLOW': [], - 'ABITRATE3': None, - 'ACHANNELS3': None, - 'SCODEC': 'mov_text', - }, - 'iPad-1080p': { - 'VEXTENSION': '.mp4', - 'VCODEC': 'libx264', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': None, - 'VCRF': None, - 'VLEVEL': None, - 'VRESOLUTION': '1920:1080', - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - ], - 'ACODEC': 'aac', - 'ACODEC_ALLOW': ['libfaac'], - 'ABITRATE': None, - 'ACHANNELS': 2, - 'ACODEC2': 'ac3', - 'ACODEC2_ALLOW': ['ac3'], - 'ABITRATE2': None, - 'ACHANNELS2': 6, - 'ACODEC3': None, - 'ACODEC3_ALLOW': [], - 'ABITRATE3': None, - 'ACHANNELS3': None, - 'SCODEC': 'mov_text', - }, - 'iPad-720p': { - 'VEXTENSION': '.mp4', - 'VCODEC': 'libx264', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': None, - 'VCRF': None, - 'VLEVEL': None, - 'VRESOLUTION': '1280:720', - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - ], - 'ACODEC': 'aac', - 'ACODEC_ALLOW': ['libfaac'], - 'ABITRATE': None, - 'ACHANNELS': 2, - 'ACODEC2': 'ac3', - 'ACODEC2_ALLOW': ['ac3'], - 'ABITRATE2': None, - 'ACHANNELS2': 6, - 'ACODEC3': None, - 'ACODEC3_ALLOW': [], - 'ABITRATE3': None, - 'ACHANNELS3': None, - 'SCODEC': 'mov_text', - }, - 'Apple-TV': { - 'VEXTENSION': '.mp4', - 'VCODEC': 'libx264', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': None, - 'VCRF': None, - 'VLEVEL': None, - 'VRESOLUTION': '1280:720', - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - ], - 'ACODEC': 'ac3', - 'ACODEC_ALLOW': ['ac3'], - 'ABITRATE': None, - 'ACHANNELS': 6, - 'ACODEC2': 'aac', - 'ACODEC2_ALLOW': ['libfaac'], - 'ABITRATE2': None, - 'ACHANNELS2': 2, - 'ACODEC3': None, - 'ACODEC3_ALLOW': [], - 'ABITRATE3': None, - 'ACHANNELS3': None, - 'SCODEC': 'mov_text', - }, - 'iPod': { - 'VEXTENSION': '.mp4', - 'VCODEC': 'libx264', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': None, - 'VCRF': None, - 'VLEVEL': None, - 'VRESOLUTION': '1280:720', - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - ], - 'ACODEC': 'aac', - 'ACODEC_ALLOW': ['libfaac'], - 'ABITRATE': 128000, - 'ACHANNELS': 2, - 'ACODEC2': None, - 'ACODEC2_ALLOW': [], - 'ABITRATE2': None, - 'ACHANNELS2': None, - 'ACODEC3': None, - 'ACODEC3_ALLOW': [], - 'ABITRATE3': None, - 'ACHANNELS3': None, - 'SCODEC': 'mov_text', - }, - 'iPhone': { - 'VEXTENSION': '.mp4', - 'VCODEC': 'libx264', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': None, - 'VCRF': None, - 'VLEVEL': None, - 'VRESOLUTION': '460:320', - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - ], - 'ACODEC': 'aac', - 'ACODEC_ALLOW': ['libfaac'], - 'ABITRATE': 128000, - 'ACHANNELS': 2, - 'ACODEC2': None, - 'ACODEC2_ALLOW': [], - 'ABITRATE2': None, - 'ACHANNELS2': None, - 'ACODEC3': None, - 'ACODEC3_ALLOW': [], - 'ABITRATE3': None, - 'ACHANNELS3': None, - 'SCODEC': 'mov_text', - }, - 'PS3': { - 'VEXTENSION': '.mp4', - 
'VCODEC': 'libx264', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': None, - 'VCRF': None, - 'VLEVEL': None, - 'VRESOLUTION': None, - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - ], - 'ACODEC': 'ac3', - 'ACODEC_ALLOW': ['ac3'], - 'ABITRATE': None, - 'ACHANNELS': 6, - 'ACODEC2': 'aac', - 'ACODEC2_ALLOW': ['libfaac'], - 'ABITRATE2': None, - 'ACHANNELS2': 2, - 'ACODEC3': None, - 'ACODEC3_ALLOW': [], - 'ABITRATE3': None, - 'ACHANNELS3': None, - 'SCODEC': 'mov_text', - }, - 'xbox': { - 'VEXTENSION': '.mp4', - 'VCODEC': 'libx264', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': None, - 'VCRF': None, - 'VLEVEL': None, - 'VRESOLUTION': None, - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - ], - 'ACODEC': 'ac3', - 'ACODEC_ALLOW': ['ac3'], - 'ABITRATE': None, - 'ACHANNELS': 6, - 'ACODEC2': None, - 'ACODEC2_ALLOW': [], - 'ABITRATE2': None, - 'ACHANNELS2': None, - 'ACODEC3': None, - 'ACODEC3_ALLOW': [], - 'ABITRATE3': None, - 'ACHANNELS3': None, - 'SCODEC': 'mov_text', - }, - 'Roku-480p': { - 'VEXTENSION': '.mp4', - 'VCODEC': 'libx264', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': None, - 'VCRF': None, - 'VLEVEL': None, - 'VRESOLUTION': None, - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - ], - 'ACODEC': 'aac', - 'ACODEC_ALLOW': ['libfaac'], - 'ABITRATE': 128000, - 'ACHANNELS': 2, - 'ACODEC2': 'ac3', - 'ACODEC2_ALLOW': ['ac3'], - 'ABITRATE2': None, - 'ACHANNELS2': 6, - 'ACODEC3': None, - 'ACODEC3_ALLOW': [], - 'ABITRATE3': None, - 'ACHANNELS3': None, - 'SCODEC': 'mov_text', - }, - 'Roku-720p': { - 'VEXTENSION': '.mp4', - 'VCODEC': 'libx264', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': None, - 'VCRF': None, - 'VLEVEL': None, - 'VRESOLUTION': None, - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - ], - 'ACODEC': 'aac', - 'ACODEC_ALLOW': ['libfaac'], - 'ABITRATE': 128000, - 'ACHANNELS': 2, - 'ACODEC2': 'ac3', - 'ACODEC2_ALLOW': ['ac3'], - 'ABITRATE2': None, - 'ACHANNELS2': 6, - 'ACODEC3': None, - 'ACODEC3_ALLOW': [], - 'ABITRATE3': None, - 'ACHANNELS3': None, - 'SCODEC': 'mov_text', - }, - 'Roku-1080p': { - 'VEXTENSION': '.mp4', - 'VCODEC': 'libx264', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': None, - 'VCRF': None, - 'VLEVEL': None, - 'VRESOLUTION': None, - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - ], - 'ACODEC': 'aac', - 'ACODEC_ALLOW': ['libfaac'], - 'ABITRATE': 160000, - 'ACHANNELS': 2, - 'ACODEC2': 'ac3', - 'ACODEC2_ALLOW': ['ac3'], - 'ABITRATE2': None, - 'ACHANNELS2': 6, - 'ACODEC3': None, - 'ACODEC3_ALLOW': [], - 'ABITRATE3': None, - 'ACHANNELS3': None, - 'SCODEC': 'mov_text', - }, - 'mkv': { - 'VEXTENSION': '.mkv', - 'VCODEC': 'libx264', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': None, - 'VCRF': None, - 'VLEVEL': None, - 'VRESOLUTION': None, - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - 'mpeg2video', - ], - 'ACODEC': 'dts', - 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], - 'ABITRATE': None, - 'ACHANNELS': 8, - 'ACODEC2': None, - 'ACODEC2_ALLOW': [], - 'ABITRATE2': None, - 'ACHANNELS2': None, - 'ACODEC3': 'ac3', - 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], - 'ABITRATE3': None, - 'ACHANNELS3': 8, - 'SCODEC': 'mov_text', - }, - 'mkv-bluray': { - 
'VEXTENSION': '.mkv', - 'VCODEC': 'libx265', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': None, - 'VCRF': None, - 'VLEVEL': None, - 'VRESOLUTION': None, - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'hevc', - 'h265', - 'libx265', - 'h.265', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - 'mpeg2video', - ], - 'ACODEC': 'dts', - 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], - 'ABITRATE': None, - 'ACHANNELS': 8, - 'ACODEC2': None, - 'ACODEC2_ALLOW': [], - 'ABITRATE2': None, - 'ACHANNELS2': None, - 'ACODEC3': 'ac3', - 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], - 'ABITRATE3': None, - 'ACHANNELS3': 8, - 'SCODEC': 'mov_text', - }, - 'mp4-scene-release': { - 'VEXTENSION': '.mp4', - 'VCODEC': 'libx264', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': None, - 'VCRF': 19, - 'VLEVEL': '3.1', - 'VRESOLUTION': None, - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - 'mpeg2video', - ], - 'ACODEC': 'dts', - 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], - 'ABITRATE': None, - 'ACHANNELS': 8, - 'ACODEC2': None, - 'ACODEC2_ALLOW': [], - 'ABITRATE2': None, - 'ACHANNELS2': None, - 'ACODEC3': 'ac3', - 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], - 'ABITRATE3': None, - 'ACHANNELS3': 8, - 'SCODEC': 'mov_text', - }, - 'MKV-SD': { - 'VEXTENSION': '.mkv', - 'VCODEC': 'libx264', - 'VPRESET': None, - 'VFRAMERATE': None, - 'VBITRATE': '1200k', - 'VCRF': None, - 'VLEVEL': None, - 'VRESOLUTION': '720: -1', - 'VCODEC_ALLOW': [ - 'libx264', - 'h264', - 'h.264', - 'AVC', - 'avc', - 'mpeg4', - 'msmpeg4', - 'MPEG-4', - ], - 'ACODEC': 'aac', - 'ACODEC_ALLOW': ['libfaac'], - 'ABITRATE': 128000, - 'ACHANNELS': 2, - 'ACODEC2': 'ac3', - 'ACODEC2_ALLOW': ['ac3'], - 'ABITRATE2': None, - 'ACHANNELS2': 6, - 'ACODEC3': None, - 'ACODEC3_ALLOW': [], - 'ABITRATE3': None, - 'ACHANNELS3': None, - 'SCODEC': 'mov_text', - }, - } + codec_alias = {'libx264': ['libx264', 'h264', 'h.264', 'AVC', 'MPEG-4'], 'libmp3lame': ['libmp3lame', 'mp3'], 'libfaac': ['libfaac', 'aac', 'faac']} if DEFAULTS and DEFAULTS in transcode_defaults: + transcode_defaults = {'iPad': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'iPad-1080p': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '1920:1080', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'iPad-720p': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '1280:720', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, 'ACODEC2': 
'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'Apple-TV': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '1280:720', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, 'ACODEC2': 'aac', 'ACODEC2_ALLOW': ['libfaac'], 'ABITRATE2': None, 'ACHANNELS2': 2, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'iPod': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '1280:720', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'iPhone': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '460:320', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'PS3': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, 'ACODEC2': 'aac', 'ACODEC2_ALLOW': ['libfaac'], 'ABITRATE2': None, 'ACHANNELS2': 2, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'xbox': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'Roku-480p': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'Roku-720p': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 
128000, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'Roku-1080p': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 160000, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'mkv': {'VEXTENSION': '.mkv', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], 'ACODEC': 'dts', 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE': None, 'ACHANNELS': 8, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, 'ACHANNELS3': 8, 'SCODEC': 'mov_text'}, 'mkv-bluray': {'VEXTENSION': '.mkv', 'VCODEC': 'libx265', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'hevc', 'h265', 'libx265', 'h.265', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], 'ACODEC': 'dts', 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE': None, 'ACHANNELS': 8, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, 'ACHANNELS3': 8, 'SCODEC': 'mov_text'}, 'mp4-scene-release': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': 19, 'VLEVEL': '3.1', 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], 'ACODEC': 'dts', 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE': None, 'ACHANNELS': 8, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, 'ACHANNELS3': 8, 'SCODEC': 'mov_text'}, 'MKV-SD': {'VEXTENSION': '.mkv', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': '1200k', 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '720: -1', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}} VEXTENSION = transcode_defaults[DEFAULTS]['VEXTENSION'] VCODEC = transcode_defaults[DEFAULTS]['VCODEC'] VPRESET = transcode_defaults[DEFAULTS]['VPRESET'] @@ -1318,55 +605,38 @@ def configure_transcoder(): ABITRATE3 = transcode_defaults[DEFAULTS]['ABITRATE3'] SCODEC = transcode_defaults[DEFAULTS]['SCODEC'] transcode_defaults = {} # clear memory - if ( - transcode_defaults in ['mp4-scene-release'] - and not OUTPUTQUALITYPERCENT - ): + if transcode_defaults in ['mp4-scene-release'] and not OUTPUTQUALITYPERCENT: 
OUTPUTQUALITYPERCENT = 100 - if VEXTENSION in allow_subs: ALLOWSUBS = 1 if not VCODEC_ALLOW and VCODEC: VCODEC_ALLOW.extend([VCODEC]) for codec in VCODEC_ALLOW: if codec in codec_alias: - extra = [ - item for item in codec_alias[codec] if item not in VCODEC_ALLOW - ] + extra = [item for item in codec_alias[codec] if item not in VCODEC_ALLOW] VCODEC_ALLOW.extend(extra) if not ACODEC_ALLOW and ACODEC: ACODEC_ALLOW.extend([ACODEC]) for codec in ACODEC_ALLOW: if codec in codec_alias: - extra = [ - item for item in codec_alias[codec] if item not in ACODEC_ALLOW - ] + extra = [item for item in codec_alias[codec] if item not in ACODEC_ALLOW] ACODEC_ALLOW.extend(extra) if not ACODEC2_ALLOW and ACODEC2: ACODEC2_ALLOW.extend([ACODEC2]) for codec in ACODEC2_ALLOW: if codec in codec_alias: - extra = [ - item - for item in codec_alias[codec] - if item not in ACODEC2_ALLOW - ] + extra = [item for item in codec_alias[codec] if item not in ACODEC2_ALLOW] ACODEC2_ALLOW.extend(extra) if not ACODEC3_ALLOW and ACODEC3: ACODEC3_ALLOW.extend([ACODEC3]) for codec in ACODEC3_ALLOW: if codec in codec_alias: - extra = [ - item - for item in codec_alias[codec] - if item not in ACODEC3_ALLOW - ] + extra = [item for item in codec_alias[codec] if item not in ACODEC3_ALLOW] ACODEC3_ALLOW.extend(extra) def configure_passwords_file(): global PASSWORDS_FILE - PASSWORDS_FILE = CFG['passwords']['PassWordFile'] @@ -1374,19 +644,9 @@ def configure_sections(section): global SECTIONS global CATEGORIES # check for script-defied section and if None set to allow sections - SECTIONS = CFG[ - tuple(x for x in CFG if CFG[x].sections and CFG[x].isenabled()) - if not section - else (section,) - ] + SECTIONS = CFG[tuple(x for x in CFG if CFG[x].sections and CFG[x].isenabled()) if not section else (section,)] for section, subsections in SECTIONS.items(): - CATEGORIES.extend( - [ - subsection - for subsection in subsections - if CFG[section][subsection].isenabled() - ], - ) + CATEGORIES.extend([subsection for subsection in subsections if CFG[section][subsection].isenabled()]) CATEGORIES = list(set(CATEGORIES)) @@ -1396,40 +656,31 @@ def configure_utility_locations(): global FFMPEG global FFPROBE global PAR2CMD - # Setup FFMPEG, FFPROBE and SEVENZIP locations if platform.system() == 'Windows': FFMPEG = os.path.join(FFMPEG_PATH, 'ffmpeg.exe') FFPROBE = os.path.join(FFMPEG_PATH, 'ffprobe.exe') - SEVENZIP = os.path.join( - APP_ROOT, 'nzb2media', 'extractor', 'bin', platform.machine(), '7z.exe', - ) + SEVENZIP = os.path.join(APP_ROOT, 'nzb2media', 'extractor', 'bin', platform.machine(), '7z.exe') SHOWEXTRACT = int(str(CFG['Windows']['show_extraction']), 0) - if not (os.path.isfile(FFMPEG)): # problem FFMPEG = None log.warning('Failed to locate ffmpeg.exe. Transcoding disabled!') log.warning('Install ffmpeg with x264 support to enable this feature ...') - if not (os.path.isfile(FFPROBE)): FFPROBE = None if CHECK_MEDIA: log.warning('Failed to locate ffprobe.exe. Video corruption detection disabled!') log.warning('Install ffmpeg with x264 support to enable this feature ...') - else: if SYS_PATH: os.environ['PATH'] += ':' + SYS_PATH - SEVENZIP = which('7z') or which('7zr') or which('7za') if not SEVENZIP: log.warning('Failed to locate 7zip. Transcoding of disk images and extraction of .7z files will not be possible!') - PAR2CMD = which('par2') if not PAR2CMD: PAR2CMD = None log.warning('Failed to locate par2. 
Repair and rename using par files will not be possible!') - ffmpeg_bin = os.path.join(FFMPEG_PATH, 'ffmpeg') avconv_bin = os.path.join(FFMPEG_PATH, 'avconv') if os.path.isfile(ffmpeg_bin) or os.access(ffmpeg_bin, os.X_OK): @@ -1459,20 +710,15 @@ def configure_utility_locations(): def initialize(section=None): global __INITIALIZED__ - if __INITIALIZED__: return False - configure_logging() configure_process() configure_locale() - configure_migration() configure_logging_part_2() - # initialize the main SB database main_db.upgrade_database(main_db.DBConnection(), databases.InitialSchema) - configure_general() configure_updates() configure_wake_on_lan() @@ -1487,8 +733,6 @@ def initialize(section=None): configure_utility_locations() configure_sections(section) configure_torrent_class() - __INITIALIZED__ = True - # finished initializing return __INITIALIZED__ diff --git a/nzb2media/auto_process/books.py b/nzb2media/auto_process/books.py index af0db050..1bb7d411 100644 --- a/nzb2media/auto_process/books.py +++ b/nzb2media/auto_process/books.py @@ -5,6 +5,7 @@ import logging import requests import nzb2media +import nzb2media.utils.common from nzb2media.auto_process.common import ProcessResult from nzb2media.utils.encoding import convert_to_ascii from nzb2media.utils.network import server_responding @@ -14,85 +15,42 @@ log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) -def process( - *, - section: str, - dir_name: str, - input_name: str = '', - status: int = 0, - client_agent: str = 'manual', - download_id: str = '', - input_category: str = '', - failure_link: str = '', -) -> ProcessResult: +def process(*, section: str, dir_name: str, input_name: str = '', input_category: str = '', **kwargs) -> ProcessResult: + log.debug(f'Unused kwargs: {kwargs}') # Get configuration if nzb2media.CFG is None: raise RuntimeError('Configuration not loaded.') cfg = nzb2media.CFG[section][input_category] - # Base URL ssl = int(cfg.get('ssl', 0)) scheme = 'https' if ssl else 'http' host = cfg['host'] port = cfg['port'] web_root = cfg.get('web_root', '') - # Authentication apikey = cfg.get('apikey', '') - # Params remote_path = int(cfg.get('remote_path', 0)) - # Misc - # Begin processing url = nzb2media.utils.common.create_url(scheme, host, port, web_root) if not server_responding(url): log.error('Server did not respond. 
Exiting') - return ProcessResult.failure( - f'{section}: Failed to post-process - {section} did not respond.', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - {section} did not respond.') input_name, dir_name = convert_to_ascii(input_name, dir_name) - - params = { - 'apikey': apikey, - 'cmd': 'forceProcess', - 'dir': remote_dir(dir_name) if remote_path else dir_name, - } - + params = {'apikey': apikey, 'cmd': 'forceProcess', 'dir': remote_dir(dir_name) if remote_path else dir_name} log.debug(f'Opening URL: {url} with params: {params}') - try: response = requests.get(url, params=params, verify=False, timeout=(30, 300)) except requests.ConnectionError: log.error('Unable to open URL') - return ProcessResult.failure( - f'{section}: Failed to post-process - Unable to connect to ' - f'{section}', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - Unable to connect to {section}') log.debug(response.text) - - if response.status_code not in [ - requests.codes.ok, - requests.codes.created, - requests.codes.accepted, - ]: + if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: log.error(f'Server returned status {response.status_code}') - return ProcessResult.failure( - f'{section}: Failed to post-process - Server returned status ' - f'{response.status_code}', - ) + return ProcessResult.failure(f'{section}: Failed to post-process - Server returned status {response.status_code}') if response.text == 'OK': - log.debug( - f'SUCCESS: ForceProcess for {dir_name} has been started in LazyLibrarian', - ) - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) + log.debug(f'SUCCESS: ForceProcess for {dir_name} has been started in LazyLibrarian') + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') log.error(f'FAILED: ForceProcess of {dir_name} has Failed in LazyLibrarian') - return ProcessResult.failure( - f'{section}: Failed to post-process - Returned log from {section} ' - f'was not as expected.', - ) + return ProcessResult.failure(f'{section}: Failed to post-process - Returned log from {section} was not as expected.') diff --git a/nzb2media/auto_process/comics.py b/nzb2media/auto_process/comics.py index b3546349..bdb3f24a 100644 --- a/nzb2media/auto_process/comics.py +++ b/nzb2media/auto_process/comics.py @@ -6,6 +6,7 @@ import os import requests import nzb2media +import nzb2media.utils.common from nzb2media.auto_process.common import ProcessResult from nzb2media.utils.encoding import convert_to_ascii from nzb2media.utils.network import server_responding @@ -15,104 +16,58 @@ log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) -def process( - *, - section: str, - dir_name: str, - input_name: str = '', - status: int = 0, - client_agent: str = 'manual', - download_id: str = '', - input_category: str = '', - failure_link: str = '', -) -> ProcessResult: +def process(*, section: str, dir_name: str, input_name: str = '', input_category: str = '', status: int = 0, **kwargs) -> ProcessResult: + log.debug(f'Unused kwargs: {kwargs}') # Get configuration if nzb2media.CFG is None: raise RuntimeError('Configuration not loaded.') cfg = nzb2media.CFG[section][input_category] - # Base URL ssl = int(cfg.get('ssl', 0)) scheme = 'https' if ssl else 'http' host = cfg['host'] port = cfg['port'] web_root = cfg.get('web_root', '') - # Authentication apikey = cfg.get('apikey', '') - # Params remote_path = int(cfg.get('remote_path', 0)) - # Misc 
apc_version = '2.04' comicrn_version = '1.01' - # Begin processing url = nzb2media.utils.common.create_url(scheme, host, port, web_root) if not server_responding(url): log.error('Server did not respond. Exiting') - return ProcessResult.failure( - f'{section}: Failed to post-process - {section} did not respond.', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - {section} did not respond.') input_name, dir_name = convert_to_ascii(input_name, dir_name) clean_name, ext = os.path.splitext(input_name) if len(ext) == 4: # we assume this was a standard extension. input_name = clean_name - - params = { - 'cmd': 'forceProcess', - 'apikey': apikey, - 'nzb_folder': remote_dir(dir_name) if remote_path else dir_name, - } - + params = {'cmd': 'forceProcess', 'apikey': apikey, 'nzb_folder': remote_dir(dir_name) if remote_path else dir_name} if input_name is not None: params['nzb_name'] = input_name params['failed'] = int(status) params['apc_version'] = apc_version params['comicrn_version'] = comicrn_version - success = False - log.debug(f'Opening URL: {url}') try: - response = requests.post( - url, params=params, stream=True, verify=False, timeout=(30, 300), - ) + response = requests.post(url, params=params, stream=True, verify=False, timeout=(30, 300)) except requests.ConnectionError: log.error('Unable to open URL') - return ProcessResult.failure( - f'{section}: Failed to post-process - Unable to connect to ' - f'{section}', - ) - if response.status_code not in [ - requests.codes.ok, - requests.codes.created, - requests.codes.accepted, - ]: + return ProcessResult.failure(f'{section}: Failed to post-process - Unable to connect to {section}') + log.debug(response.text) + if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: log.error(f'Server returned status {response.status_code}') - return ProcessResult.failure( - f'{section}: Failed to post-process - Server returned status ' - f'{response.status_code}', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - Server returned status {response.status_code}') for line in response.text.split('\n'): if line: log.debug(line) if 'Post Processing SUCCESSFUL' in line: success = True - if success: log.debug('SUCCESS: This issue has been processed successfully') - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) - log.warning( - 'The issue does not appear to have successfully processed. ' - 'Please check your Logs', - ) - return ProcessResult.failure( - f'{section}: Failed to post-process - Returned log from ' - f'{section} was not as expected.', - ) + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') + log.warning('The issue does not appear to have successfully processed. 
' 'Please check your Logs') + return ProcessResult.failure(f'{section}: Failed to post-process - Returned log from {section} was not as expected.') diff --git a/nzb2media/auto_process/common.py b/nzb2media/auto_process/common.py index ced991ed..8b134659 100644 --- a/nzb2media/auto_process/common.py +++ b/nzb2media/auto_process/common.py @@ -34,22 +34,11 @@ class ProcessResult(typing.NamedTuple): def command_complete(url, params, headers, section): try: - respone = requests.get( - url, - params=params, - headers=headers, - stream=True, - verify=False, - timeout=(30, 60), - ) + respone = requests.get(url, params=params, headers=headers, stream=True, verify=False, timeout=(30, 60)) except requests.ConnectionError: log.error(f'Unable to open URL: {url}') return None - if respone.status_code not in [ - requests.codes.ok, - requests.codes.created, - requests.codes.accepted, - ]: + if respone.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: log.error(f'Server returned status {respone.status_code}') return None try: @@ -63,22 +52,11 @@ def command_complete(url, params, headers, section): def completed_download_handling(url2, headers, section='MAIN'): try: - response = requests.get( - url2, - params={}, - headers=headers, - stream=True, - verify=False, - timeout=(30, 60), - ) + response = requests.get(url2, params={}, headers=headers, stream=True, verify=False, timeout=(30, 60)) except requests.ConnectionError: log.error(f'Unable to open URL: {url2}') return False - if response.status_code not in [ - requests.codes.ok, - requests.codes.created, - requests.codes.accepted, - ]: + if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: log.error(f'Server returned status {response.status_code}') return False try: diff --git a/nzb2media/auto_process/games.py b/nzb2media/auto_process/games.py index f96169cf..cb3a0c7e 100644 --- a/nzb2media/auto_process/games.py +++ b/nzb2media/auto_process/games.py @@ -7,6 +7,7 @@ import shutil import requests import nzb2media +import nzb2media.utils.common from nzb2media.auto_process.common import ProcessResult from nzb2media.utils.encoding import convert_to_ascii from nzb2media.utils.network import server_responding @@ -15,71 +16,39 @@ log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) -def process( - *, - section: str, - dir_name: str, - input_name: str = '', - status: int = 0, - client_agent: str = 'manual', - download_id: str = '', - input_category: str = '', - failure_link: str = '', -) -> ProcessResult: +def process(*, section: str, dir_name: str, input_name: str = '', status: int = 0, input_category: str = '', **kwargs) -> ProcessResult: + log.debug(f'Unused kwargs: {kwargs}') # Get configuration if nzb2media.CFG is None: raise RuntimeError('Configuration not loaded.') cfg = nzb2media.CFG[section][input_category] - # Base URL ssl = int(cfg.get('ssl', 0)) scheme = 'https' if ssl else 'http' host = cfg['host'] port = cfg['port'] web_root = cfg.get('web_root', '') - # Authentication apikey = cfg.get('apikey', '') - # Params - # Misc library = cfg.get('library') - # Begin processing url = nzb2media.utils.common.create_url(scheme, host, port, web_root) if not server_responding(url): log.error('Server did not respond. 
Exiting') - return ProcessResult.failure( - f'{section}: Failed to post-process - {section} did not respond.', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - {section} did not respond.') input_name, dir_name = convert_to_ascii(input_name, dir_name) - fields = input_name.split('-') - gamez_id = fields[0].replace('[', '').replace(']', '').replace(' ', '') - download_status = 'Downloaded' if status == 0 else 'Wanted' - - params = { - 'api_key': apikey, - 'mode': 'UPDATEREQUESTEDSTATUS', - 'db_id': gamez_id, - 'status': download_status, - } - + params = {'api_key': apikey, 'mode': 'UPDATEREQUESTEDSTATUS', 'db_id': gamez_id, 'status': download_status} log.debug(f'Opening URL: {url}') - try: resposne = requests.get(url, params=params, verify=False, timeout=(30, 300)) except requests.ConnectionError: log.error('Unable to open URL') - return ProcessResult.failure( - f'{section}: Failed to post-process - Unable to connect to ' - f'{section}', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - Unable to connect to {section}') result = resposne.json() log.debug(result) if library: @@ -88,33 +57,15 @@ def process( shutil.move(dir_name, os.path.join(library, input_name)) except Exception: log.error(f'Unable to move {dir_name} to {os.path.join(library, input_name)}') - return ProcessResult.failure( - f'{section}: Failed to post-process - Unable to move files', - ) + return ProcessResult.failure(f'{section}: Failed to post-process - Unable to move files') else: log.error('No library specified to move files to. Please edit your configuration.') - return ProcessResult.failure( - f'{section}: Failed to post-process - No library defined in ' - f'{section}', - ) - - if resposne.status_code not in [ - requests.codes.ok, - requests.codes.created, - requests.codes.accepted, - ]: + return ProcessResult.failure(f'{section}: Failed to post-process - No library defined in {section}') + if resposne.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: log.error(f'Server returned status {resposne.status_code}') - return ProcessResult.failure( - f'{section}: Failed to post-process - Server returned status ' - f'{resposne.status_code}', - ) + return ProcessResult.failure(f'{section}: Failed to post-process - Server returned status {resposne.status_code}') if result['success']: log.debug(f'SUCCESS: Status for {gamez_id} has been set to {download_status} in Gamez') - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') log.error(f'FAILED: Status for {gamez_id} has NOT been updated in Gamez') - return ProcessResult.failure( - f'{section}: Failed to post-process - Returned log from {section} ' - f'was not as expected.', - ) + return ProcessResult.failure(f'{section}: Failed to post-process - Returned log from {section} was not as expected.') diff --git a/nzb2media/auto_process/movies.py b/nzb2media/auto_process/movies.py index 147ea977..0460a34e 100644 --- a/nzb2media/auto_process/movies.py +++ b/nzb2media/auto_process/movies.py @@ -8,6 +8,7 @@ import time import requests import nzb2media +import nzb2media.utils.common from nzb2media import transcoder from nzb2media.auto_process.common import ProcessResult from nzb2media.auto_process.common import command_complete @@ -30,38 +31,24 @@ log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) -def process( - *, - section: str, - dir_name: str, - 
input_name: str = '', - status: int = 0, - client_agent: str = 'manual', - download_id: str = '', - input_category: str = '', - failure_link: str = '', -) -> ProcessResult: +def process(*, section: str, dir_name: str, input_name: str = '', status: int = 0, client_agent: str = 'manual', download_id: str = '', input_category: str = '', failure_link: str = '') -> ProcessResult: # Get configuration if nzb2media.CFG is None: raise RuntimeError('Configuration not loaded.') cfg = nzb2media.CFG[section][input_category] - # Base URL ssl = int(cfg.get('ssl', 0)) scheme = 'https' if ssl else 'http' host = cfg['host'] port = cfg['port'] web_root = cfg.get('web_root', '') - # Authentication apikey = cfg.get('apikey', '') omdbapikey = cfg.get('omdbapikey', '') - # Params delete_failed = int(cfg.get('delete_failed', 0)) remote_path = int(cfg.get('remote_path', 0)) wait_for = int(cfg.get('wait_for', 2)) - # Misc if status > 0 and nzb2media.NOEXTRACTFAILED: extract = 0 @@ -75,7 +62,6 @@ def process( method = cfg.get('method', None) if section != 'CouchPotato': method = None - # Begin processing imdbid = find_imdbid(dir_name, input_name, omdbapikey) if section == 'CouchPotato': @@ -100,10 +86,7 @@ def process( release = None else: log.error('Server did not respond. Exiting') - return ProcessResult.failure( - f'{section}: Failed to post-process - {section} did not respond.', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - {section} did not respond.') # pull info from release found if available release_id = None media_id = None @@ -118,48 +101,29 @@ def process( release_status_old = release[release_id]['status'] except Exception: pass - - if not os.path.isdir(dir_name) and os.path.isfile( - dir_name, - ): # If the input directory is a file, assume single file download and split dir/name. + if not os.path.isdir(dir_name) and os.path.isfile(dir_name): # If the input directory is a file, assume single file download and split dir/name. 
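# [editor's sketch] The path handling at this point in the hunk (single-file input,
# optional sub-folder named after the .nzb) is folded onto single lines by this patch.
# Below is a standalone rendering of the same pattern for readability; the helper name
# resolve_input_dir is hypothetical and only os.path is assumed.
import os

def resolve_input_dir(dir_name: str, input_name: str) -> str:
    # If the "directory" is actually a file, work from its parent folder instead.
    if not os.path.isdir(dir_name) and os.path.isfile(dir_name):
        dir_name = os.path.split(os.path.normpath(dir_name))[0]
    # A release may sit in a sub-folder named after the .nzb file (extension stripped).
    specific_path = os.path.join(dir_name, str(input_name))
    base, ext = os.path.splitext(specific_path)
    if ext == '.nzb':
        specific_path = base
    return specific_path if os.path.isdir(specific_path) else dir_name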
dir_name = os.path.split(os.path.normpath(dir_name))[0] - specific_path = os.path.join(dir_name, str(input_name)) clean_name = os.path.splitext(specific_path) if clean_name[1] == '.nzb': specific_path = clean_name[0] if os.path.isdir(specific_path): dir_name = specific_path - process_all_exceptions(input_name, dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) - - if ( - not list_media_files( - dir_name, media=True, audio=False, meta=False, archives=False, - ) - and list_media_files( - dir_name, media=False, audio=False, meta=False, archives=True, - ) - and extract - ): + if not list_media_files(dir_name, media=True, audio=False, meta=False, archives=False) and list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract: log.debug(f'Checking for archives to extract in directory: {dir_name}') extract_files(dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) - good_files = 0 valid_files = 0 num_files = 0 # Check video files for corruption - for video in list_media_files( - dir_name, media=True, audio=False, meta=False, archives=False, - ): + for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False): num_files += 1 if transcoder.is_video_good(video, status): good_files += 1 - if not nzb2media.REQUIRE_LAN or transcoder.is_video_good( - video, status, require_lan=nzb2media.REQUIRE_LAN, - ): + if not nzb2media.REQUIRE_LAN or transcoder.is_video_good(video, status, require_lan=nzb2media.REQUIRE_LAN): valid_files += 1 import_subs(video) rename_subs(dir_name) @@ -170,18 +134,13 @@ def process( elif num_files and valid_files < num_files: log.info('Status shown as success from Downloader, but corrupt video files found. Setting as failed.') status = 1 - if ( - 'NZBOP_VERSION' in os.environ - and os.environ['NZBOP_VERSION'][0:5] >= '14.0' - ): + if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') if good_files == num_files: log.debug(f'Video marked as failed due to missing required language: {nzb2media.REQUIRE_LAN}') else: log.debug('Video marked as failed due to missing playable audio or video') - if ( - good_files < num_files and failure_link - ): # only report corrupt files + if good_files < num_files and failure_link: # only report corrupt files failure_link += '&corrupt=true' elif client_agent == 'manual': log.warning(f'No media files found in directory {dir_name} to manually process.') @@ -192,141 +151,76 @@ def process( else: log.warning(f'No media files found in directory {dir_name}. 
Processing this as a failed download') status = 1 - if ( - 'NZBOP_VERSION' in os.environ - and os.environ['NZBOP_VERSION'][0:5] >= '14.0' - ): + if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') - if status == 0: if nzb2media.TRANSCODE == 1: result, new_dir_name = transcoder.transcode_directory(dir_name) if result == 0: log.debug(f'Transcoding succeeded for files in {dir_name}') dir_name = new_dir_name - log.debug(f'Config setting \'chmodDirectory\' currently set to {oct(chmod_directory)}') if chmod_directory: log.info(f'Attempting to set the octal permission of \'{oct(chmod_directory)}\' on directory \'{dir_name}\'') rchmod(dir_name, chmod_directory) else: log.error(f'Transcoding failed for files in {dir_name}') - return ProcessResult( - message=f'{section}: Failed to post-process - Transcoding failed', - status_code=1, - ) - for video in list_media_files( - dir_name, media=True, audio=False, meta=False, archives=False, - ): + return ProcessResult(message=f'{section}: Failed to post-process - Transcoding failed', status_code=1) + for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False): if not release and '.cp(tt' not in video and imdbid: video_name, video_ext = os.path.splitext(video) video2 = f'{video_name}.cp({imdbid}){video_ext}' - if not ( - client_agent in [nzb2media.TORRENT_CLIENT_AGENT, 'manual'] - and nzb2media.USE_LINK == 'move-sym' - ): + if not (client_agent in [nzb2media.TORRENT_CLIENT_AGENT, 'manual'] and nzb2media.USE_LINK == 'move-sym'): log.debug(f'Renaming: {video} to: {video2}') os.rename(video, video2) - if not apikey: # If only using Transcoder functions, exit here. log.info('No CouchPotato or Radarr or Watcher3 apikey entered. Processing completed.') - return ProcessResult( - message=f'{section}: Successfully post-processed {input_name}', - status_code=0, - ) - - params = { - 'media_folder': remote_dir(dir_name) if remote_path else dir_name, - } - + return ProcessResult(message=f'{section}: Successfully post-processed {input_name}', status_code=0) + params = {'media_folder': remote_dir(dir_name) if remote_path else dir_name} if download_id and release_id: params['downloader'] = downloader or client_agent params['download_id'] = download_id - if section == 'CouchPotato': if method == 'manage': command = 'manage.update' params.clear() else: command = 'renamer.scan' - url = f'{base_url}{command}' log.debug(f'Opening URL: {url} with PARAMS: {params}') log.debug(f'Starting {method} scan for {input_name}') - if section == 'Radarr': - payload = { - 'name': 'DownloadedMoviesScan', - 'path': params['media_folder'], - 'downloadClientId': download_id, - 'importMode': import_mode, - } + payload = {'name': 'DownloadedMoviesScan', 'path': params['media_folder'], 'downloadClientId': download_id, 'importMode': import_mode} if not download_id: payload.pop('downloadClientId') log.debug(f'Opening URL: {base_url} with PARAMS: {payload}') log.debug(f'Starting DownloadedMoviesScan scan for {input_name}') - if section == 'Watcher3': - if input_name and os.path.isfile( - os.path.join(dir_name, input_name), - ): - params['media_folder'] = os.path.join( - params['media_folder'], input_name, - ) - payload = { - 'apikey': apikey, - 'path': params['media_folder'], - 'guid': download_id, - 'mode': 'complete', - } + if input_name and os.path.isfile(os.path.join(dir_name, input_name)): + params['media_folder'] = os.path.join(params['media_folder'], input_name) + payload = {'apikey': apikey, 'path': 
params['media_folder'], 'guid': download_id, 'mode': 'complete'} if not download_id: payload.pop('guid') log.debug(f'Opening URL: {base_url} with PARAMS: {payload}') log.debug(f'Starting postprocessing scan for {input_name}') - try: if section == 'CouchPotato': - response = requests.get( - url, params=params, verify=False, timeout=(30, 1800), - ) + response = requests.get(url, params=params, verify=False, timeout=(30, 1800)) elif section == 'Watcher3': - response = requests.post( - base_url, data=payload, verify=False, timeout=(30, 1800), - ) + response = requests.post(base_url, data=payload, verify=False, timeout=(30, 1800)) else: - response = requests.post( - base_url, - data=json.dumps(payload), - headers=headers, - stream=True, - verify=False, - timeout=(30, 1800), - ) + response = requests.post(base_url, data=json.dumps(payload), headers=headers, stream=True, verify=False, timeout=(30, 1800)) except requests.ConnectionError: log.error('Unable to open URL') - return ProcessResult( - message=f'{section}: Failed to post-process - Unable to connect to {section}', - status_code=1, - ) - + return ProcessResult(message=f'{section}: Failed to post-process - Unable to connect to {section}', status_code=1) result = response.json() - if response.status_code not in [ - requests.codes.ok, - requests.codes.created, - requests.codes.accepted, - ]: + if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: log.error(f'Server returned status {response.status_code}') - return ProcessResult( - message=f'{section}: Failed to post-process - Server returned status {response.status_code}', - status_code=1, - ) + return ProcessResult(message=f'{section}: Failed to post-process - Server returned status {response.status_code}', status_code=1) if section == 'CouchPotato' and result['success']: log.debug(f'SUCCESS: Finished {method} scan for folder {dir_name}') if method == 'manage': - return ProcessResult( - message=f'{section}: Successfully post-processed {input_name}', - status_code=0, - ) + return ProcessResult(message=f'{section}: Successfully post-processed {input_name}', status_code=0) elif section == 'Radarr': try: scan_id = int(result['id']) @@ -338,167 +232,85 @@ def process( update_movie_status = result['tasks']['update_movie_status'] log.debug(f'Watcher3 updated status to {section}') if update_movie_status == 'Finished': - return ProcessResult( - message=f'{section}: Successfully post-processed {input_name}', - status_code=status, - ) - return ProcessResult( - message=f'{section}: Failed to post-process - changed status to {update_movie_status}', - status_code=1, - ) + return ProcessResult(message=f'{section}: Successfully post-processed {input_name}', status_code=status) + return ProcessResult(message=f'{section}: Failed to post-process - changed status to {update_movie_status}', status_code=1) else: log.error(f'FAILED: {method} scan was unable to finish for folder {dir_name}. 
exiting!') - return ProcessResult( - message=f'{section}: Failed to post-process - Server did not return success', - status_code=1, - ) + return ProcessResult(message=f'{section}: Failed to post-process - Server did not return success', status_code=1) else: nzb2media.FAILED = True log.debug(f'FAILED DOWNLOAD DETECTED FOR {input_name}') if failure_link: report_nzb(failure_link, client_agent) - if section == 'Radarr': log.debug(f'SUCCESS: Sending failed download to {section} for CDH processing') return ProcessResult( message=f'{section}: Sending failed download back to {section}', - status_code=1, - # Return as failed to flag this in the downloader. + status_code=1, # Return as failed to flag this in the downloader. ) # Return failed flag, but log the event as successful. if section == 'Watcher3': log.debug(f'Sending failed download to {section} for CDH processing') path = remote_dir(dir_name) if remote_path else dir_name - if input_name and os.path.isfile( - os.path.join(dir_name, input_name), - ): + if input_name and os.path.isfile(os.path.join(dir_name, input_name)): path = os.path.join(path, input_name) - payload = { - 'apikey': apikey, - 'path': path, - 'guid': download_id, - 'mode': 'failed', - } - response = requests.post( - base_url, data=payload, verify=False, timeout=(30, 1800), - ) + payload = {'apikey': apikey, 'path': path, 'guid': download_id, 'mode': 'failed'} + response = requests.post(base_url, data=payload, verify=False, timeout=(30, 1800)) result = response.json() log.debug(f'Watcher3 response: {result}') if result['status'] == 'finished': return ProcessResult( message=f'{section}: Sending failed download back to {section}', - status_code=1, - # Return as failed to flag this in the downloader. + status_code=1, # Return as failed to flag this in the downloader. ) # Return failed flag, but log the event as successful. 
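# [editor's sketch] These hunks repeat one request pattern: call the section's API with
# verify=False and a (connect, read) timeout tuple, treat ConnectionError as failure, and
# accept only 200/201/202 before touching the body. post_and_check is a placeholder name;
# only the requests calls already present in the diff are assumed.
import requests

_ACCEPTED = (requests.codes.ok, requests.codes.created, requests.codes.accepted)

def post_and_check(url, payload):
    try:
        response = requests.post(url, data=payload, verify=False, timeout=(30, 1800))
    except requests.ConnectionError:
        return None  # caller logs and returns ProcessResult.failure(...)
    if response.status_code not in _ACCEPTED:
        return None  # caller logs the unexpected status code
    return response.json()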
- - if ( - delete_failed - and os.path.isdir(dir_name) - and not os.path.dirname(dir_name) == dir_name - ): + if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name: log.debug(f'Deleting failed files and folder {dir_name}') remove_dir(dir_name) - if not release_id and not media_id: log.error(f'Could not find a downloaded movie in the database matching {input_name}, exiting!') - return ProcessResult( - message='{0}: Failed to post-process - Failed download not found in {0}'.format(section), - status_code=1, - ) - + return ProcessResult(message='{0}: Failed to post-process - Failed download not found in {0}'.format(section), status_code=1) if release_id: log.debug(f'Setting failed release {input_name} to ignored ...') - url = f'{base_url}release.ignore' params = {'id': release_id} - log.debug(f'Opening URL: {url} with PARAMS: {params}') - try: - response = requests.get( - url, params=params, verify=False, - timeout=(30, 120), - ) + response = requests.get(url, params=params, verify=False, timeout=(30, 120)) except requests.ConnectionError: log.error(f'Unable to open URL {url}') - return ProcessResult( - message='{0}: Failed to post-process - Unable to connect to {0}'.format(section), - status_code=1, - ) - + return ProcessResult(message='{0}: Failed to post-process - Unable to connect to {0}'.format(section), status_code=1) result = response.json() - if response.status_code not in [ - requests.codes.ok, - requests.codes.created, - requests.codes.accepted, - ]: + if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: log.error(f'Server returned status {response.status_code}') - return ProcessResult( - status_code=1, - message=f'{section}: Failed to post-process - Server returned status {response.status_code}', - ) + return ProcessResult(status_code=1, message=f'{section}: Failed to post-process - Server returned status {response.status_code}') if result['success']: log.debug(f'SUCCESS: {input_name} has been set to ignored ...') else: log.warning(f'FAILED: Unable to set {input_name} to ignored!') - return ProcessResult( - message=f'{section}: Failed to post-process - Unable to set {input_name} to ignored', - status_code=1, - ) - + return ProcessResult(message=f'{section}: Failed to post-process - Unable to set {input_name} to ignored', status_code=1) log.debug('Trying to snatch the next highest ranked release.') - url = f'{base_url}movie.searcher.try_next' log.debug(f'Opening URL: {url}') - try: - response = requests.get( - url, - params={'media_id': media_id}, - verify=False, - timeout=(30, 600), - ) + response = requests.get(url, params={'media_id': media_id}, verify=False, timeout=(30, 600)) except requests.ConnectionError: log.error(f'Unable to open URL {url}') - return ProcessResult.failure( - f'{section}: Failed to post-process - Unable to connect to ' - f'{section}', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - Unable to connect to {section}') result = response.json() - if response.status_code not in [ - requests.codes.ok, - requests.codes.created, - requests.codes.accepted, - ]: + if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: log.error(f'Server returned status {response.status_code}') - return ProcessResult.failure( - f'{section}: Failed to post-process - Server returned status ' - f'{response.status_code}', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - Server returned status 
{response.status_code}') if result['success']: log.debug('SUCCESS: Snatched the next highest release ...') - return ProcessResult.success( - f'{section}: Successfully snatched next highest release', - ) + return ProcessResult.success(f'{section}: Successfully snatched next highest release') log.debug('SUCCESS: Unable to find a new release to snatch now. CP will keep searching!') - return ProcessResult.success( - f'{section}: No new release found now. ' - f'{section} will keep searching', - ) - + return ProcessResult.success(f'{section}: No new release found now. {section} will keep searching') # Added a release that was not in the wanted list so confirm rename # successful by finding this movie media.list. if not release: # we don't want to filter new releases based on this. download_id = '' - if no_status_check: - return ProcessResult.success( - f'{section}: Successfully processed but no change in status ' - f'confirmed', - ) - + return ProcessResult.success(f'{section}: Successfully processed but no change in status confirmed') # we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing. timeout = time.time() + 60 * wait_for while time.time() < timeout: # only wait 2 (default) minutes, then return. @@ -512,20 +324,13 @@ def process( try: release_id = list(release.keys())[0] release_status_new = release[release_id]['status'] - if ( - release_status_old is None - ): # we didn't have a release before, but now we do. + if release_status_old is None: # we didn't have a release before, but now we do. title = release[release_id]['title'] log.debug(f'SUCCESS: Movie {title} has now been added to CouchPotato with release status of [{str(release_status_new).upper()}]') - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) - + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') if release_status_new != release_status_old: log.debug(f'SUCCESS: Release {release_id} has now been marked with a status of [{str(release_status_new).upper()}]') - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') except Exception: pass elif scan_id: @@ -535,52 +340,29 @@ def process( log.debug(f'The Scan command return status: {command_status}') if command_status in ['completed']: log.debug('The Scan command has completed successfully. Renaming was successful.') - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') if command_status in ['failed']: log.debug('The Scan command has failed. 
Renaming was not successful.') - # return ProcessResult( - # message='{0}: Failed to post-process {1}'.format(section, input_name), - # status_code=1, - # ) - + # return ProcessResult(message='{0}: Failed to post-process {1}'.format(section, input_name), status_code=1) if not os.path.isdir(dir_name): log.debug(f'SUCCESS: Input Directory [{dir_name}] has been processed and removed') - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) - - if not list_media_files( - dir_name, media=True, audio=False, meta=False, archives=True, - ): + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') + if not list_media_files(dir_name, media=True, audio=False, meta=False, archives=True): log.debug(f'SUCCESS: Input Directory [{dir_name}] has no remaining media files. This has been fully processed.') - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) - + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') # pause and let CouchPotatoServer/Radarr catch its breath time.sleep(10 * wait_for) - # The status hasn't changed. we have waited wait_for minutes which is more than enough. uTorrent can resume seeding now. - if section == 'Radarr' and completed_download_handling( - url2, headers, section=section, - ): + if section == 'Radarr' and completed_download_handling(url2, headers, section=section): log.debug(f'The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {section}.') - return ProcessResult.success( - f'{section}: Complete DownLoad Handling is enabled. Passing back ' - f'to {section}', - ) + return ProcessResult.success(f'{section}: Complete DownLoad Handling is enabled. 
Passing back to {section}') log.warning(f'{input_name} does not appear to have changed status after {wait_for} minutes, Please check your logs.') - return ProcessResult.failure( - f'{section}: Failed to post-process - No change in status', - ) + return ProcessResult.failure(f'{section}: Failed to post-process - No change in status') def get_release(base_url, imdb_id=None, download_id=None, release_id=None): results = {} params = {} - # determine cmd and params to send to CouchPotato to get our results section = 'movies' cmd = 'media.list' @@ -588,20 +370,16 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None): section = 'media' cmd = 'media.get' params['id'] = release_id or imdb_id - if not (release_id or imdb_id or download_id): log.debug('No information available to filter CP results') return results - url = f'{base_url}{cmd}' log.debug(f'Opening URL: {url} with PARAMS: {params}') - try: response = requests.get(url, params=params, verify=False, timeout=(30, 60)) except requests.ConnectionError: log.error(f'Unable to open URL {url}') return results - try: result = response.json() except ValueError: @@ -610,7 +388,6 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None): for line in response.iter_lines(): log.error(line) return results - if not result['success']: if 'error' in result: log.error(result['error']) @@ -618,7 +395,6 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None): id_param = params['id'] log.error(f'no media found for id {id_param}') return results - # Gather release info and return it back, no need to narrow results if release_id: try: @@ -627,9 +403,7 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None): return results except Exception: pass - # Gather release info and proceed with trying to narrow results to one release choice - movies = result[section] if not isinstance(movies, list): movies = [movies] @@ -644,18 +418,13 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None): if release['status'] not in ['snatched', 'downloaded', 'done']: continue if download_id: - if ( - download_id.lower() - != release['download_info']['id'].lower() - ): + if download_id.lower() != release['download_info']['id'].lower(): continue - key = release['_id'] results[key] = release results[key]['title'] = movie['title'] except Exception: continue - # Narrow results by removing old releases by comparing their last_edit field if len(results) > 1: rem_id = set() @@ -668,20 +437,15 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None): continue for ea_id in rem_id: results.pop(ea_id) - # Search downloads on clients for a match to try and narrow our results down to 1 if len(results) > 1: rem_id = set() for key, val1 in results.items(): try: - if not find_download( - str(val1['download_info']['downloader']).lower(), - val1['download_info']['id'], - ): + if not find_download(str(val1['download_info']['downloader']).lower(), val1['download_info']['id']): rem_id.add(key) except Exception: continue for ea_id in rem_id: results.pop(ea_id) - return results diff --git a/nzb2media/auto_process/music.py b/nzb2media/auto_process/music.py index afdf19a6..30082390 100644 --- a/nzb2media/auto_process/music.py +++ b/nzb2media/auto_process/music.py @@ -8,6 +8,7 @@ import time import requests import nzb2media +import nzb2media.utils.common from nzb2media.auto_process.common import ProcessResult from nzb2media.auto_process.common import command_complete from 
nzb2media.scene_exceptions import process_all_exceptions @@ -22,118 +23,64 @@ log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) -def process( - *, - section: str, - dir_name: str, - input_name: str = '', - status: int = 0, - client_agent: str = 'manual', - download_id: str = '', - input_category: str = '', - failure_link: str = '', -) -> ProcessResult: +def process(*, section: str, dir_name: str, input_name: str = '', status: int = 0, input_category: str = '', **kwargs) -> ProcessResult: + log.debug(f'Unused kwargs: {kwargs}') # Get configuration if nzb2media.CFG is None: raise RuntimeError('Configuration not loaded.') cfg = nzb2media.CFG[section][input_category] - # Base URL ssl = int(cfg.get('ssl', 0)) scheme = 'https' if ssl else 'http' host = cfg['host'] port = cfg['port'] web_root = cfg.get('web_root', '') - # Authentication apikey = cfg.get('apikey', '') - # Params delete_failed = int(cfg.get('delete_failed', 0)) remote_path = int(cfg.get('remote_path', 0)) wait_for = int(cfg.get('wait_for', 2)) - # Misc if status > 0 and nzb2media.NOEXTRACTFAILED: extract = 0 else: extract = int(cfg.get('extract', 0)) - # Begin processing route = f'{web_root}/api/v1' if section == 'Lidarr' else f'{web_root}/api' url = nzb2media.utils.common.create_url(scheme, host, port, route) if not server_responding(url): log.error('Server did not respond. Exiting') - return ProcessResult.failure( - f'{section}: Failed to post-process - {section} did not respond.', - ) - - if not os.path.isdir(dir_name) and os.path.isfile( - dir_name, - ): # If the input directory is a file, assume single file download and split dir/name. + return ProcessResult.failure(f'{section}: Failed to post-process - {section} did not respond.') + if not os.path.isdir(dir_name) and os.path.isfile(dir_name): # If the input directory is a file, assume single file download and split dir/name. dir_name = os.path.split(os.path.normpath(dir_name))[0] - specific_path = os.path.join(dir_name, str(input_name)) clean_name = os.path.splitext(specific_path) if clean_name[1] == '.nzb': specific_path = clean_name[0] if os.path.isdir(specific_path): dir_name = specific_path - process_all_exceptions(input_name, dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) - - if ( - not list_media_files( - dir_name, media=False, audio=True, meta=False, archives=False, - ) - and list_media_files( - dir_name, media=False, audio=False, meta=False, archives=True, - ) - and extract - ): + if not list_media_files(dir_name, media=False, audio=True, meta=False, archives=False) and list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract: log.debug(f'Checking for archives to extract in directory: {dir_name}') extract_files(dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) - # if listMediaFiles(dir_name, media=False, audio=True, meta=False, archives=False) and status: # logger.info('Status shown as failed from Downloader, but valid video files found. 
Setting as successful.', section) # status = 0 - if status == 0 and section == 'HeadPhones': - - params = { - 'apikey': apikey, - 'cmd': 'forceProcess', - 'dir': remote_dir(dir_name) if remote_path else dir_name, - } - - res = force_process( - params, url, apikey, input_name, dir_name, section, wait_for, - ) + params = {'apikey': apikey, 'cmd': 'forceProcess', 'dir': remote_dir(dir_name) if remote_path else dir_name} + res = force_process(params, url, apikey, input_name, dir_name, section, wait_for) if res.status_code in [0, 1]: return res - - params = { - 'apikey': apikey, - 'cmd': 'forceProcess', - 'dir': os.path.split(remote_dir(dir_name))[0] - if remote_path - else os.path.split(dir_name)[0], - } - - res = force_process( - params, url, apikey, input_name, dir_name, section, wait_for, - ) + params = {'apikey': apikey, 'cmd': 'forceProcess', 'dir': os.path.split(remote_dir(dir_name))[0] if remote_path else os.path.split(dir_name)[0]} + res = force_process(params, url, apikey, input_name, dir_name, section, wait_for) if res.status_code in [0, 1]: return res - # The status hasn't changed. uTorrent can resume seeding now. log.warning(f'The music album does not appear to have changed status after {wait_for} minutes. Please check your Logs') - return ProcessResult.failure( - f'{section}: Failed to post-process - No change in wanted status', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - No change in wanted status') if status == 0 and section == 'Lidarr': route = f'{web_root}/api/v1/command' url = nzb2media.utils.common.create_url(scheme, host, port, route) @@ -146,31 +93,17 @@ def process( data = {'name': 'Rename', 'path': dir_name} try: log.debug(f'Opening URL: {url} with data: {data}') - response = requests.post( - url, - data=json.dumps(data), - headers=headers, - stream=True, - verify=False, - timeout=(30, 1800), - ) + response = requests.post(url, data=json.dumps(data), headers=headers, stream=True, verify=False, timeout=(30, 1800)) except requests.ConnectionError: log.error(f'Unable to open URL: {url}') - return ProcessResult.failure( - f'{section}: Failed to post-process - Unable to connect to ' - f'{section}', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - Unable to connect to {section}') try: res = response.json() scan_id = int(res['id']) log.debug(f'Scan started with id: {scan_id}') except Exception as error: log.warning(f'No scan id was returned due to: {error}') - return ProcessResult.failure( - f'{section}: Failed to post-process - Unable to start scan', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - Unable to start scan') num = 0 params = {} url = f'{url}/{scan_id}' @@ -182,141 +115,80 @@ def process( num += 1 if command_status: log.debug(f'The Scan command return status: {command_status}') - if not os.path.exists(dir_name): log.debug(f'The directory {dir_name} has been removed. Renaming was successful.') - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) - + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') if command_status and command_status in ['completed']: log.debug('The Scan command has completed successfully. Renaming was successful.') - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) - + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') if command_status and command_status in ['failed']: log.debug('The Scan command has failed. 
Renaming was not successful.') - # return ProcessResult.failure( - # f'{section}: Failed to post-process {input_name}' - # ) + # return ProcessResult.failure(f'{section}: Failed to post-process {input_name}') else: log.debug(f'The Scan command did not return status completed. Passing back to {section} to attempt complete download handling.') - return ProcessResult( - message=f'{section}: Passing back to {section} to attempt ' - f'Complete Download Handling', - status_code=status, - ) - + return ProcessResult(message=f'{section}: Passing back to {section} to attempt Complete Download Handling', status_code=status) else: if section == 'Lidarr': log.debug(f'FAILED: The download failed. Sending failed download to {section} for CDH processing') # Return as failed to flag this in the downloader. - return ProcessResult.failure( - f'{section}: Download Failed. Sending back to {section}', - ) + return ProcessResult.failure(f'{section}: Download Failed. Sending back to {section}') log.warning('FAILED DOWNLOAD DETECTED') - if ( - delete_failed - and os.path.isdir(dir_name) - and not os.path.dirname(dir_name) == dir_name - ): + if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name: log.debug(f'Deleting failed files and folder {dir_name}') remove_dir(dir_name) # Return as failed to flag this in the downloader. - return ProcessResult.failure( - f'{section}: Failed to post-process. {section} does not ' - f'support failed downloads', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process. {section} does not support failed downloads') return ProcessResult.failure() def get_status(url, apikey, dir_name): log.debug(f'Attempting to get current status for release:{os.path.basename(dir_name)}') - - params = { - 'apikey': apikey, - 'cmd': 'getHistory', - } - + params = {'apikey': apikey, 'cmd': 'getHistory'} log.debug(f'Opening URL: {url} with PARAMS: {params}') - try: response = requests.get(url, params=params, verify=False, timeout=(30, 120)) except requests.RequestException: log.error('Unable to open URL') return None - try: result = response.json() except ValueError: # ValueError catches simplejson's JSONDecodeError and json's ValueError return None - for album in result: if os.path.basename(dir_name) == album['FolderName']: return album['Status'].lower() -def force_process( - params, url, apikey, input_name, dir_name, section, wait_for, -): +def force_process(params, url, apikey, input_name, dir_name, section, wait_for): release_status = get_status(url, apikey, dir_name) if not release_status: log.error(f'Could not find a status for {input_name}, is it in the wanted list ?') - log.debug(f'Opening URL: {url} with PARAMS: {params}') - try: response = requests.get(url, params=params, verify=False, timeout=(30, 300)) except requests.ConnectionError: log.error(f'Unable to open URL {url}') - return ProcessResult.failure( - f'{section}: Failed to post-process - Unable to connect to ' - f'{section}', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - Unable to connect to {section}') log.debug(f'Result: {response.text}') - - if response.status_code not in [ - requests.codes.ok, - requests.codes.created, - requests.codes.accepted, - ]: + if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: log.error(f'Server returned status {response.status_code}') - return ProcessResult.failure( - f'{section}: Failed to post-process - Server returned status {response.status_code}', - ) - + return 
ProcessResult.failure(f'{section}: Failed to post-process - Server returned status {response.status_code}') if response.text == 'OK': log.debug(f'SUCCESS: Post-Processing started for {input_name} in folder {dir_name} ...') else: log.error(f'FAILED: Post-Processing has NOT started for {input_name} in folder {dir_name}. exiting!') - return ProcessResult.failure( - f'{section}: Failed to post-process - Returned log from {section} ' - f'was not as expected.', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - Returned log from {section} was not as expected.') # we will now wait for this album to be processed before returning to TorrentToMedia and unpausing. timeout = time.time() + 60 * wait_for while time.time() < timeout: current_status = get_status(url, apikey, dir_name) - if ( - current_status is not None and current_status != release_status - ): # Something has changed. CPS must have processed this movie. + if current_status is not None and current_status != release_status: # Something has changed. CPS must have processed this movie. log.debug(f'SUCCESS: This release is now marked as status [{current_status}]') - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') if not os.path.isdir(dir_name): log.debug(f'SUCCESS: The input directory {dir_name} has been removed Processing must have finished.') - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') time.sleep(10 * wait_for) # The status hasn't changed. - return ProcessResult( - message='no change', - status_code=2, - ) + return ProcessResult(message='no change', status_code=2) diff --git a/nzb2media/auto_process/tv.py b/nzb2media/auto_process/tv.py index 49ee0041..1540eb73 100644 --- a/nzb2media/auto_process/tv.py +++ b/nzb2media/auto_process/tv.py @@ -12,6 +12,7 @@ from oauthlib.oauth2 import LegacyApplicationClient from requests_oauthlib import OAuth2Session import nzb2media +import nzb2media.utils.common from nzb2media import transcoder from nzb2media.auto_process.common import ProcessResult from nzb2media.auto_process.common import command_complete @@ -34,29 +35,17 @@ log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) -def process( - *, - section: str, - dir_name: str, - input_name: str = '', - status: int = 0, - client_agent: str = 'manual', - download_id: str = '', - input_category: str = '', - failure_link: str = '', -) -> ProcessResult: +def process(*, section: str, dir_name: str, input_name: str = '', status: int = 0, client_agent: str = 'manual', download_id: str = '', input_category: str = '', failure_link: str = '') -> ProcessResult: # Get configuration if nzb2media.CFG is None: raise RuntimeError('Configuration not loaded.') cfg = nzb2media.CFG[section][input_category] - # Base URL ssl = int(cfg.get('ssl', 0)) scheme = 'https' if ssl else 'http' host = cfg['host'] port = cfg['port'] web_root = cfg.get('web_root', '') - # Authentication apikey = cfg.get('apikey', '') username = cfg.get('username', '') @@ -64,12 +53,10 @@ def process( api_version = int(cfg.get('api_version', 2)) sso_username = cfg.get('sso_username', '') sso_password = cfg.get('sso_password', '') - # Params delete_failed = int(cfg.get('delete_failed', 0)) remote_path = int(cfg.get('remote_path', 0)) wait_for = int(cfg.get('wait_for', 2)) - # Misc if status > 0 and 
nzb2media.NOEXTRACTFAILED: extract = 0 @@ -82,13 +69,10 @@ def process( force = int(cfg.get('force', 0)) delete_on = int(cfg.get('delete_on', 0)) ignore_subs = int(cfg.get('ignore_subs', 0)) - # Begin processing - # Refactor into an OO structure. # For now let's do botch the OO and the serialized code, until everything has been migrated. init_sickbeard = InitSickBeard(cfg, section, input_category) - url = nzb2media.utils.common.create_url(scheme, host, port, web_root) if server_responding(url): # auto-detect correct fork @@ -100,27 +84,17 @@ def process( fork, fork_params = 'None', {} else: log.error('Server did not respond. Exiting') - return ProcessResult.failure( - f'{section}: Failed to post-process - {section} did not respond.', - ) - - if ( - client_agent == nzb2media.TORRENT_CLIENT_AGENT - and nzb2media.USE_LINK == 'move-sym' - ): + return ProcessResult.failure(f'{section}: Failed to post-process - {section} did not respond.') + if client_agent == nzb2media.TORRENT_CLIENT_AGENT and nzb2media.USE_LINK == 'move-sym': process_method = 'symlink' - if not os.path.isdir(dir_name) and os.path.isfile( - dir_name, - ): # If the input directory is a file, assume single file download and split dir/name. + if not os.path.isdir(dir_name) and os.path.isfile(dir_name): # If the input directory is a file, assume single file download and split dir/name. dir_name = os.path.split(os.path.normpath(dir_name))[0] - specific_path = os.path.join(dir_name, str(input_name)) clean_name = os.path.splitext(specific_path) if clean_name[1] == '.nzb': specific_path = clean_name[0] if os.path.isdir(specific_path): dir_name = specific_path - # Attempt to create the directory if it doesn't exist and ignore any # error stating that it already exists. This fixes a bug where SickRage # won't process the directory because it doesn't exist. @@ -131,51 +105,27 @@ def process( # Re-raise the error if it wasn't about the directory not existing if error.errno != errno.EEXIST: raise - - if 'process_method' not in fork_params or ( - client_agent in ['nzbget', 'sabnzbd'] - and nzb_extraction_by != 'Destination' - ): + if 'process_method' not in fork_params or (client_agent in ['nzbget', 'sabnzbd'] and nzb_extraction_by != 'Destination'): if input_name: process_all_exceptions(input_name, dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) - # Now check if tv files exist in destination. - if not list_media_files( - dir_name, media=True, audio=False, meta=False, archives=False, - ): - if ( - list_media_files( - dir_name, - media=False, - audio=False, - meta=False, - archives=True, - ) - and extract - ): + if not list_media_files(dir_name, media=True, audio=False, meta=False, archives=False): + if list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract: log.debug(f'Checking for archives to extract in directory: {dir_name}') extract_files(dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) - - if list_media_files( - dir_name, media=True, audio=False, meta=False, archives=False, - ): # Check that a video exists. if not, assume failed. + if list_media_files(dir_name, media=True, audio=False, meta=False, archives=False): # Check that a video exists. if not, assume failed. 
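# [editor's sketch] The corruption check that follows keeps three counters per directory;
# this spells out the bookkeeping that the one-line version compresses. probe stands in
# for nzb2media.transcoder.is_video_good and is an assumption, not the real definition.
def classify_videos(videos, status, probe, require_lan=None):
    good = valid = total = 0
    for video in videos:
        total += 1
        if probe(video, status):
            good += 1
            if not require_lan or probe(video, status, require_lan=require_lan):
                valid += 1
    corrupt_found = bool(total) and valid < total
    # Mirrors the diff: everything playable but still invalid means the required language is missing.
    language_missing = corrupt_found and good == total
    return good, valid, total, corrupt_found, language_missing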
flatten(dir_name) - # Check video files for corruption good_files = 0 valid_files = 0 num_files = 0 - for video in list_media_files( - dir_name, media=True, audio=False, meta=False, archives=False, - ): + for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False): num_files += 1 if transcoder.is_video_good(video, status): good_files += 1 - if not nzb2media.REQUIRE_LAN or transcoder.is_video_good( - video, status, require_lan=nzb2media.REQUIRE_LAN, - ): + if not nzb2media.REQUIRE_LAN or transcoder.is_video_good(video, status, require_lan=nzb2media.REQUIRE_LAN): valid_files += 1 import_subs(video) rename_subs(dir_name) @@ -186,18 +136,13 @@ def process( if valid_files < num_files and status == 0: log.info('Found corrupt videos. Setting status Failed') status = 1 - if ( - 'NZBOP_VERSION' in os.environ - and os.environ['NZBOP_VERSION'][0:5] >= '14.0' - ): + if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') if good_files == num_files: log.debug(f'Video marked as failed due to missing required language: {nzb2media.REQUIRE_LAN}') else: log.debug('Video marked as failed due to missing playable audio or video') - if ( - good_files < num_files and failure_link - ): # only report corrupt files + if good_files < num_files and failure_link: # only report corrupt files failure_link += '&corrupt=true' elif client_agent == 'manual': log.warning(f'No media files found in directory {dir_name} to manually process.') @@ -213,36 +158,23 @@ def process( else: log.warning(f'No media files found in directory {dir_name}. Processing this as a failed download') status = 1 - if ( - 'NZBOP_VERSION' in os.environ - and os.environ['NZBOP_VERSION'][0:5] >= '14.0' - ): + if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') - - if ( - status == 0 and nzb2media.TRANSCODE == 1 - ): # only transcode successful downloads + if status == 0 and nzb2media.TRANSCODE == 1: # only transcode successful downloads result, new_dir_name = transcoder.transcode_directory(dir_name) if result == 0: log.debug(f'SUCCESS: Transcoding succeeded for files in {dir_name}') dir_name = new_dir_name - log.debug(f'Config setting \'chmodDirectory\' currently set to {oct(chmod_directory)}') if chmod_directory: log.info(f'Attempting to set the octal permission of \'{oct(chmod_directory)}\' on directory \'{dir_name}\'') rchmod(dir_name, chmod_directory) else: log.error(f'FAILED: Transcoding failed for files in {dir_name}') - return ProcessResult.failure( - f'{section}: Failed to post-process - Transcoding failed', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - Transcoding failed') # Part of the refactor if init_sickbeard.fork_obj: - init_sickbeard.fork_obj.initialize( - dir_name, input_name, status, client_agent='manual', - ) - + init_sickbeard.fork_obj.initialize(dir_name, input_name, status, client_agent='manual') # configure SB params to pass # We don't want to remove params, for the Forks that have been refactored. # As we don't want to duplicate this part of the code. 
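# [editor's sketch] The fork_params tuning in the following hunk remaps generic settings
# onto whichever parameter names the detected fork expects, then drops anything still None
# so it is never sent to SickBeard. A side-effect-free equivalent of that final pruning
# step, with an illustrative dict (real keys come from FORKS in nzb2media):
def prune_fork_params(fork_params):
    # equivalent in result to: [fork_params.pop(k) for k, v in list(fork_params.items()) if v is None]
    return {k: v for k, v in fork_params.items() if v is not None}

example = {'proc_dir': '/downloads/show', 'force': None, 'delete': 1}
assert prune_fork_params(example) == {'proc_dir': '/downloads/show', 'delete': 1}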
@@ -251,7 +183,6 @@ def process( fork_params['proc_type'] = 'manual' if input_name is not None: fork_params['nzbName'] = input_name - for param in copy.copy(fork_params): if param == 'failed': if status > 1: @@ -261,67 +192,47 @@ def process( del fork_params['proc_type'] if 'type' in fork_params: del fork_params['type'] - if param == 'return_data': fork_params[param] = 0 if 'quiet' in fork_params: del fork_params['quiet'] - if param == 'type': - if ( - 'type' in fork_params - ): # only set if we haven't already deleted for 'failed' above. + if 'type' in fork_params: # only set if we haven't already deleted for 'failed' above. fork_params[param] = 'manual' if 'proc_type' in fork_params: del fork_params['proc_type'] - - if param in [ - 'dir_name', - 'dir', - 'proc_dir', - 'process_directory', - 'path', - ]: + if param in ['dir_name', 'dir', 'proc_dir', 'process_directory', 'path']: fork_params[param] = dir_name if remote_path: fork_params[param] = remote_dir(dir_name) - if param == 'process_method': if process_method: fork_params[param] = process_method else: del fork_params[param] - if param in ['force', 'force_replace']: if force: fork_params[param] = force else: del fork_params[param] - if param in ['delete_on', 'delete']: if delete_on: fork_params[param] = delete_on else: del fork_params[param] - if param == 'ignore_subs': if ignore_subs: fork_params[param] = ignore_subs else: del fork_params[param] - if param == 'force_next': fork_params[param] = 1 - # delete any unused params so we don't pass them to SB by mistake [fork_params.pop(k) for k, v in list(fork_params.items()) if v is None] - if status == 0: if section == 'NzbDrone' and not apikey: log.info('No Sonarr apikey entered. Processing completed.') - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') log.debug('SUCCESS: The download succeeded, sending a post-process request') else: nzb2media.FAILED = True @@ -332,23 +243,14 @@ def process( elif section == 'NzbDrone': log.debug(f'FAILED: The download failed. Sending failed download to {fork} for CDH processing') # Return as failed to flag this in the downloader. - return ProcessResult.failure( - f'{section}: Download Failed. Sending back to {section}', - ) + return ProcessResult.failure(f'{section}: Download Failed. Sending back to {section}') else: log.debug(f'FAILED: The download failed. {fork} branch does not handle failed downloads. Nothing to process') - if ( - delete_failed - and os.path.isdir(dir_name) - and not os.path.dirname(dir_name) == dir_name - ): + if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name: log.debug(f'Deleting failed files and folder {dir_name}') remove_dir(dir_name) # Return as failed to flag this in the downloader. - return ProcessResult.failure( - f'{section}: Failed to post-process. {section} does not support failed downloads', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process. 
{section} does not support failed downloads') route = '' if section == 'SickBeard': if apikey: @@ -372,20 +274,10 @@ def process( # params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'} if remote_path: log.debug(f'remote_path: {remote_dir(dir_name)}') - data = { - 'name': 'DownloadedEpisodesScan', - 'path': remote_dir(dir_name), - 'downloadClientId': download_id, - 'importMode': import_mode, - } + data = {'name': 'DownloadedEpisodesScan', 'path': remote_dir(dir_name), 'downloadClientId': download_id, 'importMode': import_mode} else: log.debug(f'path: {dir_name}') - data = { - 'name': 'DownloadedEpisodesScan', - 'path': dir_name, - 'downloadClientId': download_id, - 'importMode': import_mode, - } + data = {'name': 'DownloadedEpisodesScan', 'path': dir_name, 'downloadClientId': download_id, 'importMode': import_mode} if not download_id: data.pop('downloadClientId') url = nzb2media.utils.common.create_url(scheme, host, port, route) @@ -394,7 +286,6 @@ def process( if init_sickbeard.fork_obj: return init_sickbeard.fork_obj.api_call() session = requests.Session() - log.debug(f'Opening URL: {url} with params: {fork_params}') if not apikey and username and password: login = f'{web_root}/login' @@ -402,92 +293,27 @@ def process( response = session.get(login, verify=False, timeout=(30, 60)) if response.status_code in [401, 403] and response.cookies.get('_xsrf'): login_params['_xsrf'] = response.cookies.get('_xsrf') - session.post( - login, - data=login_params, - stream=True, - verify=False, - timeout=(30, 60), - ) - response = session.get( - url, - auth=(username, password), - params=fork_params, - stream=True, - verify=False, - timeout=(30, 1800), - ) + session.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60)) + response = session.get(url, auth=(username, password), params=fork_params, stream=True, verify=False, timeout=(30, 1800)) elif section == 'SiCKRAGE': session = requests.Session() - if api_version >= 2 and sso_username and sso_password: - oauth = OAuth2Session( - client=LegacyApplicationClient( - client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID, - ), - ) - oauth_token = oauth.fetch_token( - client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID, - token_url=nzb2media.SICKRAGE_OAUTH_TOKEN_URL, - username=sso_username, - password=sso_password, - ) - session.headers.update( - {'Authorization': 'Bearer ' + oauth_token['access_token']}, - ) - - params = { - 'path': fork_params['path'], - 'failed': str(bool(fork_params['failed'])).lower(), - 'processMethod': 'move', - 'forceReplace': str( - bool(fork_params['force_replace']), - ).lower(), - 'returnData': str( - bool(fork_params['return_data']), - ).lower(), - 'delete': str(bool(fork_params['delete'])).lower(), - 'forceNext': str(bool(fork_params['force_next'])).lower(), - 'nzbName': fork_params['nzbName'], - } + oauth = OAuth2Session(client=LegacyApplicationClient(client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID)) + oauth_token = oauth.fetch_token(client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID, token_url=nzb2media.SICKRAGE_OAUTH_TOKEN_URL, username=sso_username, password=sso_password) + session.headers.update({'Authorization': 'Bearer ' + oauth_token['access_token']}) + params = {'path': fork_params['path'], 'failed': str(bool(fork_params['failed'])).lower(), 'processMethod': 'move', 'forceReplace': str(bool(fork_params['force_replace'])).lower(), 'returnData': str(bool(fork_params['return_data'])).lower(), 'delete': str(bool(fork_params['delete'])).lower(), 'forceNext': 
str(bool(fork_params['force_next'])).lower(), 'nzbName': fork_params['nzbName']} else: params = fork_params - - response = session.get( - url, - params=params, - stream=True, - verify=False, - timeout=(30, 1800), - ) + response = session.get(url, params=params, stream=True, verify=False, timeout=(30, 1800)) elif section == 'NzbDrone': log.debug(f'Opening URL: {url} with data: {data}') - response = requests.post( - url, - data=json.dumps(data), - headers=headers, - stream=True, - verify=False, - timeout=(30, 1800), - ) + response = requests.post(url, data=json.dumps(data), headers=headers, stream=True, verify=False, timeout=(30, 1800)) except requests.ConnectionError: log.error(f'Unable to open URL: {url}') - return ProcessResult.failure( - f'{section}: Failed to post-process - Unable to connect to ' - f'{section}', - ) - - if response.status_code not in [ - requests.codes.ok, - requests.codes.created, - requests.codes.accepted, - ]: + return ProcessResult.failure(f'{section}: Failed to post-process - Unable to connect to {section}') + if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: log.error(f'Server returned status {response.status_code}') - return ProcessResult.failure( - f'{section}: Failed to post-process - Server returned status ' - f'{response.status_code}', - ) - + return ProcessResult.failure(f'{section}: Failed to post-process - Server returned status {response.status_code}') success = False queued = False started = False @@ -504,12 +330,8 @@ def process( input_name = os.path.split(line)[1] if 'added to the queue' in line: queued = True - if ( - 'Processing succeeded' in line - or 'Successfully processed' in line - ): + if 'Processing succeeded' in line or 'Successfully processed' in line: success = True - if queued: time.sleep(60) elif section == 'SiCKRAGE': @@ -528,20 +350,11 @@ def process( log.warning(f'No scan id was returned due to: {error}') scan_id = None started = False - - if ( - status != 0 - and delete_failed - and not os.path.dirname(dir_name) == dir_name - ): + if status != 0 and delete_failed and not os.path.dirname(dir_name) == dir_name: log.debug(f'Deleting failed files and folder {dir_name}') remove_dir(dir_name) - if success: - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) - + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') if section == 'NzbDrone' and started: num = 0 params = {} @@ -554,39 +367,20 @@ def process( num += 1 if command_status: log.debug(f'The Scan command return status: {command_status}') - if not os.path.exists(dir_name): log.debug(f'The directory {dir_name} has been removed. Renaming was successful.') - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) - + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') if command_status and command_status in ['completed']: log.debug('The Scan command has completed successfully. Renaming was successful.') - return ProcessResult.success( - f'{section}: Successfully post-processed {input_name}', - ) - + return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') if command_status and command_status in ['failed']: log.debug('The Scan command has failed. 
Renaming was not successful.') - # return ProcessResult.failure( - # f'{section}: Failed to post-process {input_name}' - # ) - + # return ProcessResult.failure(f'{section}: Failed to post-process {input_name}') url2 = nzb2media.utils.common.create_url(scheme, host, port, route2) if completed_download_handling(url2, headers, section=section): log.debug(f'The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {section}.') - return ProcessResult( - message=f'{section}: Complete DownLoad Handling is enabled. ' - f'Passing back to {section}', - status_code=status, - ) + return ProcessResult(message=f'{section}: Complete DownLoad Handling is enabled. Passing back to {section}', status_code=status) log.warning('The Scan command did not return a valid status. Renaming was not successful.') - return ProcessResult.failure( - f'{section}: Failed to post-process {input_name}', - ) + return ProcessResult.failure(f'{section}: Failed to post-process {input_name}') # We did not receive Success confirmation. - return ProcessResult.failure( - f'{section}: Failed to post-process - Returned log from {section} ' - f'was not as expected.', - ) + return ProcessResult.failure(f'{section}: Failed to post-process - Returned log from {section} was not as expected.') diff --git a/nzb2media/configuration.py b/nzb2media/configuration.py index a2d64ac5..535ca605 100644 --- a/nzb2media/configuration.py +++ b/nzb2media/configuration.py @@ -30,19 +30,14 @@ class Section(configobj.Section): for section_name, subsections in to_return.items(): for subsection in subsections: try: - value = list( - ConfigObj.find_key(subsections, 'enabled'), - )[0] + value = list(ConfigObj.find_key(subsections, 'enabled'))[0] except Exception: value = 0 - if int(value) != 1: del to_return[section_name][subsection] - # cleanout empty sections and subsections for key in [k for (k, v) in to_return.items() if not v]: del to_return[key] - return to_return def findsection(self, key): @@ -52,24 +47,20 @@ class Section(configobj.Section): value = list(ConfigObj.find_key(to_return[subsection], key))[0] except Exception: value = None - if not value: del to_return[subsection] else: for category in to_return[subsection]: if category != key: del to_return[subsection][category] - # cleanout empty sections and subsections for key in [k for (k, v) in to_return.items() if not v]: del to_return[key] - return to_return def __getitem__(self, key): if key in self.keys(): return dict.__getitem__(self, key) - to_return = copy.deepcopy(self) for section, subsections in to_return.items(): if section in key: @@ -80,16 +71,13 @@ class Section(configobj.Section): continue if key in options: return options[key] - del subsections[subsection] else: if section not in key: del to_return[section] - # cleanout empty sections and subsections for key in [k for (k, v) in to_return.items() if not v]: del to_return[key] - return to_return @@ -116,7 +104,6 @@ class ConfigObj(configobj.ConfigObj, Section): global CFG_NEW, CFG_OLD CFG_NEW = None CFG_OLD = None - try: # check for autoProcessMedia.cfg and create if it does not exist if not nzb2media.CONFIG_FILE.is_file(): @@ -124,7 +111,6 @@ class ConfigObj(configobj.ConfigObj, Section): CFG_OLD = Config(nzb2media.CONFIG_FILE) except Exception as error: log.error(f'Error {error} when copying to .cfg') - try: # check for autoProcessMedia.cfg.spec and create if it does not exist if not nzb2media.CONFIG_SPEC_FILE.is_file(): @@ -132,36 +118,21 @@ class ConfigObj(configobj.ConfigObj, 
Section): CFG_NEW = Config(nzb2media.CONFIG_SPEC_FILE) except Exception as error: log.error(f'Error {error} when copying to .spec') - # check for autoProcessMedia.cfg and autoProcessMedia.cfg.spec and if they don't exist return and fail if CFG_NEW is None or CFG_OLD is None: return False - subsections = {} # gather all new-style and old-style sub-sections for newsection in CFG_NEW: if CFG_NEW[newsection].sections: subsections.update({newsection: CFG_NEW[newsection].sections}) - for section in CFG_OLD: if CFG_OLD[section].sections: subsections.update({section: CFG_OLD[section].sections}) for option, value in CFG_OLD[section].items(): - if option in [ - 'category', - 'cpsCategory', - 'sbCategory', - 'srCategory', - 'hpCategory', - 'mlCategory', - 'gzCategory', - 'raCategory', - 'ndCategory', - 'W3Category', - ]: + if option in ['category', 'cpsCategory', 'sbCategory', 'srCategory', 'hpCategory', 'mlCategory', 'gzCategory', 'raCategory', 'ndCategory', 'W3Category']: if not isinstance(value, list): value = [value] - # add subsection subsections.update({section: value}) CFG_OLD[section].pop(option) @@ -171,9 +142,7 @@ class ConfigObj(configobj.ConfigObj, Section): for option, value in values.items(): if section in ['CouchPotato']: if option == ['outputDirectory']: - CFG_NEW['Torrent'][option] = os.path.split( - os.path.normpath(value), - )[0] + CFG_NEW['Torrent'][option] = os.path.split(os.path.normpath(value))[0] values.pop(option) if section in ['CouchPotato', 'HeadPhones', 'Gamez', 'Mylar']: if option in ['username', 'password']: @@ -185,20 +154,11 @@ class ConfigObj(configobj.ConfigObj, Section): if option == 'failed_fork': # change this old format values['failed'] = 'auto' values.pop(option) - if ( - option == 'outputDirectory' - ): # move this to new location format - CFG_NEW['Torrent'][option] = os.path.split( - os.path.normpath(value), - )[0] + if option == 'outputDirectory': # move this to new location format + CFG_NEW['Torrent'][option] = os.path.split(os.path.normpath(value))[0] values.pop(option) if section in ['Torrent']: - if option in [ - 'compressedExtensions', - 'mediaExtensions', - 'metaExtensions', - 'minSampleSize', - ]: + if option in ['compressedExtensions', 'mediaExtensions', 'metaExtensions', 'minSampleSize']: CFG_NEW['Extensions'][option] = value values.pop(option) if option == 'useLink': # Sym links supported now as well. @@ -210,9 +170,7 @@ class ConfigObj(configobj.ConfigObj, Section): if option == 'forceClean': CFG_NEW['General']['force_clean'] = value values.pop(option) - if ( - option == 'qBittorrenHost' - ): # We had a typo that is now fixed. + if option == 'qBittorrenHost': # We had a typo that is now fixed. 
CFG_NEW['Torrent']['qBittorrentHost'] = value values.pop(option) if section in ['Transcoder']: @@ -225,14 +183,12 @@ class ConfigObj(configobj.ConfigObj, Section): elif not value: value = 0 values[option] = value - # remove any options that we no longer need so they don't migrate into our new config if not list(ConfigObj.find_key(CFG_NEW, option)): try: values.pop(option) except Exception: pass - return values def process_section(section, subsections=None): @@ -245,9 +201,7 @@ class ConfigObj(configobj.ConfigObj, Section): for option, value in values.items(): CFG_NEW[section][subsection][option] = value elif subsection in CFG_OLD[section].sections: - values = cleanup_values( - CFG_OLD[section][subsection], section, - ) + values = cleanup_values(CFG_OLD[section][subsection], section) if subsection not in CFG_NEW[section].sections: CFG_NEW[section][subsection] = {} for option, value in values.items(): @@ -264,96 +218,51 @@ class ConfigObj(configobj.ConfigObj, Section): subsection = None if section in list(chain.from_iterable(subsections.values())): subsection = section - section = ''.join( - [k for k, v in subsections.items() if subsection in v], - ) + section = ''.join([k for k, v in subsections.items() if subsection in v]) process_section(section, subsection) elif section in subsections.keys(): subsection = subsections[section] process_section(section, subsection) elif section in CFG_OLD.keys(): process_section(section, subsection) - # migrate SiCRKAGE settings from SickBeard section to new dedicated SiCRKAGE section - if ( - CFG_OLD['SickBeard']['tv']['enabled'] - and CFG_OLD['SickBeard']['tv']['fork'] == 'sickrage-api' - ): + if CFG_OLD['SickBeard']['tv']['enabled'] and CFG_OLD['SickBeard']['tv']['fork'] == 'sickrage-api': for option, value in CFG_OLD['SickBeard']['tv'].items(): if option in CFG_NEW['SiCKRAGE']['tv']: CFG_NEW['SiCKRAGE']['tv'][option] = value - # set API version to 1 if API key detected and no SSO username is set - if ( - CFG_NEW['SiCKRAGE']['tv']['apikey'] - and not CFG_NEW['SiCKRAGE']['tv']['sso_username'] - ): + if CFG_NEW['SiCKRAGE']['tv']['apikey'] and not CFG_NEW['SiCKRAGE']['tv']['sso_username']: CFG_NEW['SiCKRAGE']['tv']['api_version'] = 1 - # disable SickBeard section CFG_NEW['SickBeard']['tv']['enabled'] = 0 CFG_NEW['SickBeard']['tv']['fork'] = 'auto' - # create a backup of our old config CFG_OLD.filename = f'{nzb2media.CONFIG_FILE}.old' CFG_OLD.write() - # write our new config to autoProcessMedia.cfg CFG_NEW.filename = nzb2media.CONFIG_FILE CFG_NEW.write() - return True @staticmethod def addnzbget(): # load configs into memory cfg_new = Config() - try: - if ( - 'NZBPO_NDCATEGORY' in os.environ - and 'NZBPO_SBCATEGORY' in os.environ - ): - if ( - os.environ['NZBPO_NDCATEGORY'] - == os.environ['NZBPO_SBCATEGORY'] - ): + if 'NZBPO_NDCATEGORY' in os.environ and 'NZBPO_SBCATEGORY' in os.environ: + if os.environ['NZBPO_NDCATEGORY'] == os.environ['NZBPO_SBCATEGORY']: log.warning('{x} category is set for SickBeard and Sonarr. Please check your config in NZBGet'.format(x=os.environ['NZBPO_NDCATEGORY'])) - if ( - 'NZBPO_RACATEGORY' in os.environ - and 'NZBPO_CPSCATEGORY' in os.environ - ): - if ( - os.environ['NZBPO_RACATEGORY'] - == os.environ['NZBPO_CPSCATEGORY'] - ): + if 'NZBPO_RACATEGORY' in os.environ and 'NZBPO_CPSCATEGORY' in os.environ: + if os.environ['NZBPO_RACATEGORY'] == os.environ['NZBPO_CPSCATEGORY']: log.warning('{x} category is set for CouchPotato and Radarr. 
Please check your config in NZBGet'.format(x=os.environ['NZBPO_RACATEGORY'])) - if ( - 'NZBPO_RACATEGORY' in os.environ - and 'NZBPO_W3CATEGORY' in os.environ - ): - if ( - os.environ['NZBPO_RACATEGORY'] - == os.environ['NZBPO_W3CATEGORY'] - ): + if 'NZBPO_RACATEGORY' in os.environ and 'NZBPO_W3CATEGORY' in os.environ: + if os.environ['NZBPO_RACATEGORY'] == os.environ['NZBPO_W3CATEGORY']: log.warning('{x} category is set for Watcher3 and Radarr. Please check your config in NZBGet'.format(x=os.environ['NZBPO_RACATEGORY'])) - if ( - 'NZBPO_W3CATEGORY' in os.environ - and 'NZBPO_CPSCATEGORY' in os.environ - ): - if ( - os.environ['NZBPO_W3CATEGORY'] - == os.environ['NZBPO_CPSCATEGORY'] - ): + if 'NZBPO_W3CATEGORY' in os.environ and 'NZBPO_CPSCATEGORY' in os.environ: + if os.environ['NZBPO_W3CATEGORY'] == os.environ['NZBPO_CPSCATEGORY']: log.warning('{x} category is set for CouchPotato and Watcher3. Please check your config in NZBGet'.format(x=os.environ['NZBPO_W3CATEGORY'])) - if ( - 'NZBPO_LICATEGORY' in os.environ - and 'NZBPO_HPCATEGORY' in os.environ - ): - if ( - os.environ['NZBPO_LICATEGORY'] - == os.environ['NZBPO_HPCATEGORY'] - ): + if 'NZBPO_LICATEGORY' in os.environ and 'NZBPO_HPCATEGORY' in os.environ: + if os.environ['NZBPO_LICATEGORY'] == os.environ['NZBPO_HPCATEGORY']: log.warning('{x} category is set for HeadPhones and Lidarr. Please check your config in NZBGet'.format(x=os.environ['NZBPO_LICATEGORY'])) section = 'Nzb' key = 'NZBOP_DESTDIR' @@ -361,29 +270,15 @@ class ConfigObj(configobj.ConfigObj, Section): option = 'default_downloadDirectory' value = os.environ[key] cfg_new[section][option] = value - section = 'General' - env_keys = [ - 'AUTO_UPDATE', - 'CHECK_MEDIA', - 'REQUIRE_LAN', - 'SAFE_MODE', - 'NO_EXTRACT_FAILED', - ] - cfg_keys = [ - 'auto_update', - 'check_media', - 'require_lan', - 'safe_mode', - 'no_extract_failed', - ] + env_keys = ['AUTO_UPDATE', 'CHECK_MEDIA', 'REQUIRE_LAN', 'SAFE_MODE', 'NO_EXTRACT_FAILED'] + cfg_keys = ['auto_update', 'check_media', 'require_lan', 'safe_mode', 'no_extract_failed'] for index in range(len(env_keys)): key = f'NZBPO_{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] cfg_new[section][option] = value - section = 'Network' env_keys = ['MOUNTPOINTS'] cfg_keys = ['mount_points'] @@ -393,571 +288,199 @@ class ConfigObj(configobj.ConfigObj, Section): option = cfg_keys[index] value = os.environ[key] cfg_new[section][option] = value - section = 'CouchPotato' env_cat_key = 'NZBPO_CPSCATEGORY' - env_keys = [ - 'ENABLED', - 'APIKEY', - 'HOST', - 'PORT', - 'SSL', - 'WEB_ROOT', - 'METHOD', - 'DELETE_FAILED', - 'REMOTE_PATH', - 'WAIT_FOR', - 'WATCH_DIR', - 'OMDBAPIKEY', - ] - cfg_keys = [ - 'enabled', - 'apikey', - 'host', - 'port', - 'ssl', - 'web_root', - 'method', - 'delete_failed', - 'remote_path', - 'wait_for', - 'watch_dir', - 'omdbapikey', - ] + env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'METHOD', 'DELETE_FAILED', 'REMOTE_PATH', 'WAIT_FOR', 'WATCH_DIR', 'OMDBAPIKEY'] + cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'method', 'delete_failed', 'remote_path', 'wait_for', 'watch_dir', 'omdbapikey'] if env_cat_key in os.environ: for index in range(len(env_keys)): key = f'NZBPO_CPS{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] - if ( - os.environ[env_cat_key] - not in cfg_new[section].sections - ): + if os.environ[env_cat_key] not in cfg_new[section].sections: cfg_new[section][os.environ[env_cat_key]] = {} - 
cfg_new[section][os.environ[env_cat_key]][ - option - ] = value + cfg_new[section][os.environ[env_cat_key]][option] = value cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 if os.environ[env_cat_key] in cfg_new['Radarr'].sections: cfg_new['Radarr'][env_cat_key]['enabled'] = 0 if os.environ[env_cat_key] in cfg_new['Watcher3'].sections: cfg_new['Watcher3'][env_cat_key]['enabled'] = 0 - section = 'Watcher3' env_cat_key = 'NZBPO_W3CATEGORY' - env_keys = [ - 'ENABLED', - 'APIKEY', - 'HOST', - 'PORT', - 'SSL', - 'WEB_ROOT', - 'METHOD', - 'DELETE_FAILED', - 'REMOTE_PATH', - 'WAIT_FOR', - 'WATCH_DIR', - 'OMDBAPIKEY', - ] - cfg_keys = [ - 'enabled', - 'apikey', - 'host', - 'port', - 'ssl', - 'web_root', - 'method', - 'delete_failed', - 'remote_path', - 'wait_for', - 'watch_dir', - 'omdbapikey', - ] + env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'METHOD', 'DELETE_FAILED', 'REMOTE_PATH', 'WAIT_FOR', 'WATCH_DIR', 'OMDBAPIKEY'] + cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'method', 'delete_failed', 'remote_path', 'wait_for', 'watch_dir', 'omdbapikey'] if env_cat_key in os.environ: for index in range(len(env_keys)): key = f'NZBPO_W3{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] - if ( - os.environ[env_cat_key] - not in cfg_new[section].sections - ): + if os.environ[env_cat_key] not in cfg_new[section].sections: cfg_new[section][os.environ[env_cat_key]] = {} - cfg_new[section][os.environ[env_cat_key]][ - option - ] = value + cfg_new[section][os.environ[env_cat_key]][option] = value cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 if os.environ[env_cat_key] in cfg_new['Radarr'].sections: cfg_new['Radarr'][env_cat_key]['enabled'] = 0 if os.environ[env_cat_key] in cfg_new['CouchPotato'].sections: cfg_new['CouchPotato'][env_cat_key]['enabled'] = 0 - section = 'SickBeard' env_cat_key = 'NZBPO_SBCATEGORY' - env_keys = [ - 'ENABLED', - 'HOST', - 'PORT', - 'APIKEY', - 'USERNAME', - 'PASSWORD', - 'SSL', - 'WEB_ROOT', - 'WATCH_DIR', - 'FORK', - 'DELETE_FAILED', - 'TORRENT_NOLINK', - 'NZBEXTRACTIONBY', - 'REMOTE_PATH', - 'PROCESS_METHOD', - ] - cfg_keys = [ - 'enabled', - 'host', - 'port', - 'apikey', - 'username', - 'password', - 'ssl', - 'web_root', - 'watch_dir', - 'fork', - 'delete_failed', - 'Torrent_NoLink', - 'nzbExtractionBy', - 'remote_path', - 'process_method', - ] + env_keys = ['ENABLED', 'HOST', 'PORT', 'APIKEY', 'USERNAME', 'PASSWORD', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'REMOTE_PATH', 'PROCESS_METHOD'] + cfg_keys = ['enabled', 'host', 'port', 'apikey', 'username', 'password', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'remote_path', 'process_method'] if env_cat_key in os.environ: for index in range(len(env_keys)): key = f'NZBPO_SB{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] - if ( - os.environ[env_cat_key] - not in cfg_new[section].sections - ): + if os.environ[env_cat_key] not in cfg_new[section].sections: cfg_new[section][os.environ[env_cat_key]] = {} - cfg_new[section][os.environ[env_cat_key]][ - option - ] = value + cfg_new[section][os.environ[env_cat_key]][option] = value cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 if os.environ[env_cat_key] in cfg_new['SiCKRAGE'].sections: cfg_new['SiCKRAGE'][env_cat_key]['enabled'] = 0 if os.environ[env_cat_key] in cfg_new['NzbDrone'].sections: cfg_new['NzbDrone'][env_cat_key]['enabled'] = 0 
- section = 'SiCKRAGE' env_cat_key = 'NZBPO_SRCATEGORY' - env_keys = [ - 'ENABLED', - 'HOST', - 'PORT', - 'APIKEY', - 'API_VERSION', - 'SSO_USERNAME', - 'SSO_PASSWORD', - 'SSL', - 'WEB_ROOT', - 'WATCH_DIR', - 'FORK', - 'DELETE_FAILED', - 'TORRENT_NOLINK', - 'NZBEXTRACTIONBY', - 'REMOTE_PATH', - 'PROCESS_METHOD', - ] - cfg_keys = [ - 'enabled', - 'host', - 'port', - 'apikey', - 'api_version', - 'sso_username', - 'sso_password', - 'ssl', - 'web_root', - 'watch_dir', - 'fork', - 'delete_failed', - 'Torrent_NoLink', - 'nzbExtractionBy', - 'remote_path', - 'process_method', - ] + env_keys = ['ENABLED', 'HOST', 'PORT', 'APIKEY', 'API_VERSION', 'SSO_USERNAME', 'SSO_PASSWORD', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'REMOTE_PATH', 'PROCESS_METHOD'] + cfg_keys = ['enabled', 'host', 'port', 'apikey', 'api_version', 'sso_username', 'sso_password', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'remote_path', 'process_method'] if env_cat_key in os.environ: for index in range(len(env_keys)): key = f'NZBPO_SR{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] - if ( - os.environ[env_cat_key] - not in cfg_new[section].sections - ): + if os.environ[env_cat_key] not in cfg_new[section].sections: cfg_new[section][os.environ[env_cat_key]] = {} - cfg_new[section][os.environ[env_cat_key]][ - option - ] = value + cfg_new[section][os.environ[env_cat_key]][option] = value cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 if os.environ[env_cat_key] in cfg_new['SickBeard'].sections: cfg_new['SickBeard'][env_cat_key]['enabled'] = 0 if os.environ[env_cat_key] in cfg_new['NzbDrone'].sections: cfg_new['NzbDrone'][env_cat_key]['enabled'] = 0 - section = 'HeadPhones' env_cat_key = 'NZBPO_HPCATEGORY' - env_keys = [ - 'ENABLED', - 'APIKEY', - 'HOST', - 'PORT', - 'SSL', - 'WEB_ROOT', - 'WAIT_FOR', - 'WATCH_DIR', - 'REMOTE_PATH', - 'DELETE_FAILED', - ] - cfg_keys = [ - 'enabled', - 'apikey', - 'host', - 'port', - 'ssl', - 'web_root', - 'wait_for', - 'watch_dir', - 'remote_path', - 'delete_failed', - ] + env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WAIT_FOR', 'WATCH_DIR', 'REMOTE_PATH', 'DELETE_FAILED'] + cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'wait_for', 'watch_dir', 'remote_path', 'delete_failed'] if env_cat_key in os.environ: for index in range(len(env_keys)): key = f'NZBPO_HP{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] - if ( - os.environ[env_cat_key] - not in cfg_new[section].sections - ): + if os.environ[env_cat_key] not in cfg_new[section].sections: cfg_new[section][os.environ[env_cat_key]] = {} - cfg_new[section][os.environ[env_cat_key]][ - option - ] = value + cfg_new[section][os.environ[env_cat_key]][option] = value cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 if os.environ[env_cat_key] in cfg_new['Lidarr'].sections: cfg_new['Lidarr'][env_cat_key]['enabled'] = 0 - section = 'Mylar' env_cat_key = 'NZBPO_MYCATEGORY' - env_keys = [ - 'ENABLED', - 'HOST', - 'PORT', - 'USERNAME', - 'PASSWORD', - 'APIKEY', - 'SSL', - 'WEB_ROOT', - 'WATCH_DIR', - 'REMOTE_PATH', - ] - cfg_keys = [ - 'enabled', - 'host', - 'port', - 'username', - 'password', - 'apikey', - 'ssl', - 'web_root', - 'watch_dir', - 'remote_path', - ] + env_keys = ['ENABLED', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'APIKEY', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'REMOTE_PATH'] + cfg_keys = ['enabled', 'host', 'port', 
'username', 'password', 'apikey', 'ssl', 'web_root', 'watch_dir', 'remote_path'] if env_cat_key in os.environ: for index in range(len(env_keys)): key = f'NZBPO_MY{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] - if ( - os.environ[env_cat_key] - not in cfg_new[section].sections - ): + if os.environ[env_cat_key] not in cfg_new[section].sections: cfg_new[section][os.environ[env_cat_key]] = {} - cfg_new[section][os.environ[env_cat_key]][ - option - ] = value + cfg_new[section][os.environ[env_cat_key]][option] = value cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 - section = 'Gamez' env_cat_key = 'NZBPO_GZCATEGORY' - env_keys = [ - 'ENABLED', - 'APIKEY', - 'HOST', - 'PORT', - 'SSL', - 'WEB_ROOT', - 'WATCH_DIR', - 'LIBRARY', - 'REMOTE_PATH', - ] - cfg_keys = [ - 'enabled', - 'apikey', - 'host', - 'port', - 'ssl', - 'web_root', - 'watch_dir', - 'library', - 'remote_path', - ] + env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'LIBRARY', 'REMOTE_PATH'] + cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'watch_dir', 'library', 'remote_path'] if env_cat_key in os.environ: for index in range(len(env_keys)): key = f'NZBPO_GZ{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] - if ( - os.environ[env_cat_key] - not in cfg_new[section].sections - ): + if os.environ[env_cat_key] not in cfg_new[section].sections: cfg_new[section][os.environ[env_cat_key]] = {} - cfg_new[section][os.environ[env_cat_key]][ - option - ] = value + cfg_new[section][os.environ[env_cat_key]][option] = value cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 - section = 'LazyLibrarian' env_cat_key = 'NZBPO_LLCATEGORY' - env_keys = [ - 'ENABLED', - 'APIKEY', - 'HOST', - 'PORT', - 'SSL', - 'WEB_ROOT', - 'WATCH_DIR', - 'REMOTE_PATH', - ] - cfg_keys = [ - 'enabled', - 'apikey', - 'host', - 'port', - 'ssl', - 'web_root', - 'watch_dir', - 'remote_path', - ] + env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'REMOTE_PATH'] + cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'watch_dir', 'remote_path'] if env_cat_key in os.environ: for index in range(len(env_keys)): key = f'NZBPO_LL{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] - if ( - os.environ[env_cat_key] - not in cfg_new[section].sections - ): + if os.environ[env_cat_key] not in cfg_new[section].sections: cfg_new[section][os.environ[env_cat_key]] = {} - cfg_new[section][os.environ[env_cat_key]][ - option - ] = value + cfg_new[section][os.environ[env_cat_key]][option] = value cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 - section = 'NzbDrone' env_cat_key = 'NZBPO_NDCATEGORY' - env_keys = [ - 'ENABLED', - 'HOST', - 'APIKEY', - 'PORT', - 'SSL', - 'WEB_ROOT', - 'WATCH_DIR', - 'FORK', - 'DELETE_FAILED', - 'TORRENT_NOLINK', - 'NZBEXTRACTIONBY', - 'WAIT_FOR', - 'DELETE_FAILED', - 'REMOTE_PATH', - 'IMPORTMODE', - ] + env_keys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH', 'IMPORTMODE'] # new cfgKey added for importMode - cfg_keys = [ - 'enabled', - 'host', - 'apikey', - 'port', - 'ssl', - 'web_root', - 'watch_dir', - 'fork', - 'delete_failed', - 'Torrent_NoLink', - 'nzbExtractionBy', - 'wait_for', - 'delete_failed', - 'remote_path', - 'importMode', - ] + cfg_keys = ['enabled', 'host', 'apikey', 'port', 'ssl', 
'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 'remote_path', 'importMode'] if env_cat_key in os.environ: for index in range(len(env_keys)): key = f'NZBPO_ND{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] - if ( - os.environ[env_cat_key] - not in cfg_new[section].sections - ): + if os.environ[env_cat_key] not in cfg_new[section].sections: cfg_new[section][os.environ[env_cat_key]] = {} - cfg_new[section][os.environ[env_cat_key]][ - option - ] = value + cfg_new[section][os.environ[env_cat_key]][option] = value cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 if os.environ[env_cat_key] in cfg_new['SickBeard'].sections: cfg_new['SickBeard'][env_cat_key]['enabled'] = 0 if os.environ[env_cat_key] in cfg_new['SiCKRAGE'].sections: cfg_new['SiCKRAGE'][env_cat_key]['enabled'] = 0 - section = 'Radarr' env_cat_key = 'NZBPO_RACATEGORY' - env_keys = [ - 'ENABLED', - 'HOST', - 'APIKEY', - 'PORT', - 'SSL', - 'WEB_ROOT', - 'WATCH_DIR', - 'FORK', - 'DELETE_FAILED', - 'TORRENT_NOLINK', - 'NZBEXTRACTIONBY', - 'WAIT_FOR', - 'DELETE_FAILED', - 'REMOTE_PATH', - 'OMDBAPIKEY', - 'IMPORTMODE', - ] + env_keys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH', 'OMDBAPIKEY', 'IMPORTMODE'] # new cfgKey added for importMode - cfg_keys = [ - 'enabled', - 'host', - 'apikey', - 'port', - 'ssl', - 'web_root', - 'watch_dir', - 'fork', - 'delete_failed', - 'Torrent_NoLink', - 'nzbExtractionBy', - 'wait_for', - 'delete_failed', - 'remote_path', - 'omdbapikey', - 'importMode', - ] + cfg_keys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 'remote_path', 'omdbapikey', 'importMode'] if env_cat_key in os.environ: for index in range(len(env_keys)): key = f'NZBPO_RA{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] - if ( - os.environ[env_cat_key] - not in cfg_new[section].sections - ): + if os.environ[env_cat_key] not in cfg_new[section].sections: cfg_new[section][os.environ[env_cat_key]] = {} - cfg_new[section][os.environ[env_cat_key]][ - option - ] = value + cfg_new[section][os.environ[env_cat_key]][option] = value cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 if os.environ[env_cat_key] in cfg_new['CouchPotato'].sections: cfg_new['CouchPotato'][env_cat_key]['enabled'] = 0 if os.environ[env_cat_key] in cfg_new['Wacther3'].sections: cfg_new['Watcher3'][env_cat_key]['enabled'] = 0 - section = 'Lidarr' env_cat_key = 'NZBPO_LICATEGORY' - env_keys = [ - 'ENABLED', - 'HOST', - 'APIKEY', - 'PORT', - 'SSL', - 'WEB_ROOT', - 'WATCH_DIR', - 'FORK', - 'DELETE_FAILED', - 'TORRENT_NOLINK', - 'NZBEXTRACTIONBY', - 'WAIT_FOR', - 'DELETE_FAILED', - 'REMOTE_PATH', - ] - cfg_keys = [ - 'enabled', - 'host', - 'apikey', - 'port', - 'ssl', - 'web_root', - 'watch_dir', - 'fork', - 'delete_failed', - 'Torrent_NoLink', - 'nzbExtractionBy', - 'wait_for', - 'delete_failed', - 'remote_path', - ] + env_keys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH'] + cfg_keys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 
'remote_path'] if env_cat_key in os.environ: for index in range(len(env_keys)): key = f'NZBPO_LI{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] - if ( - os.environ[env_cat_key] - not in cfg_new[section].sections - ): + if os.environ[env_cat_key] not in cfg_new[section].sections: cfg_new[section][os.environ[env_cat_key]] = {} - cfg_new[section][os.environ[env_cat_key]][ - option - ] = value + cfg_new[section][os.environ[env_cat_key]][option] = value cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 if os.environ[env_cat_key] in cfg_new['HeadPhones'].sections: cfg_new['HeadPhones'][env_cat_key]['enabled'] = 0 - section = 'Extensions' - env_keys = [ - 'COMPRESSEDEXTENSIONS', - 'MEDIAEXTENSIONS', - 'METAEXTENSIONS', - ] - cfg_keys = [ - 'compressedExtensions', - 'mediaExtensions', - 'metaExtensions', - ] + env_keys = ['COMPRESSEDEXTENSIONS', 'MEDIAEXTENSIONS', 'METAEXTENSIONS'] + cfg_keys = ['compressedExtensions', 'mediaExtensions', 'metaExtensions'] for index in range(len(env_keys)): key = f'NZBPO_{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] cfg_new[section][option] = value - section = 'Posix' env_keys = ['NICENESS', 'IONICE_CLASS', 'IONICE_CLASSDATA'] cfg_keys = ['niceness', 'ionice_class', 'ionice_classdata'] @@ -967,93 +490,15 @@ class ConfigObj(configobj.ConfigObj, Section): option = cfg_keys[index] value = os.environ[key] cfg_new[section][option] = value - section = 'Transcoder' - env_keys = [ - 'TRANSCODE', - 'DUPLICATE', - 'IGNOREEXTENSIONS', - 'OUTPUTFASTSTART', - 'OUTPUTVIDEOPATH', - 'PROCESSOUTPUT', - 'AUDIOLANGUAGE', - 'ALLAUDIOLANGUAGES', - 'SUBLANGUAGES', - 'ALLSUBLANGUAGES', - 'EMBEDSUBS', - 'BURNINSUBTITLE', - 'EXTRACTSUBS', - 'EXTERNALSUBDIR', - 'OUTPUTDEFAULT', - 'OUTPUTVIDEOEXTENSION', - 'OUTPUTVIDEOCODEC', - 'VIDEOCODECALLOW', - 'OUTPUTVIDEOPRESET', - 'OUTPUTVIDEOFRAMERATE', - 'OUTPUTVIDEOBITRATE', - 'OUTPUTAUDIOCODEC', - 'AUDIOCODECALLOW', - 'OUTPUTAUDIOBITRATE', - 'OUTPUTQUALITYPERCENT', - 'GETSUBS', - 'OUTPUTAUDIOTRACK2CODEC', - 'AUDIOCODEC2ALLOW', - 'OUTPUTAUDIOTRACK2BITRATE', - 'OUTPUTAUDIOOTHERCODEC', - 'AUDIOOTHERCODECALLOW', - 'OUTPUTAUDIOOTHERBITRATE', - 'OUTPUTSUBTITLECODEC', - 'OUTPUTAUDIOCHANNELS', - 'OUTPUTAUDIOTRACK2CHANNELS', - 'OUTPUTAUDIOOTHERCHANNELS', - 'OUTPUTVIDEORESOLUTION', - ] - cfg_keys = [ - 'transcode', - 'duplicate', - 'ignoreExtensions', - 'outputFastStart', - 'outputVideoPath', - 'processOutput', - 'audioLanguage', - 'allAudioLanguages', - 'subLanguages', - 'allSubLanguages', - 'embedSubs', - 'burnInSubtitle', - 'extractSubs', - 'externalSubDir', - 'outputDefault', - 'outputVideoExtension', - 'outputVideoCodec', - 'VideoCodecAllow', - 'outputVideoPreset', - 'outputVideoFramerate', - 'outputVideoBitrate', - 'outputAudioCodec', - 'AudioCodecAllow', - 'outputAudioBitrate', - 'outputQualityPercent', - 'getSubs', - 'outputAudioTrack2Codec', - 'AudioCodec2Allow', - 'outputAudioTrack2Bitrate', - 'outputAudioOtherCodec', - 'AudioOtherCodecAllow', - 'outputAudioOtherBitrate', - 'outputSubtitleCodec', - 'outputAudioChannels', - 'outputAudioTrack2Channels', - 'outputAudioOtherChannels', - 'outputVideoResolution', - ] + env_keys = ['TRANSCODE', 'DUPLICATE', 'IGNOREEXTENSIONS', 'OUTPUTFASTSTART', 'OUTPUTVIDEOPATH', 'PROCESSOUTPUT', 'AUDIOLANGUAGE', 'ALLAUDIOLANGUAGES', 'SUBLANGUAGES', 'ALLSUBLANGUAGES', 'EMBEDSUBS', 'BURNINSUBTITLE', 'EXTRACTSUBS', 'EXTERNALSUBDIR', 'OUTPUTDEFAULT', 'OUTPUTVIDEOEXTENSION', 'OUTPUTVIDEOCODEC', 'VIDEOCODECALLOW', 
'OUTPUTVIDEOPRESET', 'OUTPUTVIDEOFRAMERATE', 'OUTPUTVIDEOBITRATE', 'OUTPUTAUDIOCODEC', 'AUDIOCODECALLOW', 'OUTPUTAUDIOBITRATE', 'OUTPUTQUALITYPERCENT', 'GETSUBS', 'OUTPUTAUDIOTRACK2CODEC', 'AUDIOCODEC2ALLOW', 'OUTPUTAUDIOTRACK2BITRATE', 'OUTPUTAUDIOOTHERCODEC', 'AUDIOOTHERCODECALLOW', 'OUTPUTAUDIOOTHERBITRATE', 'OUTPUTSUBTITLECODEC', 'OUTPUTAUDIOCHANNELS', 'OUTPUTAUDIOTRACK2CHANNELS', 'OUTPUTAUDIOOTHERCHANNELS', 'OUTPUTVIDEORESOLUTION'] + cfg_keys = ['transcode', 'duplicate', 'ignoreExtensions', 'outputFastStart', 'outputVideoPath', 'processOutput', 'audioLanguage', 'allAudioLanguages', 'subLanguages', 'allSubLanguages', 'embedSubs', 'burnInSubtitle', 'extractSubs', 'externalSubDir', 'outputDefault', 'outputVideoExtension', 'outputVideoCodec', 'VideoCodecAllow', 'outputVideoPreset', 'outputVideoFramerate', 'outputVideoBitrate', 'outputAudioCodec', 'AudioCodecAllow', 'outputAudioBitrate', 'outputQualityPercent', 'getSubs', 'outputAudioTrack2Codec', 'AudioCodec2Allow', 'outputAudioTrack2Bitrate', 'outputAudioOtherCodec', 'AudioOtherCodecAllow', 'outputAudioOtherBitrate', 'outputSubtitleCodec', 'outputAudioChannels', 'outputAudioTrack2Channels', 'outputAudioOtherChannels', 'outputVideoResolution'] for index in range(len(env_keys)): key = f'NZBPO_{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] cfg_new[section][option] = value - section = 'WakeOnLan' env_keys = ['WAKE', 'HOST', 'PORT', 'MAC'] cfg_keys = ['wake', 'host', 'port', 'mac'] @@ -1063,55 +508,28 @@ class ConfigObj(configobj.ConfigObj, Section): option = cfg_keys[index] value = os.environ[key] cfg_new[section][option] = value - section = 'UserScript' env_cat_key = 'NZBPO_USCATEGORY' - env_keys = [ - 'USER_SCRIPT_MEDIAEXTENSIONS', - 'USER_SCRIPT_PATH', - 'USER_SCRIPT_PARAM', - 'USER_SCRIPT_RUNONCE', - 'USER_SCRIPT_SUCCESSCODES', - 'USER_SCRIPT_CLEAN', - 'USDELAY', - 'USREMOTE_PATH', - ] - cfg_keys = [ - 'user_script_mediaExtensions', - 'user_script_path', - 'user_script_param', - 'user_script_runOnce', - 'user_script_successCodes', - 'user_script_clean', - 'delay', - 'remote_path', - ] + env_keys = ['USER_SCRIPT_MEDIAEXTENSIONS', 'USER_SCRIPT_PATH', 'USER_SCRIPT_PARAM', 'USER_SCRIPT_RUNONCE', 'USER_SCRIPT_SUCCESSCODES', 'USER_SCRIPT_CLEAN', 'USDELAY', 'USREMOTE_PATH'] + cfg_keys = ['user_script_mediaExtensions', 'user_script_path', 'user_script_param', 'user_script_runOnce', 'user_script_successCodes', 'user_script_clean', 'delay', 'remote_path'] if env_cat_key in os.environ: for index in range(len(env_keys)): key = f'NZBPO_{env_keys[index]}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] - if ( - os.environ[env_cat_key] - not in cfg_new[section].sections - ): + if os.environ[env_cat_key] not in cfg_new[section].sections: cfg_new[section][os.environ[env_cat_key]] = {} - cfg_new[section][os.environ[env_cat_key]][ - option - ] = value + cfg_new[section][os.environ[env_cat_key]][option] = value cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 - except Exception as error: log.debug(f'Error {error} when applying NZBGet config') - try: # write our new config to autoProcessMedia.cfg cfg_new.filename = nzb2media.CONFIG_FILE cfg_new.write() except Exception as error: log.debug(f'Error {error} when writing changes to .cfg') - return cfg_new diff --git a/nzb2media/databases.py b/nzb2media/databases.py index 1ae3874d..5f6eb55a 100644 --- a/nzb2media/databases.py +++ b/nzb2media/databases.py @@ -8,7 +8,6 @@ from nzb2media.utils.files import backup_versioned_file log = 
logging.getLogger(__name__)
 log.addHandler(logging.NullHandler())
-
 MIN_DB_VERSION = 1  # oldest db version we support migrating from
 MAX_DB_VERSION = 2
@@ -26,8 +25,6 @@ def backup_database(version):
 # = Main DB Migrations =
 # ======================
 # Add new migrations at the bottom of the list; subclass the previous migration.
-
-
 class InitialSchema(main_db.SchemaUpgrade):
     def test(self):
         no_update = False
@@ -37,34 +34,19 @@ class InitialSchema(main_db.SchemaUpgrade):
         return no_update
     def execute(self):
-        if not self.has_table('downloads') and not self.has_table(
-            'db_version',
-        ):
-            queries = [
-                'CREATE TABLE db_version (db_version INTEGER);',
-                'CREATE TABLE downloads (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));',
-                'INSERT INTO db_version (db_version) VALUES (2);',
-            ]
+        if not self.has_table('downloads') and not self.has_table('db_version'):
+            queries = ['CREATE TABLE db_version (db_version INTEGER);', 'CREATE TABLE downloads (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));', 'INSERT INTO db_version (db_version) VALUES (2);']
             for query in queries:
                 self.connection.action(query)
-
         else:
             cur_db_version = self.check_db_version()
-
             if cur_db_version < MIN_DB_VERSION:
                 log.critical(f'Your database version ({cur_db_version}) is too old to migrate from what this version of nzbToMedia supports ({MIN_DB_VERSION}).\nPlease remove nzbtomedia.db file to begin fresh.')
                 sys.exit(1)
-
             if cur_db_version > MAX_DB_VERSION:
                 log.critical(f'Your database version ({cur_db_version}) has been incremented past what this version of nzbToMedia supports ({MAX_DB_VERSION}).\nIf you have used other forks of nzbToMedia, your database may be unusable due to their modifications.')
                 sys.exit(1)
             if cur_db_version < MAX_DB_VERSION:  # We need to upgrade.
- queries = [ - 'CREATE TABLE downloads2 (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));', - 'INSERT INTO downloads2 SELECT * FROM downloads;', - 'DROP TABLE IF EXISTS downloads;', - 'ALTER TABLE downloads2 RENAME TO downloads;', - 'INSERT INTO db_version (db_version) VALUES (2);', - ] + queries = ['CREATE TABLE downloads2 (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));', 'INSERT INTO downloads2 SELECT * FROM downloads;', 'DROP TABLE IF EXISTS downloads;', 'ALTER TABLE downloads2 RENAME TO downloads;', 'INSERT INTO db_version (db_version) VALUES (2);'] for query in queries: self.connection.action(query) diff --git a/nzb2media/extractor/__init__.py b/nzb2media/extractor/__init__.py index 5f42aacd..78a07163 100644 --- a/nzb2media/extractor/__init__.py +++ b/nzb2media/extractor/__init__.py @@ -6,9 +6,9 @@ import platform import shutil import stat import subprocess -from subprocess import call -from subprocess import Popen from subprocess import DEVNULL +from subprocess import Popen +from subprocess import call from time import sleep import nzb2media @@ -24,68 +24,18 @@ def extract(file_path, output_destination): if not os.path.exists(nzb2media.SEVENZIP): log.error('EXTRACTOR: Could not find 7-zip, Exiting') return False - wscriptlocation = os.path.join( - os.environ['WINDIR'], 'system32', 'wscript.exe', - ) - invislocation = os.path.join( - nzb2media.APP_ROOT, 'nzb2media', 'extractor', 'bin', 'invisible.vbs', - ) - cmd_7zip = [ - wscriptlocation, - invislocation, - str(nzb2media.SHOWEXTRACT), - nzb2media.SEVENZIP, - 'x', - '-y', - ] - ext_7zip = [ - '.rar', - '.zip', - '.tar.gz', - 'tgz', - '.tar.bz2', - '.tbz', - '.tar.lzma', - '.tlz', - '.7z', - '.xz', - '.gz', - ] + wscriptlocation = os.path.join(os.environ['WINDIR'], 'system32', 'wscript.exe') + invislocation = os.path.join(nzb2media.APP_ROOT, 'nzb2media', 'extractor', 'bin', 'invisible.vbs') + cmd_7zip = [wscriptlocation, invislocation, str(nzb2media.SHOWEXTRACT), nzb2media.SEVENZIP, 'x', '-y'] + ext_7zip = ['.rar', '.zip', '.tar.gz', 'tgz', '.tar.bz2', '.tbz', '.tar.lzma', '.tlz', '.7z', '.xz', '.gz'] extract_commands = dict.fromkeys(ext_7zip, cmd_7zip) # Using unix else: - required_cmds = [ - 'unrar', - 'unzip', - 'tar', - 'unxz', - 'unlzma', - '7zr', - 'bunzip2', - 'gunzip', - ] + required_cmds = ['unrar', 'unzip', 'tar', 'unxz', 'unlzma', '7zr', 'bunzip2', 'gunzip'] # ## Possible future suport: # gunzip: gz (cmd will delete original archive) # ## the following do not extract to dest dir - # '.xz': ['xz', '-d --keep'], - # '.lzma': ['xz', '-d --format=lzma --keep'], - # '.bz2': ['bzip2', '-d --keep'], - - extract_commands = { - '.rar': ['unrar', 'x', '-o+', '-y'], - '.tar': ['tar', '-xf'], - '.zip': ['unzip'], - '.tar.gz': ['tar', '-xzf'], - '.tgz': ['tar', '-xzf'], - '.tar.bz2': ['tar', '-xjf'], - '.tbz': ['tar', '-xjf'], - '.tar.lzma': ['tar', '--lzma', '-xf'], - '.tlz': ['tar', '--lzma', '-xf'], - '.tar.xz': ['tar', '--xz', '-xf'], - '.txz': ['tar', '--xz', '-xf'], - '.7z': ['7zr', 'x'], - '.gz': ['gunzip'], - } + # '.xz': ['xz', '-d --keep'], # '.lzma': ['xz', '-d --format=lzma --keep'], # '.bz2': ['bzip2', '-d --keep'], extract_commands = { '.rar': ['unrar', 'x', '-o+', '-y'], '.tar': ['tar', '-xf'], '.zip': ['unzip'], '.tar.gz': ['tar', 
'-xzf'], '.tgz': ['tar', '-xzf'], '.tar.bz2': ['tar', '-xjf'], '.tbz': ['tar', '-xjf'], '.tar.lzma': ['tar', '--lzma', '-xf'], '.tlz': ['tar', '--lzma', '-xf'], '.tar.xz': ['tar', '--xz', '-xf'], '.txz': ['tar', '--xz', '-xf'], '.7z': ['7zr', 'x'], '.gz': ['gunzip'], } # Test command exists and if not, remove if not os.getenv('TR_TORRENT_DIR'): for cmd in required_cmds: @@ -107,10 +57,8 @@ def extract(file_path, output_destination): del extract_commands[key] else: log.warning('EXTRACTOR: Cannot determine which tool to use when called from Transmission') - if not extract_commands: log.warning('EXTRACTOR: No archive extracting programs found, plugin will be disabled') - ext = os.path.splitext(file_path) cmd = [] if ext[1] in ('.gz', '.bz2', '.lzma'): @@ -130,24 +78,15 @@ def extract(file_path, output_destination): else: log.debug(f'EXTRACTOR: Unknown file type: {ext[1]}') return False - # Create outputDestination folder nzb2media.make_dir(output_destination) - - if nzb2media.PASSWORDS_FILE and os.path.isfile( - os.path.normpath(nzb2media.PASSWORDS_FILE), - ): + if nzb2media.PASSWORDS_FILE and os.path.isfile(os.path.normpath(nzb2media.PASSWORDS_FILE)): with open(os.path.normpath(nzb2media.PASSWORDS_FILE)) as fin: - passwords = [ - line.strip() - for line in fin - ] + passwords = [line.strip() for line in fin] else: passwords = [] - log.info(f'Extracting {file_path} to {output_destination}') log.debug(f'Extracting {cmd} {file_path} {output_destination}') - orig_files = [] orig_dirs = [] for directory, subdirs, files in os.walk(output_destination): @@ -155,12 +94,9 @@ def extract(file_path, output_destination): orig_dirs.append(os.path.join(directory, subdir)) for file in files: orig_files.append(os.path.join(directory, file)) - pwd = os.getcwd() # Get our Present Working Directory - os.chdir( - output_destination, - ) # Not all unpack commands accept full paths, so just extract into this directory - + # Not all unpack commands accept full paths, so just extract into this directory + os.chdir(output_destination) try: # now works same for nt and *nix info = None cmd.append(file_path) # add filePath to final cmd arg. @@ -180,9 +116,7 @@ def extract(file_path, output_destination): elif len(passwords) > 0 and 'gunzip' not in cmd: log.info('EXTRACTOR: Attempting to extract with passwords') for password in passwords: - if ( - password == '' - ): # if edited in windows or otherwise if blank lines. + if password == '': # if edited in windows or otherwise if blank lines. continue cmd2 = cmd # append password here. @@ -200,8 +134,6 @@ def extract(file_path, output_destination): log.error(f'EXTRACTOR: Extraction failed for {file_path}. Could not call command {cmd}') os.chdir(pwd) return False - - devnull.close() os.chdir(pwd) # Go back to our Original Working Directory if success: # sleep to let files finish writing to disk @@ -217,9 +149,7 @@ def extract(file_path, output_destination): for file in files: if not os.path.join(directory, file) in orig_files: try: - shutil.copymode( - file_path, os.path.join(directory, file), - ) + shutil.copymode(file_path, os.path.join(directory, file)) except Exception: pass return True diff --git a/nzb2media/extractor/bin/AMD64/license.txt b/nzb2media/extractor/bin/AMD64/license.txt index 9855d1ea..5f1e982c 100644 --- a/nzb2media/extractor/bin/AMD64/license.txt +++ b/nzb2media/extractor/bin/AMD64/license.txt @@ -2,63 +2,40 @@ ~~~~~ License for use and distribution ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - 7-Zip Copyright (C) 1999-2018 Igor Pavlov. 
- The licenses for files are: - 1) 7z.dll: - The "GNU LGPL" as main license for most of the code - The "GNU LGPL" with "unRAR license restriction" for some code - The "BSD 3-clause License" for some code 2) All other files: the "GNU LGPL". - Redistributions in binary form must reproduce related license information from this file. - Note: You can use 7-Zip on any computer, including a computer in a commercial organization. You don't need to register or pay for 7-Zip. - - GNU LGPL information -------------------- - This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of + This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. - You can receive a copy of the GNU Lesser General Public License from http://www.gnu.org/ - - - - BSD 3-clause License -------------------- - The "BSD 3-clause License" is used for the code in 7z.dll that implements LZFSE data decompression. - That code was derived from the code in the "LZFSE compression library" developed by Apple Inc, - that also uses the "BSD 3-clause License": - + That code was derived from the code in the "LZFSE compression library" developed by Apple Inc, that also uses the "BSD 3-clause License": ---- Copyright (c) 2015-2016, Apple Inc. All rights reserved. - Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - 3. Neither the name of the copyright holder(s) nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES @@ -66,25 +43,15 @@ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---- - - - - unRAR license restriction ------------------------- - The decompression engine for RAR archives was developed using source code of unRAR program. All copyrights to original unRAR code are owned by Alexander Roshal. - The license for original unRAR code has the following restriction: - - The unRAR sources cannot be used to re-create the RAR compression algorithm, - which is proprietary. 
Distribution of modified unRAR sources in separate form + The unRAR sources cannot be used to re-create the RAR compression algorithm, which is proprietary. Distribution of modified unRAR sources in separate form or as a part of other software is permitted, provided that it is clearly stated in the documentation and source comments that the code may not be used to develop a RAR (WinRAR) compatible archiver. - - -- Igor Pavlov diff --git a/nzb2media/extractor/bin/invisible.vbs b/nzb2media/extractor/bin/invisible.vbs index 01979e2a..2f96e3dd 100644 --- a/nzb2media/extractor/bin/invisible.vbs +++ b/nzb2media/extractor/bin/invisible.vbs @@ -1,11 +1,9 @@ set args = WScript.Arguments num = args.Count - if num < 2 then WScript.Echo "Usage: [CScript | WScript] invis.vbs aScript.bat " WScript.Quit 1 end if - sargs = "" if num > 2 then sargs = " " @@ -14,8 +12,6 @@ if num > 2 then sargs = sargs & anArg & " " next end if - Set WshShell = WScript.CreateObject("WScript.Shell") - returnValue = WshShell.Run("""" & args(1) & """" & sargs, args(0), True) WScript.Quit(returnValue) diff --git a/nzb2media/extractor/bin/x86/license.txt b/nzb2media/extractor/bin/x86/license.txt index 9855d1ea..5f1e982c 100644 --- a/nzb2media/extractor/bin/x86/license.txt +++ b/nzb2media/extractor/bin/x86/license.txt @@ -2,63 +2,40 @@ ~~~~~ License for use and distribution ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - 7-Zip Copyright (C) 1999-2018 Igor Pavlov. - The licenses for files are: - 1) 7z.dll: - The "GNU LGPL" as main license for most of the code - The "GNU LGPL" with "unRAR license restriction" for some code - The "BSD 3-clause License" for some code 2) All other files: the "GNU LGPL". - Redistributions in binary form must reproduce related license information from this file. - Note: You can use 7-Zip on any computer, including a computer in a commercial organization. You don't need to register or pay for 7-Zip. - - GNU LGPL information -------------------- - This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of + This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. - You can receive a copy of the GNU Lesser General Public License from http://www.gnu.org/ - - - - BSD 3-clause License -------------------- - The "BSD 3-clause License" is used for the code in 7z.dll that implements LZFSE data decompression. - That code was derived from the code in the "LZFSE compression library" developed by Apple Inc, - that also uses the "BSD 3-clause License": - + That code was derived from the code in the "LZFSE compression library" developed by Apple Inc, that also uses the "BSD 3-clause License": ---- Copyright (c) 2015-2016, Apple Inc. All rights reserved. - Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - 3. Neither the name of the copyright holder(s) nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES @@ -66,25 +43,15 @@ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---- - - - - unRAR license restriction ------------------------- - The decompression engine for RAR archives was developed using source code of unRAR program. All copyrights to original unRAR code are owned by Alexander Roshal. - The license for original unRAR code has the following restriction: - - The unRAR sources cannot be used to re-create the RAR compression algorithm, - which is proprietary. Distribution of modified unRAR sources in separate form + The unRAR sources cannot be used to re-create the RAR compression algorithm, which is proprietary. Distribution of modified unRAR sources in separate form or as a part of other software is permitted, provided that it is clearly stated in the documentation and source comments that the code may not be used to develop a RAR (WinRAR) compatible archiver. - - -- Igor Pavlov diff --git a/nzb2media/github_api.py b/nzb2media/github_api.py index dd2087e7..c6890fdd 100644 --- a/nzb2media/github_api.py +++ b/nzb2media/github_api.py @@ -7,7 +7,6 @@ class GitHub: """Simple api wrapper for the Github API v3.""" def __init__(self, github_repo_user, github_repo, branch='master'): - self.github_repo_user = github_repo_user self.github_repo = github_repo self.branch = branch @@ -21,37 +20,21 @@ class GitHub: def commits(self): """ Get the 100 most recent commits from the specified user/repo/branch, starting from HEAD. - user: The github username of the person whose repo you're querying repo: The repo name to query branch: Optional, the branch name to show commits from - Returns a deserialized json object containing the commit info. See http://developer.github.com/v3/repos/commits/ """ - return self._access_api( - ['repos', self.github_repo_user, self.github_repo, 'commits'], - params={'per_page': 100, 'sha': self.branch}, - ) + return self._access_api(['repos', self.github_repo_user, self.github_repo, 'commits'], params={'per_page': 100, 'sha': self.branch}) def compare(self, base, head, per_page=1): """ Get compares between base and head. - user: The github username of the person whose repo you're querying repo: The repo name to query base: Start compare from branch head: Current commit sha or branch name to compare per_page: number of items per page - Returns a deserialized json object containing the compare info. 
See http://developer.github.com/v3/repos/commits/ """ - return self._access_api( - [ - 'repos', - self.github_repo_user, - self.github_repo, - 'compare', - f'{base}...{head}', - ], - params={'per_page': per_page}, - ) + return self._access_api(['repos', self.github_repo_user, self.github_repo, 'compare', f'{base}...{head}'], params={'per_page': per_page}) diff --git a/nzb2media/main_db.py b/nzb2media/main_db.py index e3d410ab..b8ff7c75 100644 --- a/nzb2media/main_db.py +++ b/nzb2media/main_db.py @@ -14,9 +14,7 @@ log.addHandler(logging.NullHandler()) def db_filename(filename='nzbtomedia.db', suffix=None): """ Return the correct location of the database file. - - @param filename: The sqlite database filename to use. If not specified, - will be made to be nzbtomedia.db + @param filename: The sqlite database filename to use. If not specified, will be made to be nzbtomedia.db @param suffix: The suffix to append to the filename. A '.' will be added automatically, i.e. suffix='v0' will make dbfile.db.v0 @return: the correct location of the database file. @@ -28,7 +26,6 @@ def db_filename(filename='nzbtomedia.db', suffix=None): class DBConnection: def __init__(self, filename='nzbtomedia.db', suffix=None, row_type=None): - self.filename = filename self.connection = sqlite3.connect(db_filename(filename), 20) self.connection.row_factory = sqlite3.Row @@ -40,7 +37,6 @@ class DBConnection: except sqlite3.OperationalError as error: if 'no such table: db_version' in error.args[0]: return 0 - if result: return int(result[0]['db_version']) return 0 @@ -48,10 +44,8 @@ class DBConnection: def fetch(self, query, args=None): if query is None: return - sql_result = None attempt = 0 - while attempt < 5: try: if args is None: @@ -64,14 +58,10 @@ class DBConnection: cursor = self.connection.cursor() cursor.execute(query, args) sql_result = cursor.fetchone()[0] - # get out of the connection attempt loop since we were successful break except sqlite3.OperationalError as error: - if ( - 'unable to open database file' in error.args[0] - or 'database is locked' in error.args[0] - ): + if 'unable to open database file' in error.args[0] or 'database is locked' in error.args[0]: log.warning(f'DB error: {error}') attempt += 1 time.sleep(1) @@ -81,16 +71,13 @@ class DBConnection: except sqlite3.DatabaseError as error: log.error(f'Fatal error executing query: {error}') raise - return sql_result def mass_action(self, querylist, log_transaction=False): if querylist is None: return - sql_result = [] attempt = 0 - while attempt < 5: try: for query in querylist: @@ -101,9 +88,7 @@ class DBConnection: elif len(query) > 1: if log_transaction: log.debug(f'{query[0]} with args {query[1]}') - sql_result.append( - self.connection.execute(query[0], query[1]), - ) + sql_result.append(self.connection.execute(query[0], query[1])) self.connection.commit() log.debug(f'Transaction with {len(querylist)} query\'s executed') return sql_result @@ -111,10 +96,7 @@ class DBConnection: sql_result = [] if self.connection: self.connection.rollback() - if ( - 'unable to open database file' in error.args[0] - or 'database is locked' in error.args[0] - ): + if 'unable to open database file' in error.args[0] or 'database is locked' in error.args[0]: log.warning(f'DB error: {error}') attempt += 1 time.sleep(1) @@ -126,16 +108,13 @@ class DBConnection: self.connection.rollback() log.error(f'Fatal error executing query: {error}') raise - return sql_result def action(self, query, args=None): if query is None: return - sql_result = None attempt = 0 - while 
attempt < 5: try: if args is None: @@ -148,10 +127,7 @@ class DBConnection: # get out of the connection attempt loop since we were successful break except sqlite3.OperationalError as error: - if ( - 'unable to open database file' in error.args[0] - or 'database is locked' in error.args[0] - ): + if 'unable to open database file' in error.args[0] or 'database is locked' in error.args[0]: log.warning(f'DB error: {error}') attempt += 1 time.sleep(1) @@ -161,16 +137,12 @@ class DBConnection: except sqlite3.DatabaseError as error: log.error(f'Fatal error executing query: {error}') raise - return sql_result def select(self, query, args=None): - sql_results = self.action(query, args).fetchall() - if sql_results is None: return [] - return sql_results def upsert(self, table_name, value_dict, key_dict): @@ -179,27 +151,9 @@ class DBConnection: changes_before = self.connection.total_changes items = list(value_dict.values()) + list(key_dict.values()) - self.action( - 'UPDATE {table} ' - 'SET {params} ' - 'WHERE {conditions}'.format( - table=table_name, - params=', '.join(gen_params(value_dict)), - conditions=' AND '.join(gen_params(key_dict)), - ), - items, - ) - + self.action('UPDATE {table} SET {params} WHERE {conditions}'.format(table=table_name, params=', '.join(gen_params(value_dict)), conditions=' AND '.join(gen_params(key_dict))), items) if self.connection.total_changes == changes_before: - self.action( - 'INSERT OR IGNORE INTO {table} ({columns}) ' - 'VALUES ({values})'.format( - table=table_name, - columns=', '.join(map(str, value_dict.keys())), - values=', '.join(['?'] * len(value_dict.values())), - ), - list(value_dict.values()), - ) + self.action('INSERT OR IGNORE INTO {table} ({columns}) VALUES ({values})'.format(table=table_name, columns=', '.join(map(str, value_dict.keys())), values=', '.join(['?'] * len(value_dict.values()))), list(value_dict.values())) def table_info(self, table_name): # FIXME ? 
binding is not supported here, but I cannot find a way to escape a string manually @@ -222,17 +176,13 @@ class DBSanityCheck: # =============== # = Upgrade API = # =============== - - def upgrade_database(connection, schema): log.info('Checking database structure...') _process_upgrade(connection, schema) def pretty_name(class_name): - return ' '.join( - [x.group() for x in re.finditer('([A-Z])([a-z0-9]+)', class_name)], - ) + return ' '.join([x.group() for x in re.finditer('([A-Z])([a-z0-9]+)', class_name)]) def _process_upgrade(connection, upgrade_class): @@ -243,14 +193,11 @@ def _process_upgrade(connection, upgrade_class): try: instance.execute() except sqlite3.DatabaseError as error: - print( - f'Error in {upgrade_class.__name__}: {error}', - ) + print(f'Error in {upgrade_class.__name__}: {error}') raise log.debug(f'{upgrade_class.__name__} upgrade completed') else: log.debug(f'{upgrade_class.__name__} upgrade not required') - for upgrade_sub_class in upgrade_class.__subclasses__(): _process_upgrade(connection, upgrade_sub_class) @@ -261,15 +208,7 @@ class SchemaUpgrade: self.connection = connection def has_table(self, table_name): - return ( - len( - self.connection.action( - 'SELECT 1 FROM sqlite_master WHERE name = ?;', - (table_name,), - ).fetchall(), - ) - > 0 - ) + return len(self.connection.action('SELECT 1 FROM sqlite_master WHERE name = ?;', (table_name,)).fetchall()) > 0 def has_column(self, table_name, column): return column in self.connection.table_info(table_name) @@ -286,7 +225,5 @@ class SchemaUpgrade: def inc_db_version(self): new_version = self.check_db_version() + 1 - self.connection.action( - 'UPDATE db_version SET db_version = ?', [new_version], - ) + self.connection.action('UPDATE db_version SET db_version = ?', [new_version]) return new_version diff --git a/nzb2media/managers/pymedusa.py b/nzb2media/managers/pymedusa.py index d70fb403..841f80b0 100644 --- a/nzb2media/managers/pymedusa.py +++ b/nzb2media/managers/pymedusa.py @@ -19,12 +19,7 @@ class PyMedusa(SickBeard): @property def url(self): route = f'{self.sb_init.web_root}/home/postprocess/processEpisode' - return nzb2media.utils.common.create_url( - self.sb_init.protocol, - self.sb_init.host, - self.sb_init.port, - route, - ) + return nzb2media.utils.common.create_url(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, route) class PyMedusaApiV1(SickBeard): @@ -33,54 +28,25 @@ class PyMedusaApiV1(SickBeard): @property def url(self) -> str: route = f'{self.sb_init.web_root}/api/{self.sb_init.apikey}/' - return nzb2media.utils.common.create_url( - self.sb_init.protocol, - self.sb_init.host, - self.sb_init.port, - route, - ) + return nzb2media.utils.common.create_url(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, route) def api_call(self) -> ProcessResult: self._process_fork_prarams() log.debug(f'Opening URL: {self.url} with params: {self.sb_init.fork_params}') try: - response = self.session.get( - self.url, - auth=(self.sb_init.username, self.sb_init.password), - params=self.sb_init.fork_params, - stream=True, - verify=False, - timeout=(30, 1800), - ) + response = self.session.get(self.url, auth=(self.sb_init.username, self.sb_init.password), params=self.sb_init.fork_params, stream=True, verify=False, timeout=(30, 1800)) except requests.ConnectionError: log.error(f'Unable to open URL: {self.url}') - return ProcessResult.failure( - f'{self.sb_init.section}: Failed to post-process - Unable to ' - f'connect to {self.sb_init.section}', - ) - - successful_status_codes = [ - requests.codes.ok, 
- requests.codes.created, - requests.codes.accepted, - ] + return ProcessResult.failure(f'{self.sb_init.section}: Failed to post-process - Unable to connect to {self.sb_init.section}') + successful_status_codes = [requests.codes.ok, requests.codes.created, requests.codes.accepted] if response.status_code not in successful_status_codes: log.error(f'Server returned status {response.status_code}') - result = ProcessResult.failure( - f'{self.sb_init.section}: Failed to post-process - Server ' - f'returned status {response.status_code}', - ) + result = ProcessResult.failure(f'{self.sb_init.section}: Failed to post-process - Server returned status {response.status_code}') elif response.json()['result'] == 'success': - result = ProcessResult.success( - f'{self.sb_init.section}: Successfully post-processed ' - f'{self.input_name}', - ) + result = ProcessResult.success(f'{self.sb_init.section}: Successfully post-processed {self.input_name}') else: # We did not receive Success confirmation. - result = ProcessResult.failure( - f'{self.sb_init.section}: Failed to post-process - Returned ' - f'log from {self.sb_init.section} was not as expected.', - ) + result = ProcessResult.failure(f'{self.sb_init.section}: Failed to post-process - Returned log from {self.sb_init.section} was not as expected.') return result @@ -89,25 +55,16 @@ class PyMedusaApiV2(SickBeard): def __init__(self, sb_init): super().__init__(sb_init) - # Check for an apikey # This is required with using fork = medusa-apiv2 if not sb_init.apikey: - log.error( - 'For the section SickBeard `fork = medusa-apiv2` you also ' - 'need to configure an `apikey`', - ) + log.error('For the section SickBeard `fork = medusa-apiv2` you also need to configure an `apikey`') raise ValueError('Missing apikey for fork: medusa-apiv2') @property def url(self): route = f'{self.sb_init.web_root}/api/v2/postprocess' - return nzb2media.utils.common.create_url( - self.sb_init.protocol, - self.sb_init.host, - self.sb_init.port, - route, - ) + return nzb2media.utils.common.create_url(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, route) def _get_identifier_status(self, url): # Loop through requesting medusa for the status on the queueitem. 
@@ -116,12 +73,10 @@ class PyMedusaApiV2(SickBeard): except requests.ConnectionError: log.error('Unable to get postprocess identifier status') return False - try: jdata = response.json() except ValueError: return False - return jdata def api_call(self) -> ProcessResult: @@ -130,29 +85,15 @@ class PyMedusaApiV2(SickBeard): payload = self.sb_init.fork_params payload['resource'] = self.sb_init.fork_params['nzbName'] del payload['nzbName'] - # Update the session with the x-api-key - headers = { - 'x-api-key': self.sb_init.apikey, - 'Content-type': 'application/json', - } + headers = {'x-api-key': self.sb_init.apikey, 'Content-type': 'application/json'} self.session.headers.update(headers) - # Send postprocess request try: - response = self.session.post( - self.url, - json=payload, - verify=False, - timeout=(30, 1800), - ) + response = self.session.post(self.url, json=payload, verify=False, timeout=(30, 1800)) except requests.ConnectionError: log.error('Unable to send postprocess request') - return ProcessResult.failure( - f'{self.sb_init.section}: Unable to send postprocess request ' - f'to PyMedusa', - ) - + return ProcessResult.failure(f'{self.sb_init.section}: Unable to send postprocess request to PyMedusa') # Get UUID if response: try: @@ -162,15 +103,12 @@ class PyMedusaApiV2(SickBeard): return ProcessResult.failure('No data returned from provider') else: jdata = {} - status = jdata.get('status', None) if status != 'success': return ProcessResult.failure() - wait_for = int(self.sb_init.config.get('wait_for', 2)) num = 0 response = {} - queue_item_identifier = jdata['queueItem']['identifier'] url = f'{self.url}/{queue_item_identifier}' while num < 12: # set up wait_for minutes to see if command completes.. @@ -181,24 +119,16 @@ class PyMedusaApiV2(SickBeard): if 'error' in response: break num += 1 - # Log Medusa's PP logs here. if response.get('output'): for line in response['output']: log.debug(line) - # For now this will most likely always be True. # In the future we could return an exit state for when the PP in # medusa didn't yield an expected result. if response.get('success'): - result = ProcessResult.success( - f'{self.sb_init.section}: Successfully post-processed ' - f'{self.input_name}', - ) + result = ProcessResult.success(f'{self.sb_init.section}: Successfully post-processed {self.input_name}') else: # We did not receive Success confirmation. - result = ProcessResult.failure( - f'{self.sb_init.section}: Failed to post-process - Returned ' - f'log from {self.sb_init.section} was not as expected.', - ) + result = ProcessResult.failure(f'{self.sb_init.section}: Failed to post-process - Returned log from {self.sb_init.section} was not as expected.') return result diff --git a/nzb2media/managers/sickbeard.py b/nzb2media/managers/sickbeard.py index 3f931d4e..ce9d5ce7 100644 --- a/nzb2media/managers/sickbeard.py +++ b/nzb2media/managers/sickbeard.py @@ -17,7 +17,6 @@ log.addHandler(logging.NullHandler()) class InitSickBeard: """SickBeard init class. - Used to determine which SickBeard fork object to initialize. 
""" @@ -26,7 +25,6 @@ class InitSickBeard: self.config = cfg self.section = section self.input_category = input_category - self.host = cfg['host'] self.port = cfg['port'] self.ssl = int(cfg.get('ssl', 0)) @@ -38,19 +36,10 @@ class InitSickBeard: self.api_version = int(cfg.get('api_version', 2)) self.sso_username = cfg.get('sso_username', '') self.sso_password = cfg.get('sso_password', '') - self.fork = '' self.fork_params = None self.fork_obj = None - - replace = { - 'medusa': 'Medusa', - 'medusa-api': 'Medusa-api', - 'sickbeard-api': 'SickBeard-api', - 'sickgear': 'SickGear', - 'sickchill': 'SickChill', - 'stheno': 'Stheno', - } + replace = {'medusa': 'Medusa', 'medusa-api': 'Medusa-api', 'sickbeard-api': 'SickBeard-api', 'sickgear': 'SickGear', 'sickchill': 'SickChill', 'stheno': 'Stheno'} _val = cfg.get('fork', 'auto') fork_name = replace.get(_val, _val) try: @@ -64,24 +53,10 @@ class InitSickBeard: # config settings if nzb2media.FORK_SET: # keep using determined fork for multiple (manual) post-processing - log.info( - f'{self.section}:{self.input_category} fork already set to ' - f'{nzb2media.FORK_SET[0]}', - ) + log.info(f'{self.section}:{self.input_category} fork already set to {nzb2media.FORK_SET[0]}') return nzb2media.FORK_SET[0], nzb2media.FORK_SET[1] - cfg = dict(nzb2media.CFG[self.section][self.input_category]) - - replace = { - 'medusa': 'Medusa', - 'medusa-api': 'Medusa-api', - 'medusa-apiv1': 'Medusa-api', - 'medusa-apiv2': 'Medusa-apiv2', - 'sickbeard-api': 'SickBeard-api', - 'sickgear': 'SickGear', - 'sickchill': 'SickChill', - 'stheno': 'Stheno', - } + replace = {'medusa': 'Medusa', 'medusa-api': 'Medusa-api', 'medusa-apiv1': 'Medusa-api', 'medusa-apiv2': 'Medusa-apiv2', 'sickbeard-api': 'SickBeard-api', 'sickgear': 'SickGear', 'sickchill': 'SickChill', 'stheno': 'Stheno'} _val = cfg.get('fork', 'auto') fork_name = replace.get(_val.lower(), _val) try: @@ -89,118 +64,42 @@ class InitSickBeard: except KeyError: self.fork = 'auto' protocol = 'https://' if self.ssl else 'http://' - if self.section == 'NzbDrone': log.info(f'Attempting to verify {self.input_category} fork') - url = nzb2media.utils.common.create_url( - scheme=protocol, - host=self.host, - port=self.port, - path=f'{self.web_root}/api/rootfolder', - ) + url = nzb2media.utils.common.create_url(scheme=protocol, host=self.host, port=self.port, path=f'{self.web_root}/api/rootfolder') headers = {'X-Api-Key': self.apikey} try: - response = requests.get( - url, - headers=headers, - stream=True, - verify=False, - ) + response = requests.get(url, headers=headers, stream=True, verify=False) except requests.ConnectionError: - log.warning( - f'Could not connect to {self.section}:' - f'{self.input_category} to verify fork!', - ) - + log.warning(f'Could not connect to {self.section}:{self.input_category} to verify fork!') if not response.ok: - log.warning( - f'Connection to {self.section}:{self.input_category} ' - f'failed! Check your configuration', - ) - + log.warning(f'Connection to {self.section}:{self.input_category} failed! 
Check your configuration') self.fork = ['default', {}] - elif self.section == 'SiCKRAGE': log.info(f'Attempting to verify {self.input_category} fork') - if self.api_version >= 2: - url = nzb2media.utils.common.create_url( - scheme=protocol, - host=self.host, - port=self.port, - path=f'{self.web_root}/api/v{self.api_version}/ping', - ) + url = nzb2media.utils.common.create_url(scheme=protocol, host=self.host, port=self.port, path=f'{self.web_root}/api/v{self.api_version}/ping') api_params = {} else: api_version = f'v{self.api_version}' - url = nzb2media.utils.common.create_url( - scheme=protocol, - host=self.host, - port=self.port, - path=f'{self.web_root}/api/{api_version}/{self.apikey}/', - ) + url = nzb2media.utils.common.create_url(scheme=protocol, host=self.host, port=self.port, path=f'{self.web_root}/api/{api_version}/{self.apikey}/') api_params = {'cmd': 'postprocess', 'help': '1'} - try: - if ( - self.api_version >= 2 - and self.sso_username - and self.sso_password - ): - oauth = OAuth2Session( - client=LegacyApplicationClient( - client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID, - ), - ) - oauth_token = oauth.fetch_token( - client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID, - token_url=nzb2media.SICKRAGE_OAUTH_TOKEN_URL, - username=self.sso_username, - password=self.sso_password, - ) + if self.api_version >= 2 and self.sso_username and self.sso_password: + oauth = OAuth2Session(client=LegacyApplicationClient(client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID)) + oauth_token = oauth.fetch_token(client_id=nzb2media.SICKRAGE_OAUTH_CLIENT_ID, token_url=nzb2media.SICKRAGE_OAUTH_TOKEN_URL, username=self.sso_username, password=self.sso_password) token = oauth_token['access_token'] - response = requests.get( - url, - headers={'Authorization': f'Bearer {token}'}, - stream=True, - verify=False, - ) + response = requests.get(url, headers={'Authorization': f'Bearer {token}'}, stream=True, verify=False) else: - response = requests.get( - url, - params=api_params, - stream=True, - verify=False, - ) - + response = requests.get(url, params=api_params, stream=True, verify=False) if not response.ok: - log.warning( - f'Connection to {self.section}:{self.input_category} ' - f'failed! Check your configuration', - ) + log.warning(f'Connection to {self.section}:{self.input_category} failed! 
Check your configuration') except requests.ConnectionError: - log.warning( - f'Could not connect to {self.section}:' - f'{self.input_category} to verify API version!', - ) - - params = { - 'path': None, - 'failed': None, - 'process_method': None, - 'force_replace': None, - 'return_data': None, - 'type': None, - 'delete': None, - 'force_next': None, - 'is_priority': None, - } - + log.warning(f'Could not connect to {self.section}:{self.input_category} to verify API version!') + params = {'path': None, 'failed': None, 'process_method': None, 'force_replace': None, 'return_data': None, 'type': None, 'delete': None, 'force_next': None, 'is_priority': None} self.fork = ['default', params] - elif self.fork == 'auto': self.detect_fork() - log.info(f'{self.section}:{self.input_category} fork set to {self.fork[0]}') nzb2media.FORK_SET = self.fork self.fork, self.fork_params = self.fork[0], self.fork[1] @@ -216,7 +115,6 @@ class InitSickBeard: log.error('Failed to get JSON data from response') log.debug('Response received') raise - try: json_data = json_data['data'] except KeyError: @@ -227,13 +125,12 @@ class InitSickBeard: if isinstance(json_data, str): return rem_params, False json_data = json_data.get('data', json_data) - try: optional_parameters = json_data['optionalParameters'].keys() # Find excess parameters excess_parameters = set(params).difference(optional_parameters) excess_parameters.remove('cmd') # Don't remove cmd from api params - log.debug(f'Removing excess parameters: ' f'{sorted(excess_parameters)}') + log.debug(f'Removing excess parameters: {sorted(excess_parameters)}') rem_params.extend(excess_parameters) return rem_params, True except Exception: @@ -249,56 +146,26 @@ class InitSickBeard: # Define the order to test. # Default must be first since default fork doesn't reject parameters. # Then in order of most unique parameters. 
- if self.apikey: - url = nzb2media.utils.common.create_url( - scheme=self.protocol, - host=self.host, - port=self.port, - path=f'{self.web_root}/api/{self.apikey}/', - ) + url = nzb2media.utils.common.create_url(scheme=self.protocol, host=self.host, port=self.port, path=f'{self.web_root}/api/{self.apikey}/') api_params = {'cmd': 'sg.postprocess', 'help': '1'} else: - url = nzb2media.utils.common.create_url( - scheme=self.protocol, - host=self.host, - port=self.port, - path=f'{self.web_root}/home/postprocess', - ) + url = nzb2media.utils.common.create_url(scheme=self.protocol, host=self.host, port=self.port, path=f'{self.web_root}/home/postprocess') api_params = {} - # attempting to auto-detect fork try: session = requests.Session() - if not self.apikey and self.username and self.password: - login = nzb2media.utils.common.create_url( - scheme=self.protocol, - host=self.host, - port=self.port, - path=f'{self.web_root}/login', - ) - login_params = { - 'username': self.username, - 'password': self.password, - } + login = nzb2media.utils.common.create_url(scheme=self.protocol, host=self.host, port=self.port, path=f'{self.web_root}/login') + login_params = {'username': self.username, 'password': self.password} response = session.get(login, verify=False, timeout=(30, 60)) if response.status_code in [401, 403] and response.cookies.get('_xsrf'): login_params['_xsrf'] = response.cookies.get('_xsrf') session.post(login, data=login_params, stream=True, verify=False) - response = session.get( - url, - auth=(self.username, self.password), - params=api_params, - verify=False, - ) + response = session.get(url, auth=(self.username, self.password), params=api_params, verify=False) except requests.ConnectionError: - log.info( - f'Could not connect to {self.section}:{self.input_category} ' - f'to perform auto-fork detection!', - ) + log.info(f'Could not connect to {self.section}:{self.input_category} to perform auto-fork detection!') response = [] - if response and response.ok: if self.apikey: rem_params, found = self._api_check(response, params, rem_params) @@ -308,78 +175,44 @@ class InitSickBeard: api_params = {'cmd': 'help', 'subject': 'postprocess'} try: if not self.apikey and self.username and self.password: - response = session.get( - url, - auth=(self.username, self.password), - params=api_params, - verify=False, - ) + response = session.get(url, auth=(self.username, self.password), params=api_params, verify=False) else: response = session.get(url, params=api_params, verify=False) except requests.ConnectionError: - log.info( - f'Could not connect to {self.section}:' - f'{self.input_category} to perform auto-fork ' - f'detection!', - ) + log.info(f'Could not connect to {self.section}:{self.input_category} to perform auto-fork detection!') rem_params, found = self._api_check(response, params, rem_params) params['cmd'] = 'postprocess' else: # Find excess parameters - rem_params.extend( - param - for param in params - if f'name="{param}"' not in response.text - ) - + rem_params.extend(param for param in params if f'name="{param}"' not in response.text) # Remove excess params for param in rem_params: params.pop(param) - for fork in sorted(nzb2media.FORKS, reverse=False): if params == fork[1]: detected = True break - if detected: self.fork = fork - log.info( - f'{self.section}:{self.input_category} fork auto-detection ' - f'successful ...', - ) + log.info(f'{self.section}:{self.input_category} fork auto-detection successful ...') elif rem_params: - log.info( - f'{self.section}:{self.input_category} fork 
auto-detection ' - f'found custom params {params}', - ) + log.info(f'{self.section}:{self.input_category} fork auto-detection found custom params {params}') self.fork = ['custom', params] else: - log.info( - f'{self.section}:{self.input_category} fork auto-detection ' - f'failed', - ) - self.fork = list(nzb2media.FORKS.items())[ - list(nzb2media.FORKS.keys()).index(nzb2media.FORK_DEFAULT) - ] + log.info(f'{self.section}:{self.input_category} fork auto-detection failed') + self.fork = list(nzb2media.FORKS.items())[list(nzb2media.FORKS.keys()).index(nzb2media.FORK_DEFAULT)] def _init_fork(self): # These need to be imported here, to prevent a circular import. from .pymedusa import PyMedusa, PyMedusaApiV1, PyMedusaApiV2 - mapped_forks = { - 'Medusa': PyMedusa, - 'Medusa-api': PyMedusaApiV1, - 'Medusa-apiv2': PyMedusaApiV2, - } + mapped_forks = {'Medusa': PyMedusa, 'Medusa-api': PyMedusaApiV1, 'Medusa-apiv2': PyMedusaApiV2} log.debug(f'Create object for fork {self.fork}') if self.fork and mapped_forks.get(self.fork): # Create the fork object and pass self (SickBeardInit) to it for all the data, like Config. self.fork_obj = mapped_forks[self.fork](self) else: - log.info( - f'{self.section}:{self.input_category} Could not create a ' - f'fork object for {self.fork}. Probaly class not added yet.', - ) + log.info(f'{self.section}:{self.input_category} Could not create a fork object for {self.fork}. Probably class not added yet.') class SickBeard: @@ -391,17 +224,12 @@ class SickBeard: """SB constructor.""" self.sb_init = sb_init self.session = requests.Session() - self.failed = None self.status = None self.input_name = None self.dir_name = None - self.delete_failed = int(self.sb_init.config.get('delete_failed', 0)) - self.nzb_extraction_by = self.sb_init.config.get( - 'nzbExtractionBy', - 'Downloader', - ) + self.nzb_extraction_by = self.sb_init.config.get('nzbExtractionBy', 'Downloader') self.process_method = self.sb_init.config.get('process_method') self.remote_path = int(self.sb_init.config.get('remote_path', 0)) self.wait_for = int(self.sb_init.config.get('wait_for', 2)) @@ -409,22 +237,13 @@ class SickBeard: self.delete_on = int(self.sb_init.config.get('delete_on', 0)) self.ignore_subs = int(self.sb_init.config.get('ignore_subs', 0)) self.is_priority = int(self.sb_init.config.get('is_priority', 0)) - # get importmode, default to 'Move' for consistency with legacy self.import_mode = self.sb_init.config.get('importMode', 'Move') - # Keep track of result state self.success = False - def initialize( - self, - dir_name, - input_name=None, - failed=False, - client_agent='manual', - ): + def initialize(self, dir_name, input_name=None, failed=False, client_agent='manual'): """We need to call this explicitely because we need some variables. - We can't pass these directly through the constructor. 
""" self.dir_name = dir_name @@ -435,10 +254,7 @@ class SickBeard: self.extract = 0 else: self.extract = int(self.sb_init.config.get('extract', 0)) - if ( - client_agent == nzb2media.TORRENT_CLIENT_AGENT - and nzb2media.USE_LINK == 'move-sym' - ): + if client_agent == nzb2media.TORRENT_CLIENT_AGENT and nzb2media.USE_LINK == 'move-sym': self.process_method = 'symlink' @property @@ -447,12 +263,7 @@ class SickBeard: route = f'{self.sb_init.web_root}/api/{self.sb_init.apikey}/' else: route = f'{self.sb_init.web_root}/home/postprocess/processEpisode' - return nzb2media.utils.common.create_url( - scheme=self.sb_init.protocol, - host=self.sb_init.host, - port=self.sb_init.port, - path=route, - ) + return nzb2media.utils.common.create_url(scheme=self.sb_init.protocol, host=self.sb_init.host, port=self.sb_init.port, path=route) def _process_fork_prarams(self): # configure SB params to pass @@ -461,7 +272,6 @@ class SickBeard: fork_params['proc_type'] = 'manual' if self.input_name is not None: fork_params['nzbName'] = self.input_name - for param in copy.copy(fork_params): if param == 'failed': if self.failed > 1: @@ -471,66 +281,50 @@ class SickBeard: del fork_params['proc_type'] if 'type' in fork_params: del fork_params['type'] - if param == 'return_data': fork_params[param] = 0 if 'quiet' in fork_params: del fork_params['quiet'] - if param == 'type': if 'type' in fork_params: # Set if we haven't already deleted for 'failed' above. fork_params[param] = 'manual' if 'proc_type' in fork_params: del fork_params['proc_type'] - - if param in [ - 'dir_name', - 'dir', - 'proc_dir', - 'process_directory', - 'path', - ]: + if param in ['dir_name', 'dir', 'proc_dir', 'process_directory', 'path']: fork_params[param] = self.dir_name if self.remote_path: fork_params[param] = remote_dir(self.dir_name) # SickChill allows multiple path types. Only retunr 'path' if param == 'proc_dir' and 'path' in fork_params: del fork_params['proc_dir'] - if param == 'process_method': if self.process_method: fork_params[param] = self.process_method else: del fork_params[param] - if param in ['force', 'force_replace']: if self.force: fork_params[param] = self.force else: del fork_params[param] - if param in ['delete_on', 'delete']: if self.delete_on: fork_params[param] = self.delete_on else: del fork_params[param] - if param == 'ignore_subs': if self.ignore_subs: fork_params[param] = self.ignore_subs else: del fork_params[param] - if param == 'is_priority': if self.is_priority: fork_params[param] = self.is_priority else: del fork_params[param] - if param == 'force_next': fork_params[param] = 1 - # delete any unused params so we don't pass them to SB by mistake [fork_params.pop(k) for k, v in list(fork_params.items()) if v is None] @@ -539,66 +333,30 @@ class SickBeard: self._process_fork_prarams() log.debug(f'Opening URL: {self.url} with params: {self.sb_init.fork_params}') try: - if ( - not self.sb_init.apikey - and self.sb_init.username - and self.sb_init.password - ): + if not self.sb_init.apikey and self.sb_init.username and self.sb_init.password: # If not using the api, we need to login using user/pass first. 
route = f'{self.sb_init.web_root}/login' - login = nzb2media.utils.common.create_url( - self.sb_init.protocol, - self.sb_init.host, - self.sb_init.port, - route, - ) - login_params = { - 'username': self.sb_init.username, - 'password': self.sb_init.password, - } + login = nzb2media.utils.common.create_url(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, route) + login_params = {'username': self.sb_init.username, 'password': self.sb_init.password} response = self.session.get(login, verify=False, timeout=(30, 60)) if response.status_code in [401, 403] and response.cookies.get('_xsrf'): login_params['_xsrf'] = response.cookies.get('_xsrf') - self.session.post( - login, - data=login_params, - stream=True, - verify=False, - timeout=(30, 60), - ) - response = self.session.get( - self.url, - auth=(self.sb_init.username, self.sb_init.password), - params=self.sb_init.fork_params, - stream=True, - verify=False, - timeout=(30, 1800), - ) + self.session.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60)) + response = self.session.get(self.url, auth=(self.sb_init.username, self.sb_init.password), params=self.sb_init.fork_params, stream=True, verify=False, timeout=(30, 1800)) except requests.ConnectionError: log.error(f'Unable to open URL: {self.url}') - result = ProcessResult.failure( - f'{self.sb_init.section}: Failed to post-process - Unable to ' - f'connect to {self.sb_init.section}', - ) + result = ProcessResult.failure(f'{self.sb_init.section}: Failed to post-process - Unable to connect to {self.sb_init.section}') else: - successful_statuses = [ - requests.codes.ok, - requests.codes.created, - requests.codes.accepted, - ] + successful_statuses = [requests.codes.ok, requests.codes.created, requests.codes.accepted] if response.status_code not in successful_statuses: log.error(f'Server returned status {response.status_code}') - result = ProcessResult.failure( - f'{self.sb_init.section}: Failed to post-process - Server ' - f'returned status {response.status_code}', - ) + result = ProcessResult.failure(f'{self.sb_init.section}: Failed to post-process - Server returned status {response.status_code}') else: result = self.process_response(response) return result def process_response(self, response: requests.Response) -> ProcessResult: """Iterate over the lines returned, and log. - :param response: Streamed Requests response object. This method will need to be overwritten in the forks, for alternative response handling. """ @@ -610,23 +368,12 @@ class SickBeard: # input_name = os.path.split(line)[1] # if 'added to the queue' in line: # queued = True - # For the refactoring i'm only considering vanilla sickbeard, - # as for the base class. - if ( - 'Processing succeeded' in line - or 'Successfully processed' in line - ): + # For the refactoring i'm only considering vanilla sickbeard, as for the base class. + if 'Processing succeeded' in line or 'Successfully processed' in line: self.success = True - if self.success: - result = ProcessResult.success( - f'{self.sb_init.section}: Successfully post-processed ' - f'{self.input_name}', - ) + result = ProcessResult.success(f'{self.sb_init.section}: Successfully post-processed {self.input_name}') else: # We did not receive Success confirmation. 
- result = ProcessResult.failure( - f'{self.sb_init.section}: Failed to post-process - Returned ' - f'log from {self.sb_init.section} was not as expected.', - ) + result = ProcessResult.failure(f'{self.sb_init.section}: Failed to post-process - Returned log from {self.sb_init.section} was not as expected.') return result diff --git a/nzb2media/nzb/configuration.py b/nzb2media/nzb/configuration.py index 299aa9e0..ecd33b4a 100644 --- a/nzb2media/nzb/configuration.py +++ b/nzb2media/nzb/configuration.py @@ -8,13 +8,11 @@ def configure_nzbs(config): nzb2media.NZB_CLIENT_AGENT = nzb_config['clientAgent'] # sabnzbd nzb2media.NZB_DEFAULT_DIRECTORY = nzb_config['default_downloadDirectory'] nzb2media.NZB_NO_MANUAL = int(nzb_config['no_manual'], 0) - configure_sabnzbd(nzb_config) def configure_sabnzbd(config): nzb2media.SABNZBD_HOST = config['sabnzbd_host'] - nzb2media.SABNZBD_PORT = int( - config['sabnzbd_port'] or 8080, - ) # defaults to accommodate NzbGet + # defaults to accommodate NzbGet + nzb2media.SABNZBD_PORT = int(config['sabnzbd_port'] or 8080) nzb2media.SABNZBD_APIKEY = config['sabnzbd_apikey'] diff --git a/nzb2media/plugins/plex.py b/nzb2media/plugins/plex.py index 8c220f9c..6585e273 100644 --- a/nzb2media/plugins/plex.py +++ b/nzb2media/plugins/plex.py @@ -16,27 +16,17 @@ def configure_plex(config): nzb2media.PLEX_PORT = config['Plex']['plex_port'] nzb2media.PLEX_TOKEN = config['Plex']['plex_token'] plex_section = config['Plex']['plex_sections'] or [] - if plex_section: if isinstance(plex_section, list): - plex_section = ','.join( - plex_section, - ) # fix in case this imported as list. - plex_section = [ - tuple(item.split(',')) for item in plex_section.split('|') - ] - + plex_section = ','.join(plex_section) # fix in case this imported as list. 
+ plex_section = [tuple(item.split(',')) for item in plex_section.split('|')] nzb2media.PLEX_SECTION = plex_section def plex_update(category): if nzb2media.FAILED: return - url = '{scheme}://{host}:{port}/library/sections/'.format( - scheme='https' if nzb2media.PLEX_SSL else 'http', - host=nzb2media.PLEX_HOST, - port=nzb2media.PLEX_PORT, - ) + url = '{scheme}://{host}:{port}/library/sections/'.format(scheme='https' if nzb2media.PLEX_SSL else 'http', host=nzb2media.PLEX_HOST, port=nzb2media.PLEX_PORT) section = None if not nzb2media.PLEX_SECTION: return @@ -44,7 +34,6 @@ def plex_update(category): for item in nzb2media.PLEX_SECTION: if item[0] == category: section = item[1] - if section: url = f'{url}{section}/refresh?X-Plex-Token={nzb2media.PLEX_TOKEN}' requests.get(url, timeout=(60, 120), verify=False) diff --git a/nzb2media/plugins/subtitles.py b/nzb2media/plugins/subtitles.py index 93703cf5..5c7240c8 100644 --- a/nzb2media/plugins/subtitles.py +++ b/nzb2media/plugins/subtitles.py @@ -7,41 +7,36 @@ import re import subliminal from babelfish import Language -import nzb2media +from nzb2media import GETSUBS +from nzb2media import SLANGUAGES +from nzb2media.utils.files import list_media_files log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) def import_subs(filename): - if not nzb2media.GETSUBS: + if not GETSUBS: return try: - subliminal.region.configure( - 'dogpile.cache.dbm', arguments={'filename': 'cachefile.dbm'}, - ) + subliminal.region.configure('dogpile.cache.dbm', arguments={'filename': 'cachefile.dbm'}) except Exception: pass - languages = set() - for item in nzb2media.SLANGUAGES: + for item in SLANGUAGES: try: languages.add(Language(item)) except Exception: pass if not languages: return - log.info(f'Attempting to download subtitles for {filename}') try: video = subliminal.scan_video(filename) subtitles = subliminal.download_best_subtitles({video}, languages) subliminal.save_subtitles(video, subtitles[video]) - for subtitle in subtitles[video]: - subtitle_path = subliminal.subtitle.get_subtitle_path( - video.name, subtitle.language, - ) + subtitle_path = subliminal.subtitle.get_subtitle_path(video.name, subtitle.language) os.chmod(subtitle_path, 0o644) except Exception as error: log.error(f'Failed to download subtitles for {filename} due to: {error}') @@ -50,31 +45,22 @@ def import_subs(filename): def rename_subs(path): filepaths = [] sub_ext = ['.srt', '.sub', '.idx'] - vidfiles = nzb2media.list_media_files( - path, media=True, audio=False, meta=False, archives=False, - ) - if ( - not vidfiles or len(vidfiles) > 1 - ): # If there is more than 1 video file, or no video files, we can't rename subs. + vidfiles = list_media_files(path, media=True, audio=False, meta=False, archives=False) + if not vidfiles or len(vidfiles) > 1: # If there is more than 1 video file, or no video files, we can't rename subs. return name = os.path.splitext(os.path.split(vidfiles[0])[1])[0] for directory, _, filenames in os.walk(path): for filename in filenames: filepaths.extend([os.path.join(directory, filename)]) - subfiles = [ - item for item in filepaths if os.path.splitext(item)[1] in sub_ext - ] + subfiles = [item for item in filepaths if os.path.splitext(item)[1] in sub_ext] subfiles.sort() # This should sort subtitle names by language (alpha) and Number (where multiple) renamed = [] for sub in subfiles: subname, ext = os.path.splitext(os.path.basename(sub)) - if ( - name in subname - ): # The sub file name already includes the video name. 
- continue - words = re.findall( - '[a-zA-Z]+', str(subname), - ) # find whole words in string + if name in subname: + continue # The sub file name already includes the video name. + # find whole words in string + words = re.findall('[a-zA-Z]+', str(subname)) # parse the words for language descriptors. lan = None for word in words: @@ -95,12 +81,10 @@ def rename_subs(path): new_sub_name = name else: new_sub_name = f'{name}.{str(lan)}' - new_sub = os.path.join( - directory, new_sub_name, - ) # full path and name less ext - if ( - f'{new_sub}{ext}' in renamed - ): # If duplicate names, add unique number before ext. + # full path and name less ext + new_sub = os.path.join(directory, new_sub_name) + if f'{new_sub}{ext}' in renamed: + # If duplicate names, add unique number before ext. for i in range(1, len(renamed) + 1): if f'{new_sub}.{i}{ext}' in renamed: continue diff --git a/nzb2media/processor/manual.py b/nzb2media/processor/manual.py index 3ea4303e..c272bbd4 100644 --- a/nzb2media/processor/manual.py +++ b/nzb2media/processor/manual.py @@ -16,13 +16,8 @@ log.addHandler(logging.NullHandler()) def process(): # Perform Manual Post-Processing log.warning('Invalid number of arguments received from client, Switching to manual run mode ...') - # Post-Processing Result - result = ProcessResult( - message='', - status_code=0, - ) - + result = ProcessResult(message='', status_code=0) for section, subsections in nzb2media.SECTIONS.items(): for subsection in subsections: if not nzb2media.CFG[section][subsection].isenabled(): @@ -30,38 +25,19 @@ def process(): for dir_name in get_dirs(section, subsection, link='move'): log.info(f'Starting manual run for {section}:{subsection} - Folder: {dir_name}') log.info(f'Checking database for download info for {os.path.basename(dir_name)} ...') - - nzb2media.DOWNLOAD_INFO = get_download_info( - os.path.basename(dir_name), - 0, - ) + nzb2media.DOWNLOAD_INFO = get_download_info(os.path.basename(dir_name), 0) if nzb2media.DOWNLOAD_INFO: log.info(f'Found download info for {os.path.basename(dir_name)}, setting variables now ...') - client_agent = ( - nzb2media.DOWNLOAD_INFO[0]['client_agent'] or 'manual' - ) + client_agent = nzb2media.DOWNLOAD_INFO[0]['client_agent'] or 'manual' download_id = nzb2media.DOWNLOAD_INFO[0]['input_id'] or '' else: log.info(f'Unable to locate download info for {os.path.basename(dir_name)}, continuing to try and process this release ...') client_agent = 'manual' download_id = '' - - if ( - client_agent - and client_agent.lower() not in nzb2media.NZB_CLIENTS - ): + if client_agent and client_agent.lower() not in nzb2media.NZB_CLIENTS: continue - input_name = os.path.basename(dir_name) - - results = nzb.process( - dir_name, - input_name, - 0, - client_agent=client_agent, - download_id=download_id or None, - input_category=subsection, - ) + results = nzb.process(dir_name, input_name, 0, client_agent=client_agent, download_id=download_id or None, input_category=subsection) if results.status_code != 0: log.error(f'A problem was reported when trying to perform a manual run for {section}:{subsection}.') result = results diff --git a/nzb2media/processor/nzb.py b/nzb2media/processor/nzb.py index ab060380..0e70ed65 100644 --- a/nzb2media/processor/nzb.py +++ b/nzb2media/processor/nzb.py @@ -25,22 +25,10 @@ log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) -def process( - input_directory, - input_name=None, - status=0, - client_agent='manual', - download_id=None, - input_category=None, - failure_link=None, -): +def 
process(input_directory, input_name=None, status=0, client_agent='manual', download_id=None, input_category=None, failure_link=None): if nzb2media.SAFE_MODE and input_directory == nzb2media.NZB_DEFAULT_DIRECTORY: log.error(f'The input directory:[{input_directory}] is the Default Download Directory. Please configure category directories to prevent processing of other media.') - return ProcessResult( - message='', - status_code=-1, - ) - + return ProcessResult(message='', status_code=-1) if not download_id and client_agent == 'sabnzbd': download_id = get_nzoid(input_name) if client_agent != 'manual' and not nzb2media.DOWNLOAD_INFO: @@ -54,14 +42,7 @@ def process( except Exception: pass control_value_dict = {'input_directory': input_directory1} - new_value_dict = { - 'input_name': input_name1, - 'input_hash': download_id, - 'input_id': download_id, - 'client_agent': client_agent, - 'status': 0, - 'last_update': datetime.date.today().toordinal(), - } + new_value_dict = {'input_name': input_name1, 'input_hash': download_id, 'input_id': download_id, 'client_agent': client_agent, 'status': 0, 'last_update': datetime.date.today().toordinal()} my_db.upsert('downloads', new_value_dict, control_value_dict) # auto-detect section if input_category is None: @@ -72,35 +53,23 @@ def process( section = nzb2media.CFG.findsection('ALL').isenabled() if section is None: log.error(f'Category:[{input_category}] is not defined or is not enabled. Please rename it or ensure it is enabled for the appropriate section in your autoProcessMedia.cfg and try again.') - return ProcessResult( - message='', - status_code=-1, - ) + return ProcessResult(message='', status_code=-1) usercat = 'ALL' if len(section) > 1: log.error(f'Category:[{input_category}] is not unique, {section.keys()} are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.') - return ProcessResult( - message='', - status_code=-1, - ) + return ProcessResult(message='', status_code=-1) if section: section_name = section.keys()[0] log.info(f'Auto-detected SECTION:{section_name}') else: log.error(f'Unable to locate a section with subsection:{input_category} enabled in your autoProcessMedia.cfg, exiting!') - return ProcessResult( - status_code=-1, - message='', - ) + return ProcessResult(status_code=-1, message='') cfg = dict(nzb2media.CFG[section_name][usercat]) extract = int(cfg.get('extract', 0)) try: if int(cfg.get('remote_path')) and not nzb2media.REMOTE_PATHS: log.error(f'Remote Path is enabled for {section_name}:{input_category} but no Network mount points are defined. 
Please check your autoProcessMedia.cfg, exiting!') - return ProcessResult( - status_code=-1, - message='', - ) + return ProcessResult(status_code=-1, message='') except Exception: remote_path = cfg.get('remote_path') log.error(f'Remote Path {remote_path} is not valid for {section_name}:{input_category} Please set this to either 0 to disable or 1 to enable!') @@ -110,47 +79,17 @@ def process( extract_files(input_directory) log.info(f'Calling {section_name}:{input_category} to post-process:{input_name}') if section_name == 'UserScript': - result = external_script( - input_directory, input_name, input_category, section[usercat], - ) + result = external_script(input_directory, input_name, input_category, section[usercat]) else: - process_map = { - 'CouchPotato': movies.process, - 'Radarr': movies.process, - 'Watcher3': movies.process, - 'SickBeard': tv.process, - 'SiCKRAGE': tv.process, - 'NzbDrone': tv.process, - 'Sonarr': tv.process, - 'LazyLibrarian': books.process, - 'HeadPhones': music.process, - 'Lidarr': music.process, - 'Mylar': comics.process, - 'Gamez': games.process, - } + process_map = {'CouchPotato': movies.process, 'Radarr': movies.process, 'Watcher3': movies.process, 'SickBeard': tv.process, 'SiCKRAGE': tv.process, 'NzbDrone': tv.process, 'Sonarr': tv.process, 'LazyLibrarian': books.process, 'HeadPhones': music.process, 'Lidarr': music.process, 'Mylar': comics.process, 'Gamez': games.process} processor = process_map[section_name] - result = processor( - section=section_name, - dir_name=input_directory, - input_name=input_name, - status=status, - client_agent=client_agent, - download_id=download_id, - input_category=input_category, - failure_link=failure_link, - ) + result = processor(section=section_name, dir_name=input_directory, input_name=input_name, status=status, client_agent=client_agent, download_id=download_id, input_category=input_category, failure_link=failure_link) plex_update(input_category) if result.status_code == 0: if client_agent != 'manual': # update download status in our DB update_download_info_status(input_name, 1) - if section_name not in [ - 'UserScript', - 'NzbDrone', - 'Sonarr', - 'Radarr', - 'Lidarr', - ]: + if section_name not in ['UserScript', 'NzbDrone', 'Sonarr', 'Radarr', 'Lidarr']: # cleanup our processing folders of any misc unwanted files and # empty directories clean_dir(input_directory, section_name, input_category) diff --git a/nzb2media/processor/nzbget.py b/nzb2media/processor/nzbget.py index 95295c20..cd5a57a6 100644 --- a/nzb2media/processor/nzbget.py +++ b/nzb2media/processor/nzbget.py @@ -13,13 +13,7 @@ log.addHandler(logging.NullHandler()) def parse_download_id(): """Parse nzbget download_id from environment.""" - download_id_keys = [ - 'NZBPR_COUCHPOTATO', - 'NZBPR_DRONE', - 'NZBPR_SONARR', - 'NZBPR_RADARR', - 'NZBPR_LIDARR', - ] + download_id_keys = ['NZBPR_COUCHPOTATO', 'NZBPR_DRONE', 'NZBPR_SONARR', 'NZBPR_RADARR', 'NZBPR_LIDARR'] for download_id_key in download_id_keys: try: return os.environ[download_id_key] @@ -102,12 +96,4 @@ def process(): status = parse_status() download_id = parse_download_id() failure_link = parse_failure_link() - return nzb.process( - input_directory=os.environ['NZBPP_DIRECTORY'], - input_name=os.environ['NZBPP_NZBNAME'], - status=status, - client_agent='nzbget', - download_id=download_id, - input_category=os.environ['NZBPP_CATEGORY'], - failure_link=failure_link, - ) + return nzb.process(input_directory=os.environ['NZBPP_DIRECTORY'], input_name=os.environ['NZBPP_NZBNAME'], status=status, 
client_agent='nzbget', download_id=download_id, input_category=os.environ['NZBPP_CATEGORY'], failure_link=failure_link) diff --git a/nzb2media/processor/sab.py b/nzb2media/processor/sab.py index 578677bc..b965dc84 100644 --- a/nzb2media/processor/sab.py +++ b/nzb2media/processor/sab.py @@ -7,27 +7,17 @@ from nzb2media.processor import nzb log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) - MINIMUM_ARGUMENTS = 8 def process_script(): version = os.environ['SAB_VERSION'] log.info(f'Script triggered from SABnzbd {version}.') - return nzb.process( - input_directory=os.environ['SAB_COMPLETE_DIR'], - input_name=os.environ['SAB_FINAL_NAME'], - status=int(os.environ['SAB_PP_STATUS']), - client_agent='sabnzbd', - download_id=os.environ['SAB_NZO_ID'], - input_category=os.environ['SAB_CAT'], - failure_link=os.environ['SAB_FAILURE_URL'], - ) + return nzb.process(input_directory=os.environ['SAB_COMPLETE_DIR'], input_name=os.environ['SAB_FINAL_NAME'], status=int(os.environ['SAB_PP_STATUS']), client_agent='sabnzbd', download_id=os.environ['SAB_NZO_ID'], input_category=os.environ['SAB_CAT'], failure_link=os.environ['SAB_FAILURE_URL']) def process(args): """Process job from SABnzb. - SABnzbd arguments: 1. The final directory of the job (full path) 2. The original name of the NZB file @@ -44,12 +34,4 @@ def process(args): """ version = '0.7.17+' if len(args) > MINIMUM_ARGUMENTS else '' log.info(f'Script triggered from SABnzbd {version}') - return nzb.process( - input_directory=args[1], - input_name=args[2], - status=int(args[7]), - input_category=args[5], - client_agent='sabnzbd', - download_id='', - failure_link=''.join(args[8:]), - ) + return nzb.process(input_directory=args[1], input_name=args[2], status=int(args[7]), input_category=args[5], client_agent='sabnzbd', download_id='', failure_link=''.join(args[8:])) diff --git a/nzb2media/scene_exceptions.py b/nzb2media/scene_exceptions.py index 24d3cb16..fea2486c 100644 --- a/nzb2media/scene_exceptions.py +++ b/nzb2media/scene_exceptions.py @@ -12,66 +12,14 @@ from nzb2media.utils.files import list_media_files log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) - - -reverse_list = [ - r'\.\d{2}e\d{2}s\.', - r'\.[pi]0801\.', - r'\.p027\.', - r'\.[pi]675\.', - r'\.[pi]084\.', - r'\.p063\.', - r'\b[45]62[xh]\.', - r'\.yarulb\.', - r'\.vtd[hp]\.', - r'\.ld[.-]?bew\.', - r'\.pir.?(dov|dvd|bew|db|rb)\.', - r'\brdvd\.', - r'\.vts\.', - r'\.reneercs\.', - r'\.dcv\.', - r'\b(pir|mac)dh\b', - r'\.reporp\.', - r'\.kcaper\.', - r'\.lanretni\.', - r'\b3ca\b', - r'\.cstn\.', -] +reverse_list = [r'\.\d{2}e\d{2}s\.', r'\.[pi]0801\.', r'\.p027\.', r'\.[pi]675\.', r'\.[pi]084\.', r'\.p063\.', r'\b[45]62[xh]\.', r'\.yarulb\.', r'\.vtd[hp]\.', r'\.ld[.-]?bew\.', r'\.pir.?(dov|dvd|bew|db|rb)\.', r'\brdvd\.', r'\.vts\.', r'\.reneercs\.', r'\.dcv\.', r'\b(pir|mac)dh\b', r'\.reporp\.', r'\.kcaper\.', r'\.lanretni\.', r'\b3ca\b', r'\.cstn\.'] reverse_pattern = re.compile('|'.join(reverse_list), flags=re.IGNORECASE) season_pattern = re.compile(r'(.*\.\d{2}e\d{2}s\.)(.*)', flags=re.IGNORECASE) word_pattern = re.compile(r'([^A-Z0-9]*[A-Z0-9]+)') -media_list = [ - r'\.s\d{2}e\d{2}\.', - r'\.1080[pi]\.', - r'\.720p\.', - r'\.576[pi]', - r'\.480[pi]\.', - r'\.360p\.', - r'\.[xh]26[45]\b', - r'\.bluray\.', - r'\.[hp]dtv\.', - r'\.web[.-]?dl\.', - r'\.(vod|dvd|web|bd|br).?rip\.', - r'\.dvdr\b', - r'\.stv\.', - r'\.screener\.', - r'\.vcd\.', - r'\bhd(cam|rip)\b', - r'\.proper\.', - r'\.repack\.', - r'\.internal\.', - r'\bac3\b', - r'\.ntsc\.', - 
r'\.pal\.', - r'\.secam\.', - r'\bdivx\b', - r'\bxvid\b', -] +media_list = [r'\.s\d{2}e\d{2}\.', r'\.1080[pi]\.', r'\.720p\.', r'\.576[pi]', r'\.480[pi]\.', r'\.360p\.', r'\.[xh]26[45]\b', r'\.bluray\.', r'\.[hp]dtv\.', r'\.web[.-]?dl\.', r'\.(vod|dvd|web|bd|br).?rip\.', r'\.dvdr\b', r'\.stv\.', r'\.screener\.', r'\.vcd\.', r'\bhd(cam|rip)\b', r'\.proper\.', r'\.repack\.', r'\.internal\.', r'\bac3\b', r'\.ntsc\.', r'\.pal\.', r'\.secam\.', r'\bdivx\b', r'\bxvid\b'] media_pattern = re.compile('|'.join(media_list), flags=re.IGNORECASE) garbage_name = re.compile(r'^[a-zA-Z0-9]*$') -char_replace = [ - [r'(\w)1\.(\w)', r'\1i\2'], -] +char_replace = [[r'(\w)1\.(\w)', r'\1i\2']] def process_all_exceptions(name, dirname): @@ -112,11 +60,7 @@ def strip_groups(filename): def rename_file(filename, newfile_path): if os.path.isfile(newfile_path): - newfile_path = ( - os.path.splitext(newfile_path)[0] - + '.NTM' - + os.path.splitext(newfile_path)[1] - ) + newfile_path = os.path.splitext(newfile_path)[0] + '.NTM' + os.path.splitext(newfile_path)[1] log.error(f'Replacing file name {filename} with download name {newfile_path}') try: os.rename(filename, newfile_path) @@ -126,10 +70,7 @@ def rename_file(filename, newfile_path): def replace_filename(filename, dirname, name): head, file_extension = os.path.splitext(os.path.basename(filename)) - if ( - media_pattern.search(os.path.basename(dirname).replace(' ', '.')) - is not None - ): + if media_pattern.search(os.path.basename(dirname).replace(' ', '.')) is not None: newname = os.path.basename(dirname).replace(' ', '.') log.debug(f'Replacing file name {head} with directory name {newname}') elif media_pattern.search(name.replace(' ', '.').lower()) is not None: @@ -178,10 +119,7 @@ def rename_script(dirname): break if rename_file: with open(rename_file) as fin: - rename_lines = [ - line.strip() - for line in fin - ] + rename_lines = [line.strip() for line in fin] for line in rename_lines: if re.search('^(mv|Move)', line, re.IGNORECASE): cmd = shlex.split(line)[1:] @@ -189,9 +127,7 @@ def rename_script(dirname): continue if len(cmd) == 2 and os.path.isfile(os.path.join(dirname, cmd[0])): orig = os.path.join(dirname, cmd[0]) - dest = os.path.join( - dirname, cmd[1].split('\\')[-1].split('/')[-1], - ) + dest = os.path.join(dirname, cmd[1].split('\\')[-1].split('/')[-1]) if os.path.isfile(dest): continue log.debug(f'Renaming file {orig} to {dest}') diff --git a/nzb2media/torrent/configuration.py b/nzb2media/torrent/configuration.py index cf604fd5..0e5e131b 100644 --- a/nzb2media/torrent/configuration.py +++ b/nzb2media/torrent/configuration.py @@ -6,17 +6,10 @@ from nzb2media.utils.torrent import create_torrent_class def configure_torrents(config): torrent_config = config['Torrent'] - nzb2media.TORRENT_CLIENT_AGENT = torrent_config[ - 'clientAgent' - ] # utorrent | deluge | transmission | rtorrent | vuze | qbittorrent | synods | other - nzb2media.OUTPUT_DIRECTORY = torrent_config[ - 'outputDirectory' - ] # /abs/path/to/complete/ - nzb2media.TORRENT_DEFAULT_DIRECTORY = torrent_config[ - 'default_downloadDirectory' - ] + nzb2media.TORRENT_CLIENT_AGENT = torrent_config['clientAgent'] # utorrent | deluge | transmission | rtorrent | vuze | qbittorrent | synods | other + nzb2media.OUTPUT_DIRECTORY = torrent_config['outputDirectory'] # /abs/path/to/complete/ + nzb2media.TORRENT_DEFAULT_DIRECTORY = torrent_config['default_downloadDirectory'] nzb2media.TORRENT_NO_MANUAL = int(torrent_config['no_manual'], 0) - configure_torrent_linking(torrent_config) 
configure_flattening(torrent_config) configure_torrent_deletion(torrent_config) @@ -41,9 +34,7 @@ def configure_flattening(config): def configure_torrent_categories(config): - nzb2media.CATEGORIES = config[ - 'categories' - ] # music,music_videos,pictures,software + nzb2media.CATEGORIES = config['categories'] # music,music_videos,pictures,software if isinstance(nzb2media.CATEGORIES, str): nzb2media.CATEGORIES = nzb2media.CATEGORIES.split(',') @@ -62,9 +53,7 @@ def configure_torrent_deletion(config): def configure_utorrent(config): - nzb2media.UTORRENT_WEB_UI = config[ - 'uTorrentWEBui' - ] # http://localhost:8090/gui/ + nzb2media.UTORRENT_WEB_UI = config['uTorrentWEBui'] # http://localhost:8090/gui/ nzb2media.UTORRENT_USER = config['uTorrentUSR'] # mysecretusr nzb2media.UTORRENT_PASSWORD = config['uTorrentPWD'] # mysecretpwr diff --git a/nzb2media/torrent/deluge.py b/nzb2media/torrent/deluge.py index 0325b0ef..e339922f 100644 --- a/nzb2media/torrent/deluge.py +++ b/nzb2media/torrent/deluge.py @@ -16,7 +16,6 @@ def configure_client(): port = nzb2media.DELUGE_PORT user = nzb2media.DELUGE_USER password = nzb2media.DELUGE_PASSWORD - log.debug(f'Connecting to {agent}: http://{host}:{port}') client = DelugeRPCClient(host, port, user, password) try: diff --git a/nzb2media/torrent/qbittorrent.py b/nzb2media/torrent/qbittorrent.py index e60f4ac6..95b7e4f1 100644 --- a/nzb2media/torrent/qbittorrent.py +++ b/nzb2media/torrent/qbittorrent.py @@ -16,7 +16,6 @@ def configure_client(): port = nzb2media.QBITTORRENT_PORT user = nzb2media.QBITTORRENT_USER password = nzb2media.QBITTORRENT_PASSWORD - log.debug(f'Connecting to {agent}: http://{host}:{port}') client = qBittorrentClient(f'http://{host}:{port}/') try: diff --git a/nzb2media/torrent/synology.py b/nzb2media/torrent/synology.py index 85c75e22..573db062 100644 --- a/nzb2media/torrent/synology.py +++ b/nzb2media/torrent/synology.py @@ -15,7 +15,6 @@ def configure_client(): port = nzb2media.SYNO_PORT user = nzb2media.SYNO_USER password = nzb2media.SYNO_PASSWORD - log.debug(f'Connecting to {agent}: http://{host}:{port}') try: client = DownloadStation(host, port, user, password) diff --git a/nzb2media/torrent/transmission.py b/nzb2media/torrent/transmission.py index 2a42b5a5..843f8da8 100644 --- a/nzb2media/torrent/transmission.py +++ b/nzb2media/torrent/transmission.py @@ -16,7 +16,6 @@ def configure_client(): port = nzb2media.TRANSMISSION_PORT user = nzb2media.TRANSMISSION_USER password = nzb2media.TRANSMISSION_PASSWORD - log.debug(f'Connecting to {agent}: http://{host}:{port}') try: client = TransmissionClient(host, port, user, password) diff --git a/nzb2media/torrent/utorrent.py b/nzb2media/torrent/utorrent.py index fc752157..328693fc 100644 --- a/nzb2media/torrent/utorrent.py +++ b/nzb2media/torrent/utorrent.py @@ -15,7 +15,6 @@ def configure_client(): web_ui = nzb2media.UTORRENT_WEB_UI user = nzb2media.UTORRENT_USER password = nzb2media.UTORRENT_PASSWORD - log.debug(f'Connecting to {agent}: {web_ui}') try: client = UTorrentClient(web_ui, user, password) diff --git a/nzb2media/transcoder.py b/nzb2media/transcoder.py index 04e80b76..6a2b6700 100644 --- a/nzb2media/transcoder.py +++ b/nzb2media/transcoder.py @@ -20,20 +20,13 @@ from nzb2media.utils.paths import make_dir log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) - __author__ = 'Justin' def is_video_good(video: pathlib.Path, status, require_lan=None): file_ext = video.suffix disable = False - if ( - file_ext not in nzb2media.MEDIA_CONTAINER - or not nzb2media.FFPROBE - or 
not nzb2media.CHECK_MEDIA - or file_ext in ['.iso'] - or (status > 0 and nzb2media.NOEXTRACTFAILED) - ): + if file_ext not in nzb2media.MEDIA_CONTAINER or not nzb2media.FFPROBE or not nzb2media.CHECK_MEDIA or file_ext in ['.iso'] or (status > 0 and nzb2media.NOEXTRACTFAILED): disable = True else: test_details, res = get_video_details(nzb2media.TEST_FILE) @@ -41,16 +34,8 @@ def is_video_good(video: pathlib.Path, status, require_lan=None): disable = True log.info('DISABLED: ffprobe failed to analyse test file. Stopping corruption check.') if test_details.get('streams'): - vid_streams = [ - item - for item in test_details['streams'] - if 'codec_type' in item and item['codec_type'] == 'video' - ] - aud_streams = [ - item - for item in test_details['streams'] - if 'codec_type' in item and item['codec_type'] == 'audio' - ] + vid_streams = [item for item in test_details['streams'] if 'codec_type' in item and item['codec_type'] == 'video'] + aud_streams = [item for item in test_details['streams'] if 'codec_type' in item and item['codec_type'] == 'audio'] if not (len(vid_streams) > 0 and len(aud_streams) > 0): disable = True log.info('DISABLED: ffprobe failed to analyse streams from test file. Stopping corruption check.') @@ -60,10 +45,8 @@ def is_video_good(video: pathlib.Path, status, require_lan=None): # If it was successful, assume good. return False return True - log.info(f'Checking [{video.name}] for corruption, please stand by ...') video_details, result = get_video_details(video) - if result != 0: log.error(f'FAILED: [{video.name}] is corrupted!') return False @@ -72,24 +55,10 @@ def is_video_good(video: pathlib.Path, status, require_lan=None): log.info(f'FAILED: [{video.name}] returned error [{error_details}].') return False if video_details.get('streams'): - video_streams = [ - item - for item in video_details['streams'] - if item['codec_type'] == 'video' - ] - audio_streams = [ - item - for item in video_details['streams'] - if item['codec_type'] == 'audio' - ] + video_streams = [item for item in video_details['streams'] if item['codec_type'] == 'video'] + audio_streams = [item for item in video_details['streams'] if item['codec_type'] == 'audio'] if require_lan: - valid_audio = [ - item - for item in audio_streams - if 'tags' in item - and 'language' in item['tags'] - and item['tags']['language'] in require_lan - ] + valid_audio = [item for item in audio_streams if 'tags' in item and 'language' in item['tags'] and item['tags']['language'] in require_lan] else: valid_audio = audio_streams if len(video_streams) > 0 and len(valid_audio) > 0: @@ -122,17 +91,7 @@ def get_video_details(videofile, img=None): try: if img: videofile = '-' - command = [ - nzb2media.FFPROBE, - '-v', - 'quiet', - print_format, - 'json', - '-show_format', - '-show_streams', - '-show_error', - videofile, - ] + command = [nzb2media.FFPROBE, '-v', 'quiet', print_format, 'json', '-show_format', '-show_streams', '-show_error', videofile] print_cmd(command) if img: procin = zip_out(file, img) @@ -147,16 +106,7 @@ def get_video_details(videofile, img=None): video_details = json.loads(proc_out.decode()) except Exception: try: # try this again without -show error in case of ffmpeg limitation - command = [ - nzb2media.FFPROBE, - '-v', - 'quiet', - print_format, - 'json', - '-show_format', - '-show_streams', - videofile, - ] + command = [nzb2media.FFPROBE, '-v', 'quiet', print_format, 'json', '-show_format', '-show_streams', videofile] print_cmd(command) if img: procin = zip_out(file, img) @@ -181,16 +131,8 @@ def 
check_vid_file(video_details, result): return False if not video_details.get('streams'): return False - video_streams = [ - item - for item in video_details['streams'] - if item['codec_type'] == 'video' - ] - audio_streams = [ - item - for item in video_details['streams'] - if item['codec_type'] == 'audio' - ] + video_streams = [item for item in video_details['streams'] if item['codec_type'] == 'video'] + audio_streams = [item for item in video_details['streams'] if item['codec_type'] == 'audio'] if len(video_streams) > 0 and len(audio_streams) > 0: return True return False @@ -211,12 +153,8 @@ def build_commands(file, new_dir, movie_name): name = f'{movie_name}.cd{check.groups()[0]}' elif nzb2media.CONCAT and re.match('(.+)[cC][dD][0-9]', name): name = re.sub('([ ._=:-]+[cC][dD][0-9])', '', name) - if ( - ext == nzb2media.VEXTENSION and new_dir == directory - ): # we need to change the name to prevent overwriting itself. - nzb2media.VEXTENSION = ( - f'-transcoded{nzb2media.VEXTENSION}' # adds '-transcoded.ext' - ) + if ext == nzb2media.VEXTENSION and new_dir == directory: # we need to change the name to prevent overwriting itself. + nzb2media.VEXTENSION = f'-transcoded{nzb2media.VEXTENSION}' # adds '-transcoded.ext' new_file = file else: img, data = next(file.items()) @@ -225,20 +163,15 @@ def build_commands(file, new_dir, movie_name): rem_vid = [] for vid in data['files']: video_details, result = get_video_details(vid, img) - if not check_vid_file( - video_details, result, - ): # lets not transcode menu or other clips that don't have audio and video. + if not check_vid_file(video_details, result): + # lets not transcode menu or other clips that don't have audio and video. rem_vid.append(vid) data['files'] = [f for f in data['files'] if f not in rem_vid] new_file = {img: {'name': data['name'], 'files': data['files']}} video_details, result = get_video_details(data['files'][0], img) input_file = '-' file = '-' - - newfile_path = os.path.normpath( - os.path.join(new_dir, name) + nzb2media.VEXTENSION, - ) - + newfile_path = os.path.normpath(os.path.join(new_dir, name) + nzb2media.VEXTENSION) map_cmd = [] video_cmd = [] audio_cmd = [] @@ -246,14 +179,11 @@ def build_commands(file, new_dir, movie_name): sub_cmd = [] meta_cmd = [] other_cmd = [] - - if not video_details or not video_details.get( - 'streams', - ): # we couldn't read streams with ffprobe. Set defaults to try transcoding. + if not video_details or not video_details.get('streams'): + # we couldn't read streams with ffprobe. Set defaults to try transcoding. 
video_streams = [] audio_streams = [] sub_streams = [] - map_cmd.extend(['-map', '0']) if nzb2media.VCODEC: video_cmd.extend(['-c:v', nzb2media.VCODEC]) @@ -273,13 +203,10 @@ def build_commands(file, new_dir, movie_name): video_cmd.extend(['-crf', str(nzb2media.VCRF)]) if nzb2media.VLEVEL: video_cmd.extend(['-level', str(nzb2media.VLEVEL)]) - if nzb2media.ACODEC: audio_cmd.extend(['-c:a', nzb2media.ACODEC]) - if nzb2media.ACODEC in [ - 'aac', - 'dts', - ]: # Allow users to use the experimental AAC codec that's built into recent versions of ffmpeg + if nzb2media.ACODEC in ['aac', 'dts']: + # Allow users to use the experimental AAC codec that's built into recent versions of ffmpeg audio_cmd.extend(['-strict', '-2']) else: audio_cmd.extend(['-c:a', 'copy']) @@ -289,44 +216,20 @@ def build_commands(file, new_dir, movie_name): audio_cmd.extend(['-b:a', str(nzb2media.ABITRATE)]) if nzb2media.OUTPUTQUALITYPERCENT: audio_cmd.extend(['-q:a', str(nzb2media.OUTPUTQUALITYPERCENT)]) - if nzb2media.SCODEC and nzb2media.ALLOWSUBS: sub_cmd.extend(['-c:s', nzb2media.SCODEC]) - elif ( - nzb2media.ALLOWSUBS - ): # Not every subtitle codec can be used for every video container format! + elif nzb2media.ALLOWSUBS: # Not every subtitle codec can be used for every video container format! sub_cmd.extend(['-c:s', 'copy']) else: # http://en.wikibooks.org/wiki/FFMPEG_An_Intermediate_Guide/subtitle_options sub_cmd.extend(['-sn']) # Don't copy the subtitles over - if nzb2media.OUTPUTFASTSTART: other_cmd.extend(['-movflags', '+faststart']) - else: - video_streams = [ - item - for item in video_details['streams'] - if item['codec_type'] == 'video' - ] - audio_streams = [ - item - for item in video_details['streams'] - if item['codec_type'] == 'audio' - ] - sub_streams = [ - item - for item in video_details['streams'] - if item['codec_type'] == 'subtitle' - ] + video_streams = [item for item in video_details['streams'] if item['codec_type'] == 'video'] + audio_streams = [item for item in video_details['streams'] if item['codec_type'] == 'audio'] + sub_streams = [item for item in video_details['streams'] if item['codec_type'] == 'subtitle'] if nzb2media.VEXTENSION not in ['.mkv', '.mpegts']: - sub_streams = [ - item - for item in video_details['streams'] - if item['codec_type'] == 'subtitle' - and item['codec_name'] != 'hdmv_pgs_subtitle' - and item['codec_name'] != 'pgssub' - ] - + sub_streams = [item for item in video_details['streams'] if item['codec_type'] == 'subtitle' and item['codec_name'] != 'hdmv_pgs_subtitle' and item['codec_name'] != 'pgssub'] for video in video_streams: codec = video['codec_name'] frame_rate = video.get('avg_frame_rate', 0) @@ -337,25 +240,17 @@ def build_commands(file, new_dir, movie_name): video_cmd.extend(['-c:v', 'copy']) else: video_cmd.extend(['-c:v', nzb2media.VCODEC]) - if nzb2media.VFRAMERATE and not ( - nzb2media.VFRAMERATE * 0.999 <= frame_rate <= nzb2media.VFRAMERATE * 1.001 - ): + if nzb2media.VFRAMERATE and not (nzb2media.VFRAMERATE * 0.999 <= frame_rate <= nzb2media.VFRAMERATE * 1.001): video_cmd.extend(['-r', str(nzb2media.VFRAMERATE)]) if scale: w_scale = width / float(scale.split(':')[0]) h_scale = height / float(scale.split(':')[1]) if w_scale > h_scale: # widescreen, Scale by width only. 
- scale = '{width}:{height}'.format( - width=scale.split(':')[0], - height=int((height / w_scale) / 2) * 2, - ) + scale = '{width}:{height}'.format(width=scale.split(':')[0], height=int((height / w_scale) / 2) * 2) if w_scale > 1: video_cmd.extend(['-vf', f'scale={scale}']) else: # lower or matching ratio, scale by height only. - scale = '{width}:{height}'.format( - width=int((width / h_scale) / 2) * 2, - height=scale.split(':')[1], - ) + scale = '{width}:{height}'.format(width=int((width / h_scale) / 2) * 2, height=scale.split(':')[1]) if h_scale > 1: video_cmd.extend(['-vf', f'scale={scale}']) if nzb2media.VBITRATE: @@ -369,147 +264,96 @@ def build_commands(file, new_dir, movie_name): no_copy = ['-vf', '-r', '-crf', '-level', '-preset', '-b:v'] if video_cmd[1] == 'copy' and any(i in video_cmd for i in no_copy): video_cmd[1] = nzb2media.VCODEC - if ( - nzb2media.VCODEC == 'copy' - ): # force copy. therefore ignore all other video transcoding. + if nzb2media.VCODEC == 'copy': # force copy. therefore ignore all other video transcoding. video_cmd = ['-c:v', 'copy'] map_cmd.extend(['-map', '0:{index}'.format(index=video['index'])]) break # Only one video needed - used_audio = 0 a_mapped = [] commentary = [] if audio_streams: for i, val in reversed(list(enumerate(audio_streams))): try: - if 'Commentary' in val.get('tags').get( - 'title', - ): # Split out commentry tracks. + if 'Commentary' in val.get('tags').get('title'): + # Split out commentry tracks. commentary.append(val) del audio_streams[i] except Exception: continue try: - audio1 = [ - item - for item in audio_streams - if item['tags']['language'] == nzb2media.ALANGUAGE - ] + audio1 = [item for item in audio_streams if item['tags']['language'] == nzb2media.ALANGUAGE] except Exception: # no language tags. Assume only 1 language. audio1 = audio_streams try: - audio2 = [ - item - for item in audio1 - if item['codec_name'] in nzb2media.ACODEC_ALLOW - ] + audio2 = [item for item in audio1 if item['codec_name'] in nzb2media.ACODEC_ALLOW] except Exception: audio2 = [] try: - audio3 = [ - item - for item in audio_streams - if item['tags']['language'] != nzb2media.ALANGUAGE - ] + audio3 = [item for item in audio_streams if item['tags']['language'] != nzb2media.ALANGUAGE] except Exception: audio3 = [] try: - audio4 = [ - item - for item in audio3 - if item['codec_name'] in nzb2media.ACODEC_ALLOW - ] + audio4 = [item for item in audio3 if item['codec_name'] in nzb2media.ACODEC_ALLOW] except Exception: audio4 = [] - if audio2: # right (or only) language and codec... - map_cmd.extend( - ['-map', '0:{index}'.format(index=audio2[0]['index'])], - ) + map_cmd.extend(['-map', '0:{index}'.format(index=audio2[0]['index'])]) a_mapped.extend([audio2[0]['index']]) bitrate = int(float(audio2[0].get('bit_rate', 0))) / 1000 channels = int(float(audio2[0].get('channels', 0))) audio_cmd.extend([f'-c:a:{used_audio}', 'copy']) elif audio1: # right (or only) language, wrong codec. - map_cmd.extend( - ['-map', '0:{index}'.format(index=audio1[0]['index'])], - ) + map_cmd.extend(['-map', '0:{index}'.format(index=audio1[0]['index'])]) a_mapped.extend([audio1[0]['index']]) bitrate = int(float(audio1[0].get('bit_rate', 0))) / 1000 channels = int(float(audio1[0].get('channels', 0))) - audio_cmd.extend( - [f'-c:a:{used_audio}', nzb2media.ACODEC if nzb2media.ACODEC else 'copy'], - ) - elif audio4: # wrong language, right codec. 
- map_cmd.extend( - ['-map', '0:{index}'.format(index=audio4[0]['index'])], - ) + audio_cmd.extend([f'-c:a:{used_audio}', nzb2media.ACODEC if nzb2media.ACODEC else 'copy']) + elif audio4: + # wrong language, right codec. + map_cmd.extend(['-map', '0:{index}'.format(index=audio4[0]['index'])]) a_mapped.extend([audio4[0]['index']]) bitrate = int(float(audio4[0].get('bit_rate', 0))) / 1000 channels = int(float(audio4[0].get('channels', 0))) audio_cmd.extend([f'-c:a:{used_audio}', 'copy']) - elif ( - audio3 - ): # wrong language, wrong codec. just pick the default audio track - map_cmd.extend( - ['-map', '0:{index}'.format(index=audio3[0]['index'])], - ) + elif audio3: + # wrong language, wrong codec. just pick the default audio track + map_cmd.extend(['-map', '0:{index}'.format(index=audio3[0]['index'])]) a_mapped.extend([audio3[0]['index']]) bitrate = int(float(audio3[0].get('bit_rate', 0))) / 1000 channels = int(float(audio3[0].get('channels', 0))) - audio_cmd.extend( - [f'-c:a:{used_audio}', nzb2media.ACODEC if nzb2media.ACODEC else 'copy'], - ) - + audio_cmd.extend([f'-c:a:{used_audio}', nzb2media.ACODEC if nzb2media.ACODEC else 'copy']) if nzb2media.ACHANNELS and channels and channels > nzb2media.ACHANNELS: audio_cmd.extend([f'-ac:a:{used_audio}', str(nzb2media.ACHANNELS)]) if audio_cmd[1] == 'copy': audio_cmd[1] = nzb2media.ACODEC - if nzb2media.ABITRATE and not ( - nzb2media.ABITRATE * 0.9 < bitrate < nzb2media.ABITRATE * 1.1 - ): + if nzb2media.ABITRATE and not (nzb2media.ABITRATE * 0.9 < bitrate < nzb2media.ABITRATE * 1.1): audio_cmd.extend([f'-b:a:{used_audio}', str(nzb2media.ABITRATE)]) if audio_cmd[1] == 'copy': audio_cmd[1] = nzb2media.ACODEC if nzb2media.OUTPUTQUALITYPERCENT: - audio_cmd.extend( - [f'-q:a:{used_audio}', str(nzb2media.OUTPUTQUALITYPERCENT)], - ) + audio_cmd.extend([f'-q:a:{used_audio}', str(nzb2media.OUTPUTQUALITYPERCENT)]) if audio_cmd[1] == 'copy': audio_cmd[1] = nzb2media.ACODEC if audio_cmd[1] in ['aac', 'dts']: audio_cmd[2:2] = ['-strict', '-2'] - if nzb2media.ACODEC2_ALLOW: used_audio += 1 try: - audio5 = [ - item - for item in audio1 - if item['codec_name'] in nzb2media.ACODEC2_ALLOW - ] + audio5 = [item for item in audio1 if item['codec_name'] in nzb2media.ACODEC2_ALLOW] except Exception: audio5 = [] try: - audio6 = [ - item - for item in audio3 - if item['codec_name'] in nzb2media.ACODEC2_ALLOW - ] + audio6 = [item for item in audio3 if item['codec_name'] in nzb2media.ACODEC2_ALLOW] except Exception: audio6 = [] if audio5: # right language and codec. - map_cmd.extend( - ['-map', '0:{index}'.format(index=audio5[0]['index'])], - ) + map_cmd.extend(['-map', '0:{index}'.format(index=audio5[0]['index'])]) a_mapped.extend([audio5[0]['index']]) bitrate = int(float(audio5[0].get('bit_rate', 0))) / 1000 channels = int(float(audio5[0].get('channels', 0))) audio_cmd2.extend([f'-c:a:{used_audio}', 'copy']) elif audio1: # right language wrong codec. 
- map_cmd.extend( - ['-map', '0:{index}'.format(index=audio1[0]['index'])], - ) + map_cmd.extend(['-map', '0:{index}'.format(index=audio1[0]['index'])]) a_mapped.extend([audio1[0]['index']]) bitrate = int(float(audio1[0].get('bit_rate', 0))) / 1000 channels = int(float(audio1[0].get('channels', 0))) @@ -518,19 +362,14 @@ def build_commands(file, new_dir, movie_name): else: audio_cmd2.extend([f'-c:a:{used_audio}', 'copy']) elif audio6: # wrong language, right codec - map_cmd.extend( - ['-map', '0:{index}'.format(index=audio6[0]['index'])], - ) + map_cmd.extend(['-map', '0:{index}'.format(index=audio6[0]['index'])]) a_mapped.extend([audio6[0]['index']]) bitrate = int(float(audio6[0].get('bit_rate', 0))) / 1000 channels = int(float(audio6[0].get('channels', 0))) audio_cmd2.extend([f'-c:a:{used_audio}', 'copy']) - elif ( - audio3 - ): # wrong language, wrong codec just pick the default audio track - map_cmd.extend( - ['-map', '0:{index}'.format(index=audio3[0]['index'])], - ) + elif audio3: + # wrong language, wrong codec just pick the default audio track + map_cmd.extend(['-map', '0:{index}'.format(index=audio3[0]['index'])]) a_mapped.extend([audio3[0]['index']]) bitrate = int(float(audio3[0].get('bit_rate', 0))) / 1000 channels = int(float(audio3[0].get('channels', 0))) @@ -538,44 +377,33 @@ def build_commands(file, new_dir, movie_name): audio_cmd2.extend([f'-c:a:{used_audio}', nzb2media.ACODEC2]) else: audio_cmd2.extend([f'-c:a:{used_audio}', 'copy']) - if nzb2media.ACHANNELS2 and channels and channels > nzb2media.ACHANNELS2: - audio_cmd2.extend( - [f'-ac:a:{used_audio}', str(nzb2media.ACHANNELS2)], - ) + audio_cmd2.extend([f'-ac:a:{used_audio}', str(nzb2media.ACHANNELS2)]) if audio_cmd2[1] == 'copy': audio_cmd2[1] = nzb2media.ACODEC2 - if nzb2media.ABITRATE2 and not ( - nzb2media.ABITRATE2 * 0.9 < bitrate < nzb2media.ABITRATE2 * 1.1 - ): + if nzb2media.ABITRATE2 and not (nzb2media.ABITRATE2 * 0.9 < bitrate < nzb2media.ABITRATE2 * 1.1): audio_cmd2.extend([f'-b:a:{used_audio}', str(nzb2media.ABITRATE2)]) if audio_cmd2[1] == 'copy': audio_cmd2[1] = nzb2media.ACODEC2 if nzb2media.OUTPUTQUALITYPERCENT: - audio_cmd2.extend( - [f'-q:a:{used_audio}', str(nzb2media.OUTPUTQUALITYPERCENT)], - ) + audio_cmd2.extend([f'-q:a:{used_audio}', str(nzb2media.OUTPUTQUALITYPERCENT)]) if audio_cmd2[1] == 'copy': audio_cmd2[1] = nzb2media.ACODEC2 if audio_cmd2[1] in ['aac', 'dts']: audio_cmd2[2:2] = ['-strict', '-2'] - - if ( - a_mapped[1] == a_mapped[0] and audio_cmd2[1:] == audio_cmd[1:] - ): # check for duplicate output track. + if a_mapped[1] == a_mapped[0] and audio_cmd2[1:] == audio_cmd[1:]: + # check for duplicate output track. del map_cmd[-2:] else: audio_cmd.extend(audio_cmd2) - if nzb2media.AINCLUDE and nzb2media.ACODEC3: - audio_streams.extend(commentary) # add commentry tracks back here. + # add commentary tracks back here. 
+ audio_streams.extend(commentary) for audio in audio_streams: if audio['index'] in a_mapped: continue used_audio += 1 - map_cmd.extend( - ['-map', '0:{index}'.format(index=audio['index'])], - ) + map_cmd.extend(['-map', '0:{index}'.format(index=audio['index'])]) audio_cmd3 = [] bitrate = int(float(audio.get('bit_rate', 0))) / 1000 channels = int(float(audio.get('channels', 0))) @@ -586,39 +414,27 @@ def build_commands(file, new_dir, movie_name): audio_cmd3.extend([f'-c:a:{used_audio}', nzb2media.ACODEC3]) else: audio_cmd3.extend([f'-c:a:{used_audio}', 'copy']) - if nzb2media.ACHANNELS3 and channels and channels > nzb2media.ACHANNELS3: - audio_cmd3.extend( - [f'-ac:a:{used_audio}', str(nzb2media.ACHANNELS3)], - ) + audio_cmd3.extend([f'-ac:a:{used_audio}', str(nzb2media.ACHANNELS3)]) if audio_cmd3[1] == 'copy': audio_cmd3[1] = nzb2media.ACODEC3 - if nzb2media.ABITRATE3 and not ( - nzb2media.ABITRATE3 * 0.9 < bitrate < nzb2media.ABITRATE3 * 1.1 - ): - audio_cmd3.extend( - [f'-b:a:{used_audio}', str(nzb2media.ABITRATE3)], - ) + if nzb2media.ABITRATE3 and not (nzb2media.ABITRATE3 * 0.9 < bitrate < nzb2media.ABITRATE3 * 1.1): + audio_cmd3.extend([f'-b:a:{used_audio}', str(nzb2media.ABITRATE3)]) if audio_cmd3[1] == 'copy': audio_cmd3[1] = nzb2media.ACODEC3 if nzb2media.OUTPUTQUALITYPERCENT > 0: - audio_cmd3.extend( - [f'-q:a:{used_audio}', str(nzb2media.OUTPUTQUALITYPERCENT)], - ) + audio_cmd3.extend([f'-q:a:{used_audio}', str(nzb2media.OUTPUTQUALITYPERCENT)]) if audio_cmd3[1] == 'copy': audio_cmd3[1] = nzb2media.ACODEC3 if audio_cmd3[1] in ['aac', 'dts']: audio_cmd3[2:2] = ['-strict', '-2'] audio_cmd.extend(audio_cmd3) - s_mapped = [] burnt = 0 num = 0 for lan in nzb2media.SLANGUAGES: try: - subs1 = [ - item for item in sub_streams if item['tags']['language'] == lan - ] + subs1 = [item for item in sub_streams if item['tags']['language'] == lan] except Exception: subs1 = [] if nzb2media.BURN and not subs1 and not burnt and os.path.isfile(file): @@ -633,65 +449,46 @@ def build_commands(file, new_dir, movie_name): if sub_streams[index]['index'] == sub['index']: subloc = index break - video_cmd.extend( - ['-vf', f'subtitles={input_file}:si={subloc}'], - ) + video_cmd.extend(['-vf', f'subtitles={input_file}:si={subloc}']) burnt = 1 if not nzb2media.ALLOWSUBS: break - if ( - sub['codec_name'] in ['dvd_subtitle', 'VobSub'] - and nzb2media.SCODEC == 'mov_text' - ): # We can't convert these. + if sub['codec_name'] in ['dvd_subtitle', 'VobSub'] and nzb2media.SCODEC == 'mov_text': # We can't convert these. continue map_cmd.extend(['-map', '0:{index}'.format(index=sub['index'])]) s_mapped.extend([sub['index']]) - if nzb2media.SINCLUDE: for sub in sub_streams: if not nzb2media.ALLOWSUBS: break if sub['index'] in s_mapped: continue - if ( - sub['codec_name'] in ['dvd_subtitle', 'VobSub'] - and nzb2media.SCODEC == 'mov_text' - ): # We can't convert these. + if sub['codec_name'] in ['dvd_subtitle', 'VobSub'] and nzb2media.SCODEC == 'mov_text': # We can't convert these. 
continue map_cmd.extend(['-map', '0:{index}'.format(index=sub['index'])]) s_mapped.extend([sub['index']]) - if nzb2media.OUTPUTFASTSTART: other_cmd.extend(['-movflags', '+faststart']) if nzb2media.OTHEROPTS: other_cmd.extend(nzb2media.OTHEROPTS) - command = [nzb2media.FFMPEG, '-loglevel', 'warning'] - if nzb2media.HWACCEL: command.extend(['-hwaccel', 'auto']) if nzb2media.GENERALOPTS: command.extend(nzb2media.GENERALOPTS) - command.extend(['-i', input_file]) - if nzb2media.SEMBED and os.path.isfile(file): for subfile in get_subs(file): sub_details, result = get_video_details(subfile) if not sub_details or not sub_details.get('streams'): continue if nzb2media.SCODEC == 'mov_text': - subcode = [ - stream['codec_name'] for stream in sub_details['streams'] - ] - if set(subcode).intersection( - ['dvd_subtitle', 'VobSub'], - ): # We can't convert these. + subcode = [stream['codec_name'] for stream in sub_details['streams']] + if set(subcode).intersection(['dvd_subtitle', 'VobSub']): + # We can't convert these. continue command.extend(['-i', subfile]) - lan = os.path.splitext(os.path.splitext(subfile)[0])[1][1:].split( - '-', - )[0] + lan = os.path.splitext(os.path.splitext(subfile)[0])[1][1:].split('-')[0] metlan = None try: if len(lan) == 3: @@ -701,15 +498,9 @@ def build_commands(file, new_dir, movie_name): except Exception: pass if metlan: - meta_cmd.extend( - [ - f'-metadata:s:s:{len(s_mapped) + num}', - f'language={metlan.alpha3}', - ], - ) + meta_cmd.extend([f'-metadata:s:s:{len(s_mapped) + num}', f'language={metlan.alpha3}']) num += 1 map_cmd.extend(['-map', f'{num}:0']) - if not nzb2media.ALLOWSUBS or (not s_mapped and not num): sub_cmd.extend(['-sn']) else: @@ -717,7 +508,6 @@ def build_commands(file, new_dir, movie_name): sub_cmd.extend(['-c:s', nzb2media.SCODEC]) else: sub_cmd.extend(['-c:s', 'copy']) - command.extend(map_cmd) command.extend(video_cmd) command.extend(audio_cmd) @@ -738,11 +528,7 @@ def get_subs(file): for directory, _, filenames in os.walk(path): for filename in filenames: filepaths.extend([os.path.join(directory, filename)]) - subfiles = [ - item - for item in filepaths - if os.path.splitext(item)[1] in sub_ext and name in item - ] + subfiles = [item for item in filepaths if os.path.splitext(item)[1] in sub_ext and name in item] return subfiles @@ -750,36 +536,20 @@ def extract_subs(file, newfile_path): video_details, result = get_video_details(file) if not video_details: return - if nzb2media.SUBSDIR: subdir = nzb2media.SUBSDIR else: subdir = os.path.split(newfile_path)[0] name = os.path.splitext(os.path.split(newfile_path)[1])[0] - try: - sub_streams = [ - item - for item in video_details['streams'] - if item['codec_type'] == 'subtitle' - and item['tags']['language'] in nzb2media.SLANGUAGES - and item['codec_name'] != 'hdmv_pgs_subtitle' - and item['codec_name'] != 'pgssub' - ] + sub_streams = [item for item in video_details['streams'] if item['codec_type'] == 'subtitle' and item['tags']['language'] in nzb2media.SLANGUAGES and item['codec_name'] != 'hdmv_pgs_subtitle' and item['codec_name'] != 'pgssub'] except Exception: - sub_streams = [ - item - for item in video_details['streams'] - if item['codec_type'] == 'subtitle' - and item['codec_name'] != 'hdmv_pgs_subtitle' - and item['codec_name'] != 'pgssub' - ] + sub_streams = [item for item in video_details['streams'] if item['codec_type'] == 'subtitle' and item['codec_name'] != 'hdmv_pgs_subtitle' and item['codec_name'] != 'pgssub'] num = len(sub_streams) for ea_num in range(num): sub = sub_streams[ea_num] idx = 
sub['index'] lan = sub.get('tags', {}).get('language', 'unk') - if num == 1: output_file = os.path.join(subdir, f'{name}.srt') if os.path.isfile(output_file): @@ -788,22 +558,9 @@ def extract_subs(file, newfile_path): output_file = os.path.join(subdir, f'{name}.{lan}.srt') if os.path.isfile(output_file): output_file = os.path.join(subdir, f'{name}.{lan}.{ea_num}.srt') - - command = [ - nzb2media.FFMPEG, - '-loglevel', - 'warning', - '-i', - file, - '-vn', - '-an', - f'-codec:{idx}', - 'srt', - output_file, - ] + command = [nzb2media.FFMPEG, '-loglevel', 'warning', '-i', file, '-vn', '-an', f'-codec:{idx}', 'srt', output_file] if platform.system() != 'Windows': command = nzb2media.NICENESS + command - log.info(f'Extracting {lan} subtitle from: {file}') print_cmd(command) result = 1 # set result to failed in case call fails. @@ -813,7 +570,6 @@ def extract_subs(file, newfile_path): result = proc.returncode except Exception: log.error('Extracting subtitle has failed') - if result == 0: try: shutil.copymode(file, output_file) @@ -833,17 +589,11 @@ def process_list(iterable, new_dir): success = True for item in iterable: ext = os.path.splitext(item)[1].lower() - if ( - ext in ['.iso', '.bin', '.img'] - and ext not in nzb2media.IGNOREEXTENSIONS - ): + if ext in ['.iso', '.bin', '.img'] and ext not in nzb2media.IGNOREEXTENSIONS: log.debug(f'Attempting to rip disk image: {item}') new_list.extend(rip_iso(item, new_dir)) rem_list.append(item) - elif ( - re.match('.+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]', item) - and '.vob' not in nzb2media.IGNOREEXTENSIONS - ): + elif re.match('.+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]', item) and '.vob' not in nzb2media.IGNOREEXTENSIONS: log.debug(f'Found VIDEO_TS image file: {item}') if not vts_path: try: @@ -851,22 +601,15 @@ def process_list(iterable, new_dir): except Exception: vts_path = os.path.split(item)[0] rem_list.append(item) - elif ( - re.match('.+BDMV[/\\]SOURCE[/\\][0-9]+[0-9].[Mm][Tt][Ss]', item) - and '.mts' not in nzb2media.IGNOREEXTENSIONS - ): + elif re.match('.+BDMV[/\\]SOURCE[/\\][0-9]+[0-9].[Mm][Tt][Ss]', item) and '.mts' not in nzb2media.IGNOREEXTENSIONS: log.debug(f'Found MTS image file: {item}') if not mts_path: try: - mts_path = re.match('(.+BDMV[/\\]SOURCE)', item).groups()[ - 0 - ] + mts_path = re.match('(.+BDMV[/\\]SOURCE)', item).groups()[0] except Exception: mts_path = os.path.split(item)[0] rem_list.append(item) - elif re.match('.+VIDEO_TS.', item) or re.match( - '.+VTS_[0-9][0-9]_[0-9].', item, - ): + elif re.match('.+VIDEO_TS.', item) or re.match('.+VTS_[0-9][0-9]_[0-9].', item): rem_list.append(item) elif nzb2media.CONCAT and re.match('.+[cC][dD][0-9].', item): rem_list.append(item) @@ -880,11 +623,7 @@ def process_list(iterable, new_dir): if combine: new_list.extend(combine_cd(combine)) for file in new_list: - if ( - isinstance(file, str) - and 'concat:' not in file - and not os.path.isfile(file) - ): + if isinstance(file, str) and 'concat:' not in file and not os.path.isfile(file): success = False break if success and new_list: @@ -909,31 +648,21 @@ def mount_iso(item, new_dir): # Currently only supports Linux Mount when permis print_cmd(cmd) with subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) as proc: proc_out, proc_err = proc.communicate() - nzb2media.MOUNTED = ( - mount_point # Allows us to verify this has been done and then cleanup. - ) + nzb2media.MOUNTED = mount_point # Allows us to verify this has been done and then cleanup. 
for root, _dirs, files in os.walk(mount_point): for file in files: full_path = os.path.join(root, file) - if ( - re.match('.+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]', full_path) - and '.vob' not in nzb2media.IGNOREEXTENSIONS - ): + if re.match('.+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]', full_path) and '.vob' not in nzb2media.IGNOREEXTENSIONS: log.debug(f'Found VIDEO_TS image file: {full_path}') try: vts_path = re.match('(.+VIDEO_TS)', full_path).groups()[0] except Exception: vts_path = os.path.split(full_path)[0] return combine_vts(vts_path) - if ( - re.match('.+BDMV[/\\]STREAM[/\\][0-9]+[0-9].[Mm]', full_path) - and '.mts' not in nzb2media.IGNOREEXTENSIONS - ): + if re.match('.+BDMV[/\\]STREAM[/\\][0-9]+[0-9].[Mm]', full_path) and '.mts' not in nzb2media.IGNOREEXTENSIONS: log.debug(f'Found MTS image file: {full_path}') try: - mts_path = re.match( - '(.+BDMV[/\\]STREAM)', full_path, - ).groups()[0] + mts_path = re.match('(.+BDMV[/\\]STREAM)', full_path).groups()[0] except Exception: mts_path = os.path.split(full_path)[0] return combine_mts(mts_path) @@ -960,17 +689,8 @@ def rip_iso(item, new_dir): print_cmd(cmd) with subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) as proc: proc_out, proc_err = proc.communicate() - file_match_gen = ( - re.match( - r'.+(VIDEO_TS[/\\]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb])', line, - ) - for line in proc_out.decode().splitlines() - ) - file_list = [ - file_match.groups()[0] - for file_match in file_match_gen - if file_match - ] + file_match_gen = (re.match(r'.+(VIDEO_TS[/\\]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb])', line) for line in proc_out.decode().splitlines()) + file_list = [file_match.groups()[0] for file_match in file_match_gen if file_match] combined = [] if file_list: # handle DVD for title_set in range(99): @@ -988,29 +708,13 @@ def rip_iso(item, new_dir): if nzb2media.CONCAT: combined.extend(concat) continue - name = '{name}.cd{x}'.format( - name=os.path.splitext(os.path.split(item)[1])[0], - x=title_set + 1, - ) + name = f'{os.path.splitext(os.path.split(item)[1])[0]}.cd{title_set + 1}' new_files.append({item: {'name': name, 'files': concat}}) - else: # check BlueRay for BDMV/STREAM/XXXX.MTS - mts_list_gen = ( - re.match(r'.+(BDMV[/\\]STREAM[/\\][0-9]+[0-9].[Mm]).', line) - for line in proc_out.decode().splitlines() - ) - mts_list = [ - file_match.groups()[0] - for file_match in mts_list_gen - if file_match - ] - if sys.version_info[0] == 2: # Python2 sorting - mts_list.sort( - key=lambda f: int(filter(str.isdigit, f)), - ) # Sort all .mts files in numerical order - else: # Python3 sorting - mts_list.sort( - key=lambda f: int(''.join(filter(str.isdigit, f))), - ) + else: + # check BlueRay for BDMV/STREAM/XXXX.MTS + mts_list_gen = (re.match(r'.+(BDMV[/\\]STREAM[/\\][0-9]+[0-9].[Mm]).', line) for line in proc_out.decode().splitlines()) + mts_list = [file_match.groups()[0] for file_match in mts_list_gen if file_match] + mts_list.sort(key=lambda f: int(''.join(filter(str.isdigit, f)))) title_set = 0 for mts_name in mts_list: concat = [] @@ -1019,10 +723,7 @@ def rip_iso(item, new_dir): if nzb2media.CONCAT: combined.extend(concat) continue - name = '{name}.cd{x}'.format( - name=os.path.splitext(os.path.split(item)[1])[0], - x=title_set, - ) + name = f'{os.path.splitext(os.path.split(item)[1])[0]}.cd{title_set}' new_files.append({item: {'name': name, 'files': concat}}) if nzb2media.CONCAT and combined: name = os.path.splitext(os.path.split(item)[1])[0] @@ -1059,10 +760,7 @@ def combine_vts(vts_path): if nzb2media.CONCAT: combined.extend(concat) continue - name = 
'{name}.cd{x}'.format( - name=name, - x=title_set + 1, - ) + name = f'{name}.cd{title_set + 1}' new_files.append({vts_path: {'name': name, 'files': concat}}) if nzb2media.CONCAT: new_files.append({vts_path: {'name': name, 'files': combined}}) @@ -1078,11 +776,7 @@ def combine_mts(mts_path): else: name = os.path.basename(name) num = 0 - mts_list = [ - f - for f in os.listdir(mts_path) - if os.path.isfile(os.path.join(mts_path, f)) - ] + mts_list = [f for f in os.listdir(mts_path) if os.path.isfile(os.path.join(mts_path, f))] if sys.version_info[0] == 2: # Python2 sorting mts_list.sort(key=lambda f: int(filter(str.isdigit, f))) else: # Python3 sorting @@ -1093,10 +787,7 @@ def combine_mts(mts_path): if nzb2media.CONCAT: combined.extend(concat) continue - name = '{name}.cd{x}'.format( - name=name, - x=num + 1, - ) + name = f'{name}.cd{num + 1}' new_files.append({mts_path: {'name': name, 'files': concat}}) num += 1 if nzb2media.CONCAT: @@ -1106,19 +797,10 @@ def combine_mts(mts_path): def combine_cd(combine): new_files = [] - for item in { - re.match('(.+)[cC][dD][0-9].', ea_item).groups()[0] - for ea_item in combine - }: + for item in {re.match('(.+)[cC][dD][0-9].', ea_item).groups()[0] for ea_item in combine}: concat = '' for num in range(99): - files = [ - file - for file in combine - if num + 1 - == int(re.match('.+[cC][dD]([0-9]+).', file).groups()[0]) - and item in file - ] + files = [file for file in combine if num + 1 == int(re.match('.+[cC][dD]([0-9]+).', file).groups()[0]) and item in file] if files: concat += f'{files[0]}|' else: @@ -1149,36 +831,25 @@ def transcode_directory(dir_name): else: new_dir = dir_name movie_name = os.path.splitext(os.path.split(dir_name)[1])[0] - file_list = nzb2media.list_media_files( - dir_name, media=True, audio=False, meta=False, archives=False, - ) + file_list = nzb2media.list_media_files(dir_name, media=True, audio=False, meta=False, archives=False) file_list, rem_list, new_list, success = process_list(file_list, new_dir) if not success: return 1, dir_name - for file in file_list: - if ( - isinstance(file, str) - and os.path.splitext(file)[1] in nzb2media.IGNOREEXTENSIONS - ): + if isinstance(file, str) and os.path.splitext(file)[1] in nzb2media.IGNOREEXTENSIONS: continue command, file = build_commands(file, new_dir, movie_name) newfile_path = command[-1] - # transcoding files may remove the original file, so make sure to extract subtitles first if nzb2media.SEXTRACT and isinstance(file, str): extract_subs(file, newfile_path) - try: # Try to remove the file that we're transcoding to just in case. (ffmpeg will return an error if it already exists for some reason) os.remove(newfile_path) except OSError as error: - if ( - error.errno != errno.ENOENT - ): # Ignore the error if it's just telling us that the file doesn't exist + if error.errno != errno.ENOENT: # Ignore the error if it's just telling us that the file doesn't exist log.debug(f'Error when removing transcoding target: {error}') except Exception as error: log.debug(f'Error when removing transcoding target: {error}') - log.info(f'Transcoding video: {newfile_path}') print_cmd(command) result = 1 # set result to failed in case call fails. 
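The hunks above repeatedly collapse the same ffprobe pattern into one-line comprehensions: probe the file for JSON metadata, then split the returned streams by codec_type. A minimal standalone sketch of that pattern, separate from the patch and using illustrative names only (it assumes an ffprobe binary on PATH rather than nzb2media.FFPROBE, and a hypothetical example.mkv):

import json
import subprocess

def probe_streams(path):
    # Same flags the patch passes to ffprobe: quiet JSON output with format and stream info.
    cmd = ['ffprobe', '-v', 'quiet', '-print_format', 'json', '-show_format', '-show_streams', path]
    try:
        proc = subprocess.run(cmd, capture_output=True, check=True)
        return json.loads(proc.stdout.decode()).get('streams', [])
    except (OSError, subprocess.CalledProcessError, ValueError):
        # Treat a missing binary, a non-zero exit, or unparsable output as "no streams".
        return []

streams = probe_streams('example.mkv')
video_streams = [item for item in streams if item.get('codec_type') == 'video']
audio_streams = [item for item in streams if item.get('codec_type') == 'audio']
sub_streams = [item for item in streams if item.get('codec_type') == 'subtitle']

A file is then considered usable when both video_streams and audio_streams are non-empty, which is the check the refactored check_vid_file and is_video_good perform.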
@@ -1201,27 +872,21 @@ def transcode_directory(dir_name): result = proc.returncode except Exception: log.error(f'Transcoding of video {newfile_path} has failed') - if nzb2media.SUBSDIR and result == 0 and isinstance(file, str): for sub in get_subs(file): name = os.path.splitext(os.path.split(file)[1])[0] subname = os.path.split(sub)[1] newname = os.path.splitext(os.path.split(newfile_path)[1])[0] - newpath = os.path.join( - nzb2media.SUBSDIR, subname.replace(name, newname), - ) + newpath = os.path.join(nzb2media.SUBSDIR, subname.replace(name, newname)) if not os.path.isfile(newpath): os.rename(sub, newpath) - if result == 0: try: shutil.copymode(file, newfile_path) except Exception: pass log.info(f'Transcoding of video to {newfile_path} succeeded') - if os.path.isfile(newfile_path) and ( - file in new_list or not nzb2media.DUPLICATE - ): + if os.path.isfile(newfile_path) and (file in new_list or not nzb2media.DUPLICATE): try: os.unlink(file) except Exception: @@ -1245,13 +910,11 @@ def transcode_directory(dir_name): os.unlink(file) except Exception: pass - if not os.listdir( - new_dir, - ): # this is an empty directory and we didn't transcode into it. + if not os.listdir(new_dir): + # this is an empty directory and we didn't transcode into it. os.rmdir(new_dir) new_dir = dir_name - if ( - not nzb2media.PROCESSOUTPUT and nzb2media.DUPLICATE - ): # We postprocess the original files to CP/SB + if not nzb2media.PROCESSOUTPUT and nzb2media.DUPLICATE: + # We postprocess the original files to CP/SB new_dir = dir_name return final_result, new_dir diff --git a/nzb2media/user_scripts.py b/nzb2media/user_scripts.py index 92a2c0bf..4929817b 100644 --- a/nzb2media/user_scripts.py +++ b/nzb2media/user_scripts.py @@ -18,27 +18,17 @@ log.addHandler(logging.NullHandler()) def external_script(output_destination, torrent_name, torrent_label, settings): final_result = 0 # start at 0. num_files = 0 - nzb2media.USER_SCRIPT_MEDIAEXTENSIONS = settings.get( - 'user_script_mediaExtensions', '', - ) + nzb2media.USER_SCRIPT_MEDIAEXTENSIONS = settings.get('user_script_mediaExtensions', '') try: if isinstance(nzb2media.USER_SCRIPT_MEDIAEXTENSIONS, str): - nzb2media.USER_SCRIPT_MEDIAEXTENSIONS = ( - nzb2media.USER_SCRIPT_MEDIAEXTENSIONS.lower().split(',') - ) + nzb2media.USER_SCRIPT_MEDIAEXTENSIONS = nzb2media.USER_SCRIPT_MEDIAEXTENSIONS.lower().split(',') except Exception: log.error('user_script_mediaExtensions could not be set') nzb2media.USER_SCRIPT_MEDIAEXTENSIONS = [] - nzb2media.USER_SCRIPT = settings.get('user_script_path', '') - if not nzb2media.USER_SCRIPT or nzb2media.USER_SCRIPT == 'None': # do nothing and return success. This allows the user an option to Link files only and not run a script. 
- return ProcessResult( - status_code=0, - message='No user script defined', - ) - + return ProcessResult(status_code=0, message='No user script defined') nzb2media.USER_SCRIPT_PARAM = settings.get('user_script_param', '') try: if isinstance(nzb2media.USER_SCRIPT_PARAM, str): @@ -46,49 +36,30 @@ def external_script(output_destination, torrent_name, torrent_label, settings): except Exception: log.error('user_script_params could not be set') nzb2media.USER_SCRIPT_PARAM = [] - nzb2media.USER_SCRIPT_SUCCESSCODES = settings.get('user_script_successCodes', 0) try: if isinstance(nzb2media.USER_SCRIPT_SUCCESSCODES, str): - nzb2media.USER_SCRIPT_SUCCESSCODES = ( - nzb2media.USER_SCRIPT_SUCCESSCODES.split(',') - ) + nzb2media.USER_SCRIPT_SUCCESSCODES = nzb2media.USER_SCRIPT_SUCCESSCODES.split(',') except Exception: log.error('user_script_successCodes could not be set') nzb2media.USER_SCRIPT_SUCCESSCODES = 0 - nzb2media.USER_SCRIPT_CLEAN = int(settings.get('user_script_clean', 1)) nzb2media.USER_SCRIPT_RUNONCE = int(settings.get('user_script_runOnce', 1)) - if nzb2media.CHECK_MEDIA: - for video in list_media_files( - output_destination, - media=True, - audio=False, - meta=False, - archives=False, - ): + for video in list_media_files(output_destination, media=True, audio=False, meta=False, archives=False): if transcoder.is_video_good(video, 0): import_subs(video) else: log.info(f'Corrupt video file found {video}. Deleting.') os.unlink(video) - for dirpath, _, filenames in os.walk(output_destination): for file in filenames: - file_path = nzb2media.os.path.join(dirpath, file) file_name, file_extension = os.path.splitext(file) log.debug(f'Checking file {file} to see if this should be processed.') - - if ( - file_extension in nzb2media.USER_SCRIPT_MEDIAEXTENSIONS - or 'all' in nzb2media.USER_SCRIPT_MEDIAEXTENSIONS - ): + if file_extension in nzb2media.USER_SCRIPT_MEDIAEXTENSIONS or 'all' in nzb2media.USER_SCRIPT_MEDIAEXTENSIONS: num_files += 1 - if ( - nzb2media.USER_SCRIPT_RUNONCE == 1 and num_files > 1 - ): # we have already run once, so just continue to get number of files. + if nzb2media.USER_SCRIPT_RUNONCE == 1 and num_files > 1: # we have already run once, so just continue to get number of files. continue command = [nzb2media.USER_SCRIPT] for param in nzb2media.USER_SCRIPT_PARAM: @@ -133,28 +104,15 @@ def external_script(output_destination, torrent_name, torrent_label, settings): log.info(f'If the UserScript completed successfully you should add {res} to the user_script_successCodes') result = 1 final_result += result - num_files_new = 0 for _, _, filenames in os.walk(output_destination): for file in filenames: file_name, file_extension = os.path.splitext(file) - - if ( - file_extension in nzb2media.USER_SCRIPT_MEDIAEXTENSIONS - or nzb2media.USER_SCRIPT_MEDIAEXTENSIONS == 'ALL' - ): + if file_extension in nzb2media.USER_SCRIPT_MEDIAEXTENSIONS or nzb2media.USER_SCRIPT_MEDIAEXTENSIONS == 'ALL': num_files_new += 1 - - if ( - nzb2media.USER_SCRIPT_CLEAN == int(1) - and num_files_new == 0 - and final_result == 0 - ): + if nzb2media.USER_SCRIPT_CLEAN == int(1) and num_files_new == 0 and final_result == 0: log.info(f'All files have been processed. Cleaning outputDirectory {output_destination}') remove_dir(output_destination) elif nzb2media.USER_SCRIPT_CLEAN == int(1) and num_files_new != 0: log.info(f'{num_files} files were processed, but {num_files_new} still remain. 
outputDirectory will not be cleaned.') - return ProcessResult( - status_code=final_result, - message='User Script Completed', - ) + return ProcessResult(status_code=final_result, message='User Script Completed') diff --git a/nzb2media/utils/common.py b/nzb2media/utils/common.py index 8257d8a6..da7ec2b7 100644 --- a/nzb2media/utils/common.py +++ b/nzb2media/utils/common.py @@ -15,9 +15,7 @@ log.addHandler(logging.NullHandler()) def flatten(output_destination): - return flatten_dir( - output_destination, list_media_files(output_destination), - ) + return flatten_dir(output_destination, list_media_files(output_destination)) def clean_dir(path, section, subsection): @@ -25,9 +23,7 @@ def clean_dir(path, section, subsection): min_size = int(cfg.get('minSize', 0)) delete_ignored = int(cfg.get('delete_ignored', 0)) try: - files = list_media_files( - path, min_size=min_size, delete_ignored=delete_ignored, - ) + files = list_media_files(path, min_size=min_size, delete_ignored=delete_ignored) except Exception: files = [] return clean_directory(path, files) @@ -35,72 +31,45 @@ def clean_dir(path, section, subsection): def process_dir(path, link): folders = [] - log.info(f'Searching {path} for mediafiles to post-process ...') dir_contents = os.listdir(path) - # search for single files and move them into their own folder for post-processing - # Generate list of sync files - sync_files = ( - item - for item in dir_contents - if os.path.splitext(item)[1] in ['.!sync', '.bts'] - ) - + sync_files = (item for item in dir_contents if os.path.splitext(item)[1] in ['.!sync', '.bts']) # Generate a list of file paths - filepaths = ( - os.path.join(path, item) - for item in dir_contents - if item not in ['Thumbs.db', 'thumbs.db'] - ) - + filepaths = (os.path.join(path, item) for item in dir_contents if item not in ['Thumbs.db', 'thumbs.db']) # Generate a list of media files mediafiles = (item for item in filepaths if os.path.isfile(item)) - if not any(sync_files): for mediafile in mediafiles: try: move_file(mediafile, path, link) except Exception as error: log.error(f'Failed to move {os.path.split(mediafile)[1]} to its own directory: {error}') - # removeEmptyFolders(path, removeRoot=False) - # Generate all path contents path_contents = (os.path.join(path, item) for item in os.listdir(path)) - # Generate all directories from path contents directories = (path for path in path_contents if os.path.isdir(path)) - for directory in directories: dir_contents = os.listdir(directory) - sync_files = ( - item - for item in dir_contents - if os.path.splitext(item)[1] in ['.!sync', '.bts'] - ) + sync_files = (item for item in dir_contents if os.path.splitext(item)[1] in ['.!sync', '.bts']) if not any(dir_contents) or any(sync_files): continue folders.append(directory) - return folders def get_dirs(section, subsection, link='hard'): to_return = [] - watch_directory = nzb2media.CFG[section][subsection]['watch_dir'] directory = os.path.join(watch_directory, subsection) - if not os.path.exists(directory): directory = watch_directory - try: to_return.extend(process_dir(directory, link)) except Exception as error: log.error(f'Failed to add directories from {watch_directory} for post-processing: {error}') - if nzb2media.USE_LINK == 'move': try: output_directory = os.path.join(nzb2media.OUTPUT_DIRECTORY, subsection) @@ -108,20 +77,12 @@ def get_dirs(section, subsection, link='hard'): to_return.extend(process_dir(output_directory, link)) except Exception as error: log.error(f'Failed to add directories from {nzb2media.OUTPUT_DIRECTORY} 
for post-processing: {error}') - if not to_return: log.debug(f'No directories identified in {section}:{subsection} for post-processing') - return list(set(to_return)) -def create_url( - scheme: str, - host: str, - port: int | None = None, - path: str = '', - query: str = '', -) -> str: +def create_url(scheme: str, host: str, port: int | None = None, path: str = '', query: str = '') -> str: """Create a url from its component parts.""" netloc = host if port is None else f'{host}:{port}' fragments = '' diff --git a/nzb2media/utils/download_info.py b/nzb2media/utils/download_info.py index aa44a142..f05ed676 100644 --- a/nzb2media/utils/download_info.py +++ b/nzb2media/utils/download_info.py @@ -7,7 +7,6 @@ from nzb2media import main_db log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) - database = main_db.DBConnection() diff --git a/nzb2media/utils/encoding.py b/nzb2media/utils/encoding.py index bd384930..79c2f399 100644 --- a/nzb2media/utils/encoding.py +++ b/nzb2media/utils/encoding.py @@ -38,9 +38,7 @@ def char_replace(name_in): break else: # Detect UTF-8 - if ((character == 0xC2) | (character == 0xC3)) & ( - (next_character >= 0xA0) & (next_character <= 0xFF) - ): + if ((character == 0xC2) | (character == 0xC3)) & ((next_character >= 0xA0) & (next_character <= 0xFF)): encoding = 'utf-8' break # Detect CP850 @@ -60,19 +58,13 @@ def char_replace(name_in): def convert_to_ascii(input_name, dir_name): - ascii_convert = int(nzb2media.CFG['ASCII']['convert']) - if ( - ascii_convert == 0 or os.name == 'nt' - ): # just return if we don't want to convert or on windows os and '\' is replaced!. + if ascii_convert == 0 or os.name == 'nt': # just return if we don't want to convert or on windows os and '\' is replaced!. return input_name, dir_name - encoded, input_name = char_replace(input_name) - directory, base = os.path.split(dir_name) if not base: # ended with '/' directory, base = os.path.split(directory) - encoded, base2 = char_replace(base) if encoded: dir_name = os.path.join(directory, base2) @@ -80,25 +72,16 @@ def convert_to_ascii(input_name, dir_name): os.rename(os.path.join(directory, base), dir_name) if 'NZBOP_SCRIPTDIR' in os.environ: print(f'[NZB] DIRECTORY={dir_name}') - for dirname, dirnames, _ in os.walk(dir_name, topdown=False): for subdirname in dirnames: encoded, subdirname2 = char_replace(subdirname) if encoded: log.info(f'Renaming directory to: {subdirname2}.') - os.rename( - os.path.join(dirname, subdirname), - os.path.join(dirname, subdirname2), - ) - + os.rename(os.path.join(dirname, subdirname), os.path.join(dirname, subdirname2)) for dirname, _, filenames in os.walk(dir_name): for filename in filenames: encoded, filename2 = char_replace(filename) if encoded: log.info(f'Renaming file to: {filename2}.') - os.rename( - os.path.join(dirname, filename), - os.path.join(dirname, filename2), - ) - + os.rename(os.path.join(dirname, filename), os.path.join(dirname, filename2)) return input_name, dir_name diff --git a/nzb2media/utils/files.py b/nzb2media/utils/files.py index 5a3d9033..e773f869 100644 --- a/nzb2media/utils/files.py +++ b/nzb2media/utils/files.py @@ -29,65 +29,46 @@ def move_file(filename, path, link): try: if file_ext in nzb2media.AUDIO_CONTAINER: guess = mediafile.MediaFile(filename) - # get artist and album info artist = guess.artist album = guess.album - # create new path - new_path = os.path.join( - path, f'{sanitize_name(artist)} - {sanitize_name(album)}', - ) + new_path = os.path.join(path, f'{sanitize_name(artist)} - {sanitize_name(album)}') 
elif file_ext in nzb2media.MEDIA_CONTAINER: guess = guessit.guessit(filename) - # get title title = guess.get('series') or guess.get('title') - if not title: title = os.path.splitext(os.path.basename(filename))[0] - new_path = os.path.join(path, sanitize_name(title)) except Exception as error: log.error(f'Exception parsing name for media file: {os.path.split(filename)[1]}: {error}') - if not new_path: title = os.path.splitext(os.path.basename(filename))[0] new_path = os.path.join(path, sanitize_name(title)) - # # Removed as encoding of directory no-longer required # try: # new_path = new_path.encode(nzb2media.SYS_ENCODING) # except Exception: # pass - # Just fail-safe incase we already have afile with this clean-name (was actually a bug from earlier code, but let's be safe). if os.path.isfile(new_path): - new_path2 = os.path.join( - os.path.join(os.path.split(new_path)[0], 'new'), - os.path.split(new_path)[1], - ) + new_path2 = os.path.join(os.path.join(os.path.split(new_path)[0], 'new'), os.path.split(new_path)[1]) new_path = new_path2 - # create new path if it does not exist if not os.path.exists(new_path): make_dir(new_path) - - newfile = os.path.join( - new_path, sanitize_name(os.path.split(filename)[1]), - ) + newfile = os.path.join(new_path, sanitize_name(os.path.split(filename)[1])) try: newfile = newfile.encode(nzb2media.SYS_ENCODING) except Exception: pass - # link file to its new path copy_link(filename, newfile, link) def is_min_size(input_name, min_size): file_name, file_ext = os.path.splitext(os.path.basename(input_name)) - # audio files we need to check directory size not file size input_size = os.path.getsize(input_name) if file_ext in nzb2media.AUDIO_CONTAINER: @@ -96,7 +77,6 @@ def is_min_size(input_name, min_size): except Exception: log.error(f'Failed to get file size for {input_name}') return True - # Ignore files under a certain size if input_size > min_size * 1048576: return True @@ -110,59 +90,27 @@ def is_archive_file(filename): return False -def is_media_file( - mediafile, - media=True, - audio=True, - meta=True, - archives=True, - other=False, - otherext=None, -): +def is_media_file(mediafile, media=True, audio=True, meta=True, archives=True, other=False, otherext=None): if otherext is None: otherext = [] - file_name, file_ext = os.path.splitext(mediafile) - try: # ignore MAC OS's 'resource fork' files if file_name.startswith('._'): return False except Exception: pass - - return any( - [ - (media and file_ext.lower() in nzb2media.MEDIA_CONTAINER), - (audio and file_ext.lower() in nzb2media.AUDIO_CONTAINER), - (meta and file_ext.lower() in nzb2media.META_CONTAINER), - (archives and is_archive_file(mediafile)), - (other and (file_ext.lower() in otherext or 'all' in otherext)), - ], - ) + return any([(media and file_ext.lower() in nzb2media.MEDIA_CONTAINER), (audio and file_ext.lower() in nzb2media.AUDIO_CONTAINER), (meta and file_ext.lower() in nzb2media.META_CONTAINER), (archives and is_archive_file(mediafile)), (other and (file_ext.lower() in otherext or 'all' in otherext))]) -def list_media_files( - path, - min_size=0, - delete_ignored=0, - media=True, - audio=True, - meta=True, - archives=True, - other=False, - otherext=None, -): +def list_media_files(path, min_size=0, delete_ignored=0, media=True, audio=True, meta=True, archives=True, other=False, otherext=None): if otherext is None: otherext = [] - files = [] if not os.path.isdir(path): if os.path.isfile(path): # Single file downloads. 
cur_file = os.path.split(path)[1] - if is_media_file( - cur_file, media, audio, meta, archives, other, otherext, - ): + if is_media_file(cur_file, media, audio, meta, archives, other, otherext): # Optionally ignore sample files if is_sample(path) or not is_min_size(path, min_size): if delete_ignored == 1: @@ -173,33 +121,15 @@ def list_media_files( pass else: files.append(path) - return files - for cur_file in os.listdir(path): full_cur_file = os.path.join(path, cur_file) - # if it's a folder do it recursively if os.path.isdir(full_cur_file) and not cur_file.startswith('.'): - files += list_media_files( - full_cur_file, - min_size, - delete_ignored, - media, - audio, - meta, - archives, - other, - otherext, - ) - - elif is_media_file( - cur_file, media, audio, meta, archives, other, otherext, - ): + files += list_media_files(full_cur_file, min_size, delete_ignored, media, audio, meta, archives, other, otherext) + elif is_media_file(cur_file, media, audio, meta, archives, other, otherext): # Optionally ignore sample files - if is_sample(full_cur_file) or not is_min_size( - full_cur_file, min_size, - ): + if is_sample(full_cur_file) or not is_min_size(full_cur_file, min_size): if delete_ignored == 1: try: os.unlink(full_cur_file) @@ -207,38 +137,28 @@ def list_media_files( except Exception: pass continue - files.append(full_cur_file) - return sorted(files, key=len) def extract_files(src, dst=None, keep_archive=None): extracted_folder = [] extracted_archive = [] - - for input_file in list_media_files( - src, media=False, audio=False, meta=False, archives=True, - ): + for input_file in list_media_files(src, media=False, audio=False, meta=False, archives=True): dir_path = os.path.dirname(input_file) full_file_name = os.path.basename(input_file) archive_name = os.path.splitext(full_file_name)[0] archive_name = re.sub(r'part[0-9]+', '', archive_name) - if dir_path in extracted_folder and archive_name in extracted_archive: continue # no need to extract this, but keep going to look for other archives and sub directories. 
- try: if extractor.extract(input_file, dst or dir_path): extracted_folder.append(dir_path) extracted_archive.append(archive_name) except Exception: log.error(f'Extraction failed for: {full_file_name}') - for folder in extracted_folder: - for input_file in list_media_files( - folder, media=False, audio=False, meta=False, archives=True, - ): + for input_file in list_media_files(folder, media=False, audio=False, meta=False, archives=True): full_file_name = os.path.basename(input_file) archive_name = os.path.splitext(full_file_name)[0] archive_name = re.sub(r'part[0-9]+', '', archive_name) diff --git a/nzb2media/utils/identification.py b/nzb2media/utils/identification.py index e275a3c4..f51e6021 100644 --- a/nzb2media/utils/identification.py +++ b/nzb2media/utils/identification.py @@ -16,7 +16,6 @@ log.addHandler(logging.NullHandler()) def find_imdbid(dir_name, input_name, omdb_api_key): imdbid = None log.info(f'Attemping imdbID lookup for {input_name}') - # find imdbid in dirName log.info('Searching folder and file names for imdbID ...') match = re.search(r'\b(tt\d{7,8})\b', dir_name + input_name) @@ -34,9 +33,7 @@ def find_imdbid(dir_name, input_name, omdb_api_key): if 'NZBPR__DNZB_MOREINFO' in os.environ: dnzb_more_info = os.environ.get('NZBPR__DNZB_MOREINFO', '') if dnzb_more_info != '': - regex = re.compile( - r'^http://www.imdb.com/title/(tt[0-9]+)/$', re.IGNORECASE, - ) + regex = re.compile(r'^http://www.imdb.com/title/(tt[0-9]+)/$', re.IGNORECASE) match = regex.match(dnzb_more_info) if match: imdbid = match.group(1) @@ -52,81 +49,57 @@ def find_imdbid(dir_name, input_name, omdb_api_key): title = None if 'title' in guess: title = guess['title'] - # Movie Year year = None if 'year' in guess: year = guess['year'] - url = 'http://www.omdbapi.com' - if not omdb_api_key: log.info('Unable to determine imdbID: No api key provided for omdbapi.com.') return - log.debug(f'Opening URL: {url}') - try: - response = requests.get( - url, - params={'apikey': omdb_api_key, 'y': year, 't': title}, - verify=False, - timeout=(60, 300), - ) + response = requests.get(url, params={'apikey': omdb_api_key, 'y': year, 't': title}, verify=False, timeout=(60, 300)) except requests.ConnectionError: log.error(f'Unable to open URL {url}') return - try: results = response.json() except Exception: log.error('No json data returned from omdbapi.com') - try: imdbid = results['imdbID'] except Exception: log.error('No imdbID returned from omdbapi.com') - if imdbid: log.info(f'Found imdbID [{imdbid}]') return imdbid - log.warning(f'Unable to find a imdbID for {input_name}') return imdbid -def category_search( - input_directory, input_name, input_category, root, categories, -): +def category_search(input_directory, input_name, input_category, root, categories): tordir = False - if input_directory is None: # =Nothing to process here. return input_directory, input_name, input_category, root - pathlist = os.path.normpath(input_directory).split(os.sep) - if input_category and input_category in pathlist: log.debug(f'SEARCH: Found the Category: {input_category} in directory structure') elif input_category: log.debug(f'SEARCH: Could not find the category: {input_category} in the directory structure') else: try: - input_category = list(set(pathlist) & set(categories))[ - -1 - ] # assume last match is most relevant category. + input_category = list(set(pathlist) & set(categories))[-1] # assume last match is most relevant category. 
log.debug(f'SEARCH: Found Category: {input_category} in directory structure') except IndexError: input_category = '' log.debug('SEARCH: Could not find a category in the directory structure') - if not os.path.isdir(input_directory) and os.path.isfile( - input_directory, - ): # If the input directory is a file + if not os.path.isdir(input_directory) and os.path.isfile(input_directory): + # If the input directory is a file if not input_name: input_name = os.path.split(os.path.normpath(input_directory))[1] return input_directory, input_name, input_category, root - if input_category and os.path.isdir( - os.path.join(input_directory, input_category), - ): + if input_category and os.path.isdir(os.path.join(input_directory, input_category)): log.info(f'SEARCH: Found category directory {input_category} in input directory directory {input_directory}') input_directory = os.path.join(input_directory, input_category) log.info(f'SEARCH: Setting input_directory to {input_directory}') @@ -135,53 +108,36 @@ def category_search( input_directory = os.path.join(input_directory, input_name) log.info(f'SEARCH: Setting input_directory to {input_directory}') tordir = True - elif input_name and os.path.isdir( - os.path.join(input_directory, sanitize_name(input_name)), - ): + elif input_name and os.path.isdir(os.path.join(input_directory, sanitize_name(input_name))): log.info(f'SEARCH: Found torrent directory {sanitize_name(input_name)} in input directory directory {input_directory}') - input_directory = os.path.join( - input_directory, sanitize_name(input_name), - ) + input_directory = os.path.join(input_directory, sanitize_name(input_name)) log.info(f'SEARCH: Setting input_directory to {input_directory}') tordir = True - elif input_name and os.path.isfile( - os.path.join(input_directory, input_name), - ): + elif input_name and os.path.isfile(os.path.join(input_directory, input_name)): log.info(f'SEARCH: Found torrent file {input_name} in input directory directory {input_directory}') input_directory = os.path.join(input_directory, input_name) log.info(f'SEARCH: Setting input_directory to {input_directory}') tordir = True - elif input_name and os.path.isfile( - os.path.join(input_directory, sanitize_name(input_name)), - ): + elif input_name and os.path.isfile(os.path.join(input_directory, sanitize_name(input_name))): log.info(f'SEARCH: Found torrent file {sanitize_name(input_name)} in input directory directory {input_directory}') - input_directory = os.path.join( - input_directory, sanitize_name(input_name), - ) + input_directory = os.path.join(input_directory, sanitize_name(input_name)) log.info(f'SEARCH: Setting input_directory to {input_directory}') tordir = True elif input_name and os.path.isdir(input_directory): for file in os.listdir(input_directory): - if os.path.splitext(file)[0] in [ - input_name, - sanitize_name(input_name), - ]: + if os.path.splitext(file)[0] in [input_name, sanitize_name(input_name)]: log.info(f'SEARCH: Found torrent file {file} in input directory directory {input_directory}') input_directory = os.path.join(input_directory, file) log.info(f'SEARCH: Setting input_directory to {input_directory}') input_name = file tordir = True break - - imdbid = [ - item for item in pathlist if '.cp(tt' in item - ] # This looks for the .cp(tt imdb id in the path. + # This looks for the .cp(tt imdb id in the path. 
+ imdbid = [item for item in pathlist if '.cp(tt' in item] if imdbid and '.cp(tt' not in input_name: - input_name = imdbid[ - 0 - ] # This ensures the imdb id is preserved and passed to CP + input_name = imdbid[0] + # This ensures the imdb id is preserved and passed to CP tordir = True - if input_category and not tordir: try: index = pathlist.index(input_category) @@ -192,7 +148,6 @@ def category_search( input_name = pathlist[index + 1] except ValueError: pass - if input_name and not tordir: if input_name in pathlist or sanitize_name(input_name) in pathlist: log.info(f'SEARCH: Found torrent directory {input_name} in the directory structure') @@ -201,9 +156,7 @@ def category_search( root = 1 if not tordir: root = 2 - if root > 0: log.info('SEARCH: Could not find a unique directory for this download. Assume a common directory.') log.info('SEARCH: We will try and determine which files to process, individually') - return input_directory, input_name, input_category, root diff --git a/nzb2media/utils/links.py b/nzb2media/utils/links.py index 9c4de104..789ce3d8 100644 --- a/nzb2media/utils/links.py +++ b/nzb2media/utils/links.py @@ -10,7 +10,6 @@ from nzb2media.utils.paths import make_dir log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) - try: from jaraco.windows.filesystem import islink, readlink except ImportError: @@ -25,7 +24,6 @@ def copy_link(src, target_link, use_link): log.info(f'MEDIAFILE: [{os.path.basename(target_link)}]') log.info(f'SOURCE FOLDER: [{os.path.dirname(src)}]') log.info(f'TARGET FOLDER: [{os.path.dirname(target_link)}]') - if src != target_link and os.path.exists(target_link): log.info('MEDIAFILE already exists in the TARGET folder, skipping ...') return True @@ -35,7 +33,6 @@ def copy_link(src, target_link, use_link): if src == os.path.dirname(target_link): log.info('SOURCE AND TARGET folders are the same, skipping ...') return True - make_dir(os.path.dirname(target_link)) try: if use_link == 'dir': @@ -65,28 +62,22 @@ def copy_link(src, target_link, use_link): return True except Exception as error: log.warning(f'Error: {error}, copying instead ... ') - log.info('Copying SOURCE MEDIAFILE -> TARGET FOLDER') shutil.copy(src, target_link) - return True def replace_links(link, max_depth=10): link_depth = 0 target = link - for attempt in range(0, max_depth): if not islink(target): break target = readlink(target) link_depth = attempt - if not link_depth: log.debug(f'{link} is not a link') - elif link_depth > max_depth or ( - link_depth == max_depth and islink(target) - ): + elif link_depth > max_depth or (link_depth == max_depth and islink(target)): log.warning(f'Exceeded maximum depth {max_depth} while following link {link}') else: log.info(f'Changing sym-link: {link} to point directly to file: {target}') diff --git a/nzb2media/utils/naming.py b/nzb2media/utils/naming.py index be66fe58..9f5c3cb3 100644 --- a/nzb2media/utils/naming.py +++ b/nzb2media/utils/naming.py @@ -6,7 +6,6 @@ import re def sanitize_name(name): """ Remove bad chars from the filename. - >>> sanitize_name('a/b/c') 'a-b-c' >>> sanitize_name('abc') @@ -18,24 +17,20 @@ def sanitize_name(name): """ name = re.sub(r'[\\/*]', '-', name) name = re.sub(r'[:\'<>|?]', '', name) - # remove leading/trailing periods and spaces name = name.strip(' .') - return name def clean_file_name(filename): """ Clean up nzb name by removing any . and _ characters and trailing hyphens. - Is basically equivalent to replacing all _ and . 
with a space, but handles decimal numbers in string, for example: """ filename = re.sub(r'(\D)\.(?!\s)(\D)', r'\1 \2', filename) - filename = re.sub( - r'(\d)\.(\d{4})', r'\1 \2', filename, - ) # if it ends in a year then don't keep the dot + # if it ends in a year then don't keep the dot + filename = re.sub(r'(\d)\.(\d{4})', r'\1 \2', filename) filename = re.sub(r'(\D)\.(?!\s)', r'\1 ', filename) filename = re.sub(r'\.(?!\s)(\D)', r' \1', filename) filename = filename.replace('_', ' ') diff --git a/nzb2media/utils/network.py b/nzb2media/utils/network.py index d2056376..d2ab18be 100644 --- a/nzb2media/utils/network.py +++ b/nzb2media/utils/network.py @@ -26,12 +26,10 @@ def wake_on_lan(ethernet_address): """Send a WakeOnLan request.""" # Create the WoL magic packet magic_packet = make_wake_on_lan_packet(ethernet_address) - # ...and send it to the broadcast address using UDP with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as connection: connection.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) connection.sendto(magic_packet, ('', 9)) - log.info(f'WakeOnLan sent for mac: {ethernet_address}') @@ -52,9 +50,7 @@ def wake_up(): port = int(wol['port']) mac = wol['mac'] max_attempts = 4 - log.info('Trying to wake On lan.') - for attempt in range(0, max_attempts): log.info(f'Attempt {attempt + 1} of {max_attempts}') if test_connection(host, port) == 'Up': @@ -66,7 +62,6 @@ def wake_up(): if test_connection(host, port) == 'Down': # final check. msg = 'System with mac: {0} has not woken after {1} attempts.' log.warning(msg.format(mac, max_attempts)) - log.info('Continuing with the rest of the script.') @@ -108,20 +103,12 @@ def find_download(client_agent, download_id): else: base_url = f'http://{nzb2media.SABNZBD_HOST}:{nzb2media.SABNZBD_PORT}/api' url = base_url - params = { - 'apikey': nzb2media.SABNZBD_APIKEY, - 'mode': 'get_files', - 'output': 'json', - 'value': download_id, - } + params = {'apikey': nzb2media.SABNZBD_APIKEY, 'mode': 'get_files', 'output': 'json', 'value': download_id} try: - response = requests.get( - url, params=params, verify=False, timeout=(30, 120), - ) + response = requests.get(url, params=params, verify=False, timeout=(30, 120)) except requests.ConnectionError: log.error('Unable to open URL') return False # failure - result = response.json() if result['files']: return True diff --git a/nzb2media/utils/nzb.py b/nzb2media/utils/nzb.py index 92167314..774c561c 100644 --- a/nzb2media/utils/nzb.py +++ b/nzb2media/utils/nzb.py @@ -20,11 +20,7 @@ def get_nzoid(input_name): else: base_url = f'http://{nzb2media.SABNZBD_HOST}:{nzb2media.SABNZBD_PORT}/api' url = base_url - params = { - 'apikey': nzb2media.SABNZBD_APIKEY, - 'mode': 'queue', - 'output': 'json', - } + params = {'apikey': nzb2media.SABNZBD_APIKEY, 'mode': 'queue', 'output': 'json'} try: response = requests.get(url, params=params, verify=False, timeout=(30, 120)) except requests.ConnectionError: @@ -33,12 +29,7 @@ def get_nzoid(input_name): try: result = response.json() clean_name = os.path.splitext(os.path.split(input_name)[1])[0] - slots.extend( - [ - (slot['nzo_id'], slot['filename']) - for slot in result['queue']['slots'] - ], - ) + slots.extend([(slot['nzo_id'], slot['filename']) for slot in result['queue']['slots']]) except Exception: log.warning('Data from SABnzbd queue could not be parsed') params['mode'] = 'history' @@ -50,12 +41,7 @@ def get_nzoid(input_name): try: result = response.json() clean_name = os.path.splitext(os.path.split(input_name)[1])[0] - slots.extend( - [ - (slot['nzo_id'], 
slot['name']) - for slot in result['history']['slots'] - ], - ) + slots.extend([(slot['nzo_id'], slot['name']) for slot in result['history']['slots']]) except Exception: log.warning('Data from SABnzbd history could not be parsed') try: diff --git a/nzb2media/utils/parsers.py b/nzb2media/utils/parsers.py index 3e237090..ca3fd996 100644 --- a/nzb2media/utils/parsers.py +++ b/nzb2media/utils/parsers.py @@ -14,8 +14,7 @@ def parse_other(args): def parse_rtorrent(args): - # rtorrent usage: system.method.set_key = event.download.finished,TorrentToMedia, - # 'execute={/path/to/nzbToMedia/TorrentToMedia.py,\'$d.get_base_path=\',\'$d.get_name=\',\'$d.get_custom1=\',\'$d.get_hash=\'}' + # rtorrent usage: system.method.set_key = event.download.finished,TorrentToMedia, # 'execute={/path/to/nzbToMedia/TorrentToMedia.py,\'$d.get_base_path=\',\'$d.get_name=\',\'$d.get_custom1=\',\'$d.get_hash=\'}' input_directory = os.path.normpath(args[1]) try: input_name = args[2] @@ -33,7 +32,6 @@ def parse_rtorrent(args): input_id = args[4] except Exception: input_id = '' - return input_directory, input_name, input_category, input_hash, input_id @@ -53,7 +51,6 @@ def parse_utorrent(args): input_id = args[4] except Exception: input_id = '' - return input_directory, input_name, input_category, input_hash, input_id @@ -64,11 +61,7 @@ def parse_deluge(args): input_hash = args[1] input_id = args[1] try: - input_category = ( - nzb2media.TORRENT_CLASS.core.get_torrent_status(input_id, ['label']) - .get(b'label') - .decode() - ) + input_category = nzb2media.TORRENT_CLASS.core.get_torrent_status(input_id, ['label']).get(b'label').decode() except Exception: input_category = '' return input_directory, input_name, input_category, input_hash, input_id @@ -92,13 +85,7 @@ def parse_synods(args): input_name = os.getenv('TR_TORRENT_NAME') input_hash = os.getenv('TR_TORRENT_HASH') if not input_name: # No info passed. Assume manual download. - return ( - input_directory, - input_name, - input_category, - input_hash, - input_id, - ) + return input_directory, input_name, input_category, input_hash, input_id torrent_id = os.getenv('TR_TORRENT_ID') input_id = f'dbid_{torrent_id}' # res = nzb2media.TORRENT_CLASS.tasks_list(additional_param='detail') @@ -152,7 +139,6 @@ def parse_vuze(args): input_name = cur_input[5] except Exception: pass - return input_directory, input_name, input_category, input_hash, input_id @@ -186,22 +172,11 @@ def parse_qbittorrent(args): input_id = cur_input[3].replace('\'', '') except Exception: input_id = '' - return input_directory, input_name, input_category, input_hash, input_id def parse_args(client_agent, args): - clients = { - 'other': parse_other, - 'rtorrent': parse_rtorrent, - 'utorrent': parse_utorrent, - 'deluge': parse_deluge, - 'transmission': parse_transmission, - 'qbittorrent': parse_qbittorrent, - 'vuze': parse_vuze, - 'synods': parse_synods, - } - + clients = {'other': parse_other, 'rtorrent': parse_rtorrent, 'utorrent': parse_utorrent, 'deluge': parse_deluge, 'transmission': parse_transmission, 'qbittorrent': parse_qbittorrent, 'vuze': parse_vuze, 'synods': parse_synods} try: return clients[client_agent](args) except Exception: diff --git a/nzb2media/utils/paths.py b/nzb2media/utils/paths.py index d16b274f..4a7f48ee 100644 --- a/nzb2media/utils/paths.py +++ b/nzb2media/utils/paths.py @@ -16,12 +16,9 @@ log.addHandler(logging.NullHandler()) def onerror(func, path, exc_info): """ Error handler for ``shutil.rmtree``. 
- If the error is due to an access error (read only file) it attempts to add write permission and then retries. - If the error is for another reason it re-raises the error. - Usage : ``shutil.rmtree(path, onerror=onerror)`` """ if not os.access(path, os.W_OK): @@ -69,17 +66,13 @@ def remote_dir(path): def get_dir_size(input_path): prepend = partial(os.path.join, input_path) - return sum( - (os.path.getsize(f) if os.path.isfile(f) else get_dir_size(f)) - for f in map(prepend, os.listdir(input_path)) - ) + return sum((os.path.getsize(f) if os.path.isfile(f) else get_dir_size(f)) for f in map(prepend, os.listdir(input_path))) def remove_empty_folders(path, remove_root=True): """Remove empty folders.""" if not os.path.isdir(path): return - # remove empty subfolders log.debug(f'Checking for empty folders in:{path}') files = os.listdir(path) @@ -88,7 +81,6 @@ def remove_empty_folders(path, remove_root=True): fullpath = os.path.join(path, each_file) if os.path.isdir(fullpath): remove_empty_folders(fullpath) - # if folder empty, delete it files = os.listdir(path) if len(files) == 0 and remove_root: @@ -128,16 +120,13 @@ def clean_directory(path, files): if not os.path.exists(path): log.info(f'Directory {path} has been processed and removed ...') return - if nzb2media.FORCE_CLEAN and not nzb2media.FAILED: log.info(f'Doing Forceful Clean of {path}') remove_dir(path) return - if files: log.info(f'Directory {path} still contains {len(files)} unprocessed file(s), skipping ...') return - log.info(f'Directory {path} has been processed, removing ...') try: shutil.rmtree(path, onerror=onerror) @@ -150,7 +139,6 @@ def rchmod(path, mod): os.chmod(path, mod) if not os.path.isdir(path): return # Skip files - for root, dirs, files in os.walk(path): for each_dir in dirs: os.chmod(os.path.join(root, each_dir), mod) diff --git a/nzb2media/utils/processes.py b/nzb2media/utils/processes.py index cf4756cc..7de910d7 100644 --- a/nzb2media/utils/processes.py +++ b/nzb2media/utils/processes.py @@ -13,7 +13,6 @@ if os.name == 'nt': from win32event import CreateMutex from win32api import CloseHandle, GetLastError from winerror import ERROR_ALREADY_EXISTS - log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) @@ -76,7 +75,6 @@ class PosixProcess: self.lasterror = False else: self.lasterror = False - if not self.lasterror: # Write my pid into pidFile to keep multiple copies of program # from running @@ -100,18 +98,14 @@ else: def restart(): install_type = nzb2media.version_check.CheckVersion().install_type - status = 0 popen_list = [] - if install_type in ('git', 'source'): popen_list = [sys.executable, nzb2media.APP_FILENAME] - if popen_list: popen_list += nzb2media.SYS_ARGV log.info(f'Restarting nzbToMedia with {popen_list}') with subprocess.Popen(popen_list, cwd=os.getcwd()) as proc: proc.wait() status = proc.returncode - os._exit(status) diff --git a/nzb2media/utils/torrent.py b/nzb2media/utils/torrent.py index 4897ef03..a5049a7e 100644 --- a/nzb2media/utils/torrent.py +++ b/nzb2media/utils/torrent.py @@ -12,20 +12,12 @@ from nzb2media.torrent import utorrent log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) - -torrent_clients = { - 'deluge': deluge, - 'qbittorrent': qbittorrent, - 'transmission': transmission, - 'utorrent': utorrent, - 'synods': synology, -} +torrent_clients = {'deluge': deluge, 'qbittorrent': qbittorrent, 'transmission': transmission, 'utorrent': utorrent, 'synods': synology} def create_torrent_class(client_agent): if nzb2media.APP_NAME != 'TorrentToMedia.py': 
return # Skip loading Torrent for NZBs. - try: agent = torrent_clients[client_agent] except KeyError: diff --git a/nzb2media/version_check.py b/nzb2media/version_check.py index fed1e778..079cf5b6 100644 --- a/nzb2media/version_check.py +++ b/nzb2media/version_check.py @@ -28,7 +28,6 @@ class CheckVersion: self.install_type = self.find_install_type() self.installed_version = None self.installed_branch = None - if self.install_type == 'git': self.updater = GitUpdateManager() elif self.install_type == 'source': @@ -42,7 +41,6 @@ class CheckVersion: def find_install_type(self): """ Determine how this copy of SB was installed. - returns: type of installation. Possible values are: 'win': any compiled windows build 'git': running from source using git @@ -53,27 +51,22 @@ class CheckVersion: install_type = 'git' else: install_type = 'source' - return install_type def check_for_new_version(self, force=False): """ Check the internet for a newer version. - returns: bool, True for new version or False for no new version. - force: if true the VERSION_NOTIFY setting will be ignored and a check will be forced """ if not nzb2media.VERSION_NOTIFY and not force: log.info('Version checking is disabled, not checking for the newest version') return False - log.info(f'Checking if {self.install_type} needs an update') if not self.updater.need_update(): nzb2media.NEWEST_VERSION_STRING = None log.info('No update needed') return False - self.updater.set_newest_text() return True @@ -100,7 +93,6 @@ class GitUpdateManager(UpdateManager): self.github_repo_user = self.get_github_repo_user() self.github_repo = self.get_github_repo() self.branch = self._find_git_branch() - self._cur_commit_hash = None self._newest_commit_hash = None self._num_commits_behind = 0 @@ -111,86 +103,56 @@ class GitUpdateManager(UpdateManager): def _find_working_git(self): test_cmd = 'version' - if nzb2media.GIT_PATH: main_git = f'"{nzb2media.GIT_PATH}"' else: main_git = 'git' - log.debug(f'Checking if we can use git commands: {main_git} {test_cmd}') output, err, exit_status = self._run_git(main_git, test_cmd) - if exit_status == 0: log.debug(f'Using: {main_git}') return main_git log.debug(f'Not using: {main_git}') - # trying alternatives - alternative_git = [] - # osx people who start SB from launchd have a broken path, so try a hail-mary attempt for them if platform.system().lower() == 'darwin': alternative_git.append('/usr/local/git/bin/git') - if platform.system().lower() == 'windows': if main_git != main_git.lower(): alternative_git.append(main_git.lower()) - if alternative_git: log.debug('Trying known alternative git locations') - for cur_git in alternative_git: log.debug(f'Checking if we can use git commands: {cur_git} {test_cmd}') output, err, exit_status = self._run_git(cur_git, test_cmd) - if exit_status == 0: log.debug(f'Using: {cur_git}') return cur_git log.debug(f'Not using: {cur_git}') - # Still haven't found a working git - log.debug( - 'Unable to find your git executable - ' - 'Set git_path in your autoProcessMedia.cfg OR ' - 'delete your .git folder and run from source to enable updates.', - ) - + log.debug('Unable to find your git executable - Set git_path in your autoProcessMedia.cfg OR delete your .git folder and run from source to enable updates.') return None def _run_git(self, git_path, args): - result = None proc_err = None - if not git_path: log.debug('No git specified, can\'t use git commands') proc_status = 1 return result, proc_err, proc_status - cmd = f'{git_path} {args}' - try: log.debug(f'Executing {cmd} with your 
shell in {nzb2media.APP_ROOT}') - with subprocess.Popen( - cmd, - stdin=PIPE, - stdout=PIPE, - stderr=STDOUT, - shell=True, - cwd=nzb2media.APP_ROOT, - ) as proc: + with subprocess.Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT, shell=True, cwd=nzb2media.APP_ROOT) as proc: proc_out, proc_err = proc.communicate() proc_status = proc.returncode - if nzb2media.LOG_GIT: msg = proc_out.decode('utf-8').strip() log.debug(f'git output: {msg}') - except OSError: log.error(f'Command {cmd} didn\'t work') proc_status = 1 - proc_status = 128 if ('fatal:' in result) or proc_err else proc_status if proc_status == 0: log.debug(f'{cmd} : returned successful') @@ -201,21 +163,15 @@ class GitUpdateManager(UpdateManager): if nzb2media.LOG_GIT: log.debug(f'{cmd} returned : {result}, treat as error for now') proc_status = 1 - return result, proc_err, proc_status def _find_installed_version(self): """ Attempt to find the currently installed version of Sick Beard. - Uses git show to get commit version. - Returns: True for success or False for failure """ - output, err, exit_status = self._run_git( - self._git_path, 'rev-parse HEAD', - ) # @UnusedVariable - + output, err, exit_status = self._run_git(self._git_path, 'rev-parse HEAD') if exit_status == 0 and output: cur_commit_hash = output.strip() if not re.match('^[a-z0-9]+$', cur_commit_hash): @@ -229,9 +185,7 @@ class GitUpdateManager(UpdateManager): def _find_git_branch(self): nzb2media.NZBTOMEDIA_BRANCH = self.get_github_branch() - branch_info, err, exit_status = self._run_git( - self._git_path, 'symbolic-ref -q HEAD', - ) # @UnusedVariable + branch_info, err, exit_status = self._run_git(self._git_path, 'symbolic-ref -q HEAD') if exit_status == 0 and branch_info: branch = branch_info.strip().replace('refs/heads/', '', 1) if branch: @@ -242,7 +196,6 @@ class GitUpdateManager(UpdateManager): def _check_github_for_update(self): """ Check Github for a new version. - Uses git commands to check if there is a newer version than the provided commit hash. If there is a newer version it sets _num_commits_behind. 
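For reference, the check described in that docstring runs three git commands through the _run_git wrapper: fetch origin to refresh the remote tracking refs, rev-parse --verify --quiet '@{upstream}' to resolve the upstream tip, and rev-list --left-right '@{upstream}'...HEAD to measure how far the local HEAD has diverged. The sketch below is only an illustration of that sequence, not the project's code: the helper name check_upstream is hypothetical, and it assumes a plain git binary on PATH, a configured upstream branch, and Python 3.7+, whereas the class above routes everything through _run_git with its configurable GIT_PATH and shell=True.

    import subprocess

    def check_upstream(repo_root):
        """Sketch: return (upstream_tip, commits_behind, commits_ahead) for repo_root."""
        def run(*args):
            # Run one git command in the repository and return its trimmed stdout.
            proc = subprocess.run(
                ['git', *args], cwd=repo_root,
                capture_output=True, text=True, check=True,
            )
            return proc.stdout.strip()

        run('fetch', 'origin')  # refresh remote tracking refs
        upstream_tip = run('rev-parse', '--verify', '--quiet', '@{upstream}')  # newest upstream commit hash
        divergence = run('rev-list', '--left-right', '@{upstream}...HEAD')
        behind = divergence.count('<')  # commits only on upstream
        ahead = divergence.count('>')   # commits only on local HEAD
        return upstream_tip, behind, ahead

The counting mirrors the hunk that follows: rev-list --left-right prefixes each commit with < or > depending on which side of the symmetric difference it sits on, so counting the markers yields commits behind and ahead without any further parsing.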
@@ -250,24 +203,15 @@ class GitUpdateManager(UpdateManager): self._newest_commit_hash = None self._num_commits_behind = 0 self._num_commits_ahead = 0 - # get all new info from github - output, err, exit_status = self._run_git( - self._git_path, 'fetch origin', - ) - + output, err, exit_status = self._run_git(self._git_path, 'fetch origin') if not exit_status == 0: log.error('Unable to contact github, can\'t check for update') return - # get latest commit_hash from remote - output, err, exit_status = self._run_git( - self._git_path, 'rev-parse --verify --quiet \'@{upstream}\'', - ) - + output, err, exit_status = self._run_git(self._git_path, 'rev-parse --verify --quiet \'@{upstream}\'') if exit_status == 0 and output: cur_commit_hash = output.strip() - if not re.match('^[a-z0-9]+$', cur_commit_hash): log.debug('Output doesn\'t look like a hash, not using it') return @@ -275,22 +219,15 @@ class GitUpdateManager(UpdateManager): else: log.debug('git didn\'t return newest commit hash') return - # get number of commits behind and ahead (option --count not supported git < 1.7.2) - output, err, exit_status = self._run_git( - self._git_path, 'rev-list --left-right \'@{upstream}\'...HEAD', - ) - + output, err, exit_status = self._run_git(self._git_path, 'rev-list --left-right \'@{upstream}\'...HEAD') if exit_status == 0 and output: - try: self._num_commits_behind = int(output.count('<')) self._num_commits_ahead = int(output.count('>')) - except Exception: log.debug('git didn\'t return numbers for behind and ahead, not using it') return - log.debug(f'cur_commit = {self._cur_commit_hash} % (newest_commit)= {self._newest_commit_hash}, num_commits_behind = {self._num_commits_behind}, num_commits_ahead = {self._num_commits_ahead}') def set_newest_text(self): @@ -305,35 +242,26 @@ class GitUpdateManager(UpdateManager): if not self._find_installed_version(): log.error('Unable to determine installed version via git, please check your logs!') return False - if not self._cur_commit_hash: return True - try: self._check_github_for_update() except Exception as error: log.error(f'Unable to contact github, can\'t check for update: {error!r}') return False - if self._num_commits_behind > 0: return True - return False def update(self): """ Check git for a new version. - Calls git pull origin in order to update Sick Beard. Returns a bool depending on the call's success. 
""" - output, err, exit_status = self._run_git( - self._git_path, f'pull origin {self.branch}', - ) # @UnusedVariable - + output, err, exit_status = self._run_git(self._git_path, f'pull origin {self.branch}') if exit_status == 0: return True - return False @@ -342,99 +270,71 @@ class SourceUpdateManager(UpdateManager): self.github_repo_user = self.get_github_repo_user() self.github_repo = self.get_github_repo() self.branch = self.get_github_branch() - self._cur_commit_hash = None self._newest_commit_hash = None self._num_commits_behind = 0 def _find_installed_version(self): - version_file = os.path.join(nzb2media.APP_ROOT, 'version.txt') - if not os.path.isfile(version_file): self._cur_commit_hash = None return - try: with open(version_file) as fin: self._cur_commit_hash = fin.read().strip(' \n\r') except OSError as error: log.debug(f'Unable to open \'version.txt\': {error}') - if not self._cur_commit_hash: self._cur_commit_hash = None else: nzb2media.NZBTOMEDIA_VERSION = self._cur_commit_hash def need_update(self): - self._find_installed_version() - try: self._check_github_for_update() except Exception as error: log.error(f'Unable to contact github, can\'t check for update: {error!r}') return False - if not self._cur_commit_hash or self._num_commits_behind > 0: return True - return False def _check_github_for_update(self): """ Check Github for a new version. - Uses pygithub to ask github if there is a newer version than the provided commit hash. If there is a newer version it sets Sick Beard's version text. - commit_hash: hash that we're checking against """ self._num_commits_behind = 0 self._newest_commit_hash = None - - repository = github.GitHub( - self.github_repo_user, self.github_repo, self.branch, - ) - + repository = github.GitHub(self.github_repo_user, self.github_repo, self.branch) # try to get newest commit hash and commits behind directly by # comparing branch and current commit if self._cur_commit_hash: - branch_compared = repository.compare( - base=self.branch, head=self._cur_commit_hash, - ) - + branch_compared = repository.compare(base=self.branch, head=self._cur_commit_hash) if 'base_commit' in branch_compared: - self._newest_commit_hash = branch_compared['base_commit'][ - 'sha' - ] - + self._newest_commit_hash = branch_compared['base_commit']['sha'] if 'behind_by' in branch_compared: self._num_commits_behind = int(branch_compared['behind_by']) - # fall back and iterate over last 100 (items per page in gh_api) commits if not self._newest_commit_hash: - for cur_commit in repository.commits(): if not self._newest_commit_hash: self._newest_commit_hash = cur_commit['sha'] if not self._cur_commit_hash: break - if cur_commit['sha'] == self._cur_commit_hash: break - # when _cur_commit_hash doesn't match anything _num_commits_behind == 100 self._num_commits_behind += 1 - log.debug(f'cur_commit = {self._cur_commit_hash} % (newest_commit)= {self._newest_commit_hash}, num_commits_behind = {self._num_commits_behind}') def set_newest_text(self): - # if we're up to date then don't set this nzb2media.NEWEST_VERSION_STRING = None - if not self._cur_commit_hash: log.error('Unknown current version number, don\'t know if we should update or not') elif self._num_commits_behind > 0: @@ -444,67 +344,46 @@ class SourceUpdateManager(UpdateManager): def update(self): """Download and install latest source tarball from github.""" - tar_download_url = ( - f'https://github.com/{self.github_repo_user}/{self.github_repo}/tarball/{self.branch}' - ) + tar_download_url = 
f'https://github.com/{self.github_repo_user}/{self.github_repo}/tarball/{self.branch}' version_path = os.path.join(nzb2media.APP_ROOT, 'version.txt') - try: # prepare the update dir sb_update_dir = os.path.join(nzb2media.APP_ROOT, 'sb-update') - if os.path.isdir(sb_update_dir): log.info(f'Clearing out update folder {sb_update_dir} before extracting') shutil.rmtree(sb_update_dir) - log.info(f'Creating update folder {sb_update_dir} before extracting') os.makedirs(sb_update_dir) - # retrieve file log.info(f'Downloading update from {tar_download_url!r}') - tar_download_path = os.path.join( - sb_update_dir, 'nzbtomedia-update.tar', - ) + tar_download_path = os.path.join(sb_update_dir, 'nzbtomedia-update.tar') urlretrieve(tar_download_url, tar_download_path) - if not os.path.isfile(tar_download_path): log.error(f'Unable to retrieve new version from {tar_download_url}, can\'t update') return False - if not tarfile.is_tarfile(tar_download_path): log.error(f'Retrieved version from {tar_download_url} is corrupt, can\'t update') return False - # extract to sb-update dir log.info(f'Extracting file {tar_download_path}') with tarfile.open(tar_download_path) as tar: tar.extractall(sb_update_dir) - # delete .tar.gz log.info(f'Deleting file {tar_download_path}') os.remove(tar_download_path) - # find update dir name - update_dir_contents = [ - x - for x in os.listdir(sb_update_dir) - if os.path.isdir(os.path.join(sb_update_dir, x)) - ] + update_dir_contents = [x for x in os.listdir(sb_update_dir) if os.path.isdir(os.path.join(sb_update_dir, x))] if len(update_dir_contents) != 1: log.error(f'Invalid update data, update failed: {update_dir_contents}') return False content_dir = os.path.join(sb_update_dir, update_dir_contents[0]) - # walk temp folder and move files to main folder log.info(f'Moving files from {content_dir} to {nzb2media.APP_ROOT}') - for dirname, _, filenames in os.walk( - content_dir, - ): # @UnusedVariable - dirname = dirname[len(content_dir) + 1:] + for dirname, _, filenames in os.walk(content_dir): + dirname = dirname[len(content_dir) + 1 :] for curfile in filenames: old_path = os.path.join(content_dir, dirname, curfile) new_path = os.path.join(nzb2media.APP_ROOT, dirname, curfile) - # Avoid DLL access problem on WIN32/64 # These files needing to be updated manually # or find a way to kill the access from memory @@ -518,11 +397,9 @@ class SourceUpdateManager(UpdateManager): # Trash the updated file without moving in new path os.remove(old_path) continue - if os.path.isfile(new_path): os.remove(new_path) os.renames(old_path, new_path) - # update version.txt with commit hash try: with open(version_path, 'w') as ver_file: @@ -530,10 +407,8 @@ class SourceUpdateManager(UpdateManager): except OSError as error: log.error(f'Unable to write version file, update not complete: {error}') return False - except Exception as error: log.error(f'Error while trying to update: {error}') log.debug(f'Traceback: {traceback.format_exc()}') return False - return True From b75d448ff7c658995e7e22b6639f7f73d9629aa8 Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 18 Dec 2022 18:30:43 -0500 Subject: [PATCH 11/14] Fix lint errors --- .pre-commit-config.yaml | 13 + .pylintrc.ini | 528 ++++++++++++++++++++++++++++++ TorrentToMedia.py | 35 +- nzb2media/__init__.py | 14 +- nzb2media/auto_process/common.py | 2 +- nzb2media/auto_process/movies.py | 14 +- nzb2media/auto_process/music.py | 14 +- nzb2media/auto_process/tv.py | 27 +- nzb2media/configuration.py | 125 +++---- nzb2media/extractor/__init__.py | 10 +- 
nzb2media/github_api.py | 6 +- nzb2media/main_db.py | 11 +- nzb2media/managers/pymedusa.py | 2 +- nzb2media/managers/sickbeard.py | 19 +- nzb2media/plugins/plex.py | 5 +- nzb2media/processor/nzb.py | 2 +- nzb2media/processor/nzbget.py | 5 +- nzb2media/scene_exceptions.py | 4 +- nzb2media/torrent/utorrent.py | 1 + nzb2media/transcoder.py | 88 +++-- nzb2media/user_scripts.py | 12 +- nzb2media/utils/common.py | 6 +- nzb2media/utils/encoding.py | 6 +- nzb2media/utils/files.py | 3 +- nzb2media/utils/identification.py | 8 +- nzb2media/utils/links.py | 7 +- nzb2media/utils/naming.py | 3 +- nzb2media/utils/parsers.py | 4 +- nzb2media/utils/paths.py | 2 +- nzb2media/utils/processes.py | 11 +- nzb2media/utils/torrent.py | 7 +- nzb2media/version_check.py | 36 +- nzbToCouchPotato.py | 4 +- nzbToGamez.py | 4 +- nzbToHeadPhones.py | 4 +- nzbToLazyLibrarian.py | 4 +- nzbToLidarr.py | 4 +- nzbToMedia.py | 2 +- nzbToMylar.py | 4 +- nzbToNzbDrone.py | 4 +- nzbToRadarr.py | 4 +- nzbToSiCKRAGE.py | 4 +- nzbToSickBeard.py | 4 +- nzbToWatcher3.py | 4 +- 44 files changed, 835 insertions(+), 241 deletions(-) create mode 100644 .pylintrc.ini diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d7c2ba05..1df66caf 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -23,3 +23,16 @@ repos: # rev: v2.0.0 # hooks: # - id: autopep8 +- repo: local + hooks: + - id: pylint + name: pylint + entry: pylint + language: system + types: [python] + args: + [ + "-rn", # Only display messages + "-sn", # Disable score + "--rcfile=.pylintrc.ini", # Link to your config file + ] diff --git a/.pylintrc.ini b/.pylintrc.ini new file mode 100644 index 00000000..cc89f12c --- /dev/null +++ b/.pylintrc.ini @@ -0,0 +1,528 @@ +[MAIN] +load-plugins= + pylint.extensions.broad_try_clause, + pylint.extensions.code_style, + pylint.extensions.emptystring, + pylint.extensions.comparetozero, + pylint.extensions.comparison_placement, + pylint.extensions.confusing_elif, + pylint.extensions.for_any_all, + pylint.extensions.consider_ternary_expression, + pylint.extensions.bad_builtin, + pylint.extensions.mccabe, +; pylint.extensions.dict_init_mutate, + pylint.extensions.docstyle, +; pylint.extensions.dunder, + pylint.extensions.check_elif, + pylint.extensions.empty_comment, + pylint.extensions.eq_without_hash, + pylint.extensions.private_import, +; pylint.extensions.magic_value, + pylint.extensions.redefined_variable_type, + pylint.extensions.no_self_use, + pylint.extensions.overlapping_exceptions, + pylint.extensions.docparams, + pylint.extensions.redefined_loop_name, + pylint.extensions.set_membership, + pylint.extensions.typing, + pylint.extensions.while_used, + +[MESSAGES CONTROL] +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". 
If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" + +; # --- FATAL --------- +; F0001, # fatal +; F0002, # astroid-error +; F0010, # parse-error +; F0011, # config-parse-error +; F0202, # method-check-failed + +; # --- ERROR --------- +; E0001, # syntax-error +; E0011, # unrecognized-inline-option +; E0013, # bad-plugin-value +; E0014, # bad-configuration-SECTION +; E0015, # unrecognized-option +; E0100, # init-is-generator +; E0101, # return-in-init +; E0102, # function-redefined +; E0103, # not-in-loop +; E0104, # return-outside-function +; E0105, # yield-outside-function, +; E0106, # return-arg-in-generator +; E0107, # nonexistent-operator +; E0108, # duplicate-argument-name +; E0110, # abstract-class-instantiated +; E0111, # bad-reversed-sequence +; E0112, # too-many-star-expressions +; E0113, # invalid-star-assignment-target +; E0114, # star-needs-assignment-target +; E0115, # nonlocal-and-global +; E0116, # continue-in-finally +; E0117, # nonlocal-without-binding +; E0118, # used-prior-global-declaration +; E0119, # misplaced-format-function +; E0202, # method-hidden +; E0203, # access-member-before-definition +; E0211, # no-method-argument +; E0213, # no-self-argument +; E0236, # invalid-slots-object +; E0237, # assigning-non-slot +; E0238, # invalid-slots +; E0239, # inherit-non-class +; E0240, # inconsistent-mro +; E0241, # duplicate-bases +; E0242, # class-variable-slots-conflict +; E0243, # invalid-class-object +; E0244, # invalid-enum-extension +; E0301, # non-iterator-returned +; E0302, # unexpected-special-method-signature +; E0303, # invalid-length-returned +; E0304, # invalid-bool-returned +; E0305, # invalid-index-returned +; E0306, # invalid-repr-returned +; E0307, # invalid-str-returned +; E0308, # invalid-bytes-returned +; E0309, # invalid-hash-returned +; E0310, # invalid-length-hint-returned +; E0311, # invalid-format-returned +; E0312, # invalid-getnewargs-returned +; E0313, # invalid-getnewargs-ex-returned +; E0401, # import-error +; E0402, # relative-beyond-top-level +; E0601, # used-before-assignment +; E0602, # undefined-variable +; E0603, # undefined-all-variable +; E0604, # invalid-all-object +; E0605, # invalid-all-format +; E0611, # no-name-in-module +; E0633, # unpacking-non-sequence +; E0643, # potential-index-error +; E0701, # bad-except-order +; E0702, # raising-bad-type +; E0704, # misplaced-bare-raise +; E0705, # bad-exception-cause +; E0710, # raising-non-exception +; E0711, # notimplemented-raised +; E0712, # catching-non-exception +; E1003, # bad-super-call +; E1101, # no-member +; E1102, # not-callable +; E1111, # assignment-from-no-return +; E1120, # no-value-for-parameter +; E1121, # too-many-function-args +; E1123, # unexpected-keyword-arg +; E1124, # redundant-keyword-arg +; E1125, # missing-kwoa +; E1126, # invalid-sequence-index +; E1127, # invalid-slice-index +; E1128, # assignment-from-none +; E1129, # not-context-manager +; E1130, # invalid-unary-operand-type +; E1131, # unsupported-binary-operation +; E1132, # repeated-keyword +; E1133, # not-an-iterable +; E1134, # not-a-mapping +; E1135, # unsupported-membership-test +; E1136, # unsubscriptable-object +; E1137, # unsupported-assignment-operation +; E1138, # unsupported-delete-operation +; E1139, # invalid-metaclass +; E1141, # dict-iter-missing-items +; E1142, # await-outside-async +; E1143, # unhashable-member +; E1144, # invalid-slice-step +; E1200, # logging-unsupported-format +; E1201, # 
logging-format-truncated +; E1205, # logging-too-many-args +; E1206, # logging-too-few-args +; E1300, # bad-format-character +; E1301, # truncated-format-string +; E1302, # mixed-format-string +; E1303, # format-needs-mapping +; E1304, # missing-format-string-key +; E1305, # too-many-format-args +; E1306, # too-few-format-args +; E1307, # bad-string-format-type +; E1310, # bad-str-strip-call +; E1507, # invalid-envvar-value +; E1519, # singledispatch-method +; E1520, # singledispatchmethod-function +; E1700, # yield-inside-async-function +; E1701, # not-async-context-manager +; E2501, # invalid-unicode-codec +; E2502, # bidirectional-unicode +; E2510, # invalid-character-backspace +; E2511, # invalid-character-carriage-return +; E2512, # invalid-character-sub +; E2513, # invalid-character-esc +; E2514, # invalid-character-nul +; E2515, # invalid-character-zero-width-space +; E4702, # modified-iterating-dict +; E4703, # modified-iterating-set +; E6004, # broken-noreturn +; E6005, # broken-collections-callable +; # --- WARNING ------- +; W0012, # unknown-option-value +; W0101, # unreachable +; W0102, # dangerous-default-value +; W0104, # pointless-statement +; W0105, # pointless-string-statement +; W0106, # expression-not-assigned +; W0107, # unnecessary-pass +; W0108, # unnecessary-lambda +; W0109, # duplicate-key +; W0120, # useless-else-on-loop +; W0122, # exec-used +; W0123, # eval-used +; W0124, # confusing-with-statement +; W0125, # using-constant-test +; W0126, # missing-parentheses-for-call-in-test +; W0127, # self-assigning-variable +; W0128, # redeclared-assigned-name +; W0129, # assert-on-string-literal +; W0130, # duplicate-value +; W0131, # named-expr-without-context +; W0141, # bad-builtin +; W0143, # comparison-with-callable +; W0149, # while-used +; W0150, # lost-exception +; W0160, # consider-ternary-expression +; W0177, # nan-comparison +; W0199, # assert-on-tuple +; W0201, # attribute-defined-outside-init +; W0211, # bad-staticmethod-argument +; W0212, # protected-access +; W0221, # arguments-differ +; W0222, # signature-differs +; W0223, # abstract-method +; W0231, # super-init-not-called +; W0233, # non-parent-init-called +; W0236, # invalid-overridden-method +; W0237, # arguments-renamed +; W0238, # unused-private-member +; W0239, # overridden-final-method +; W0240, # subclassed-final-class +; W0244, # redefined-slots-in-subclass +; W0245, # super-without-brackets +; W0246, # useless-parent-delegation +; W0301, # unnecessary-semicolon +; W0311, # bad-indentation +; W0401, # wildcard-import +; W0404, # reimported +; W0406, # import-self +; W0407, # preferred-module +; W0410, # misplaced-future +; W0416, # shadowed-import +; W0511, # fixme +; W0601, # global-variable-undefined +; W0602, # global-variable-not-assigned +; W0603, # global-statement +; W0604, # global-at-module-level +; W0611, # unused-import +; W0612, # unused-variable +; W0613, # unused-argument +; W0614, # unused-wildcard-import +; W0621, # redefined-outer-name +; W0622, # redefined-builtin +; W0631, # undefined-loop-variable +; W0632, # unbalanced-tuple-unpacking +; W0640, # cell-var-from-loop +; W0641, # possibly-unused-variable +; W0642, # self-cls-assignment +; W0644, # unbalanced-dict-unpacking +; W0702, # bare-except +; W0705, # duplicate-except +; W0706, # try-except-raise +; W0707, # raise-missing-from +; W0711, # binary-op-exception +; W0714, # overlapping-except +; W0715, # raising-format-tuple +; W0716, # wrong-exception-operation +; W0717, # too-many-try-statements +; W0718, # 
broad-exception-caught +; W0719, # broad-exception-raised +; W1113, # keyword-arg-before-vararg +; W1114, # arguments-out-of-order +; W1115, # non-str-assignment-to-dunder-name +; W1116, # isinstance-second-argument-not-valid-type +; W1201, # logging-not-lazy +; W1202, # logging-format-interpolation +; W1203, # logging-fstring-interpolation +; W1300, # bad-format-string-key +; W1301, # unused-format-string-key +; W1302, # bad-format-string +; W1303, # missing-format-argument-key +; W1304, # unused-format-string-argument +; W1305, # format-combined-specification +; W1306, # missing-format-attribute +; W1307, # invalid-format-index +; W1308, # duplicate-string-formatting-argument +; W1309, # f-string-without-interpolation +; W1310, # format-string-without-interpolation +; W1401, # anomalous-backslash-in-string +; W1402, # anomalous-unicode-escape-in-string +; W1404, # implicit-str-concat +; W1405, # inconsistent-quotes +; W1406, # redundant-u-string-prefix +; W1501, # bad-open-mode +; W1502, # boolean-datetime +; W1503, # redundant-unittest-assert +; W1506, # bad-thread-instantiation +; W1507, # shallow-copy-environ +; W1508, # invalid-envvar-default +; W1509, # subprocess-popen-preexec-fn +; W1510, # subprocess-run-check +; W1514, # unspecified-encoding +; W1515, # forgotten-debug-statement +; W1518, # method-cache-max-size-none +; W1641, # eq-without-hash +; W2101, # useless-with-lock +; W2301, # unnecessary-ellipsis +; W2402, # non-ascii-file-name +; W2601, # using-f-string-in-unsupported-version +; W2602, # using-final-decorator-in-unsupported-version +; W2901, # redefined-loop-name +; W3101, # missing-timeout +; W3201, # bad-dunder-name +; W3301, # nested-min-max +; W4701, # modified-iterating-list +; W4901, # deprecated-module +; W4902, # deprecated-method +; W4903, # deprecated-argument +; W4904, # deprecated-class +; W4905, # deprecated-decorator +; W6001, # deprecated-typing-alias +; W9005, # multiple-constructor-doc +; W9006, # missing-raises-doc +; W9008, # redundant-returns-doc +; W9010, # redundant-yields-doc +; W9011, # missing-return-doc +; W9012, # missing-return-type-doc +; W9013, # missing-yield-doc +; W9015, # missing-param-doc +; W9014, # missing-yield-type-doc +; W9016, # missing-type-doc +; W9017, # differing-param-doc +; W9018, # differing-type-doc +; W9019, # useless-param-doc +; W9020, # useless-type-doc +; W9021, # missing-any-param-doc + +; # --- CONVENTION ---- +; C0103, # invalid-name +; C0104, # disallowed-name +; C0105, # typevar-name-incorrect-variance +; C0112, # empty-docstring +; C0113, # unneeded-not +; C0114, # missing-module-docstring +; C0115, # missing-class-docstring +; C0116, # missing-function-docstring +; C0121, # singleton-comparison +; C0123, # unidiomatic-typecheck +; C0131, # typevar-double-variance +; C0132, # typevar-name-mismatch +; C0198, # bad-docstring-quotes +; C0199, # docstring-first-line-empty +; C0200, # consider-using-enumerate +; C0201, # consider-iterating-dictionary +; C0202, # bad-classmethod-argument +; C0203, # bad-mcs-method-argument +; C0204, # bad-mcs-classmethod-argument +; C0205, # single-string-used-for-slots +; C0206, # consider-using-dict-items +; C0207, # use-maxsplit-arg +; C0208, # use-sequence-for-iteration +; C0209, # consider-using-f-string +; C0301, # line-too-long +; C0302, # too-many-lines +; C0303, # trailing-whitespace +; C0304, # missing-final-newline +; C0305, # trailing-newlines +; C0321, # multiple-statements +; C0325, # superfluous-parens +; C0327, # mixed-line-endings +; C0328, # 
unexpected-line-ending-format +; C0401, # wrong-spelling-in-comment +; C0402, # wrong-spelling-in-docstring +; C0403, # invalid-characters-in-docstring +; C0410, # multiple-imports +; C0411, # wrong-import-order +; C0412, # ungrouped-imports +; C0413, # wrong-import-position +; C0414, # useless-import-alias +; C0415, # import-outside-toplevel +; C0501, # consider-using-any-or-all +; C1802, # use-implicit-booleaness-not-len +; C1803, # use-implicit-booleaness-not-comparison +; C1901, # compare-to-empty-string +; C2001, # compare-to-zero +; C2201, # misplaced-comparison-constant +; C2401, # non-ascii-name +; C2403, # non-ascii-module-import +; C2503, # bad-file-encoding +; C2701, # import-private-name +; C2801, # unnecessary-dunder-call +; C3001, # unnecessary-lambda-assignment +; C3002, # unnecessary-direct-lambda-call +; C3401, # dict-init-mutate + +; # --- REFACTOR ------ +; R0022, # useless-option-value +; R0123, # literal-comparison +; R0124, # comparison-with-itself +; R0133, # comparison-of-constants +; R0202, # no-classmethod-decorator +; R0203, # no-staticmethod-decorator +; R0204, # redefined-variable-type +; R0205, # useless-object-inheritance +; R0206, # property-with-parameters +; R0401, # cyclic-import +; R0402, # consider-using-from-import +; R0801, # duplicate-code +; R0901, # too-many-ancestors +; R0902, # too-many-instance-attributes +; R0903, # too-few-public-methods +; R0904, # too-many-public-methods +; R0911, # too-many-return-statements +; R0912, # too-many-branches +; R0913, # too-many-arguments +; R0914, # too-many-locals +; R0915, # too-many-statements +; R0916, # too-many-boolean-expressions +; R1260, # too-complex +; R1701, # consider-merging-isinstance +; R1702, # too-many-nested-blocks +; R1703, # simplifiable-if-statement +; R1704, # redefined-argument-from-local +; R1705, # no-else-return +; R1706, # consider-using-ternary +; R1707, # trailing-comma-tuple +; R1708, # stop-iteration-return +; R1709, # simplify-boolean-expression +; R1710, # inconsistent-return-statements +; R1711, # useless-return +; R1712, # consider-swap-variables +; R1713, # consider-using-join +; R1714, # consider-using-in +; R1715, # consider-using-get +; R1716, # chained-comparison +; R1717, # consider-using-dict-comprehension +; R1718, # consider-using-set-comprehension +; R1719, # simplifiable-if-expression +; R1720, # no-else-raise +; R1721, # unnecessary-comprehension +; R1722, # consider-using-sys-exit +; R1723, # no-else-break +; R1724, # no-else-continue +; R1725, # super-with-arguments +; R1726, # simplifiable-condition +; R1727, # condition-evals-to-constant +; R1728, # consider-using-generator +; R1729, # use-a-generator +; R1730, # consider-using-min-builtin +; R1731, # consider-using-max-builtin +; R1732, # consider-using-with +; R1733, # unnecessary-dict-index-lookup +; R1734, # use-list-literal +; R1735, # use-dict-literal +; R1736, # unnecessary-list-index-lookup +; R2004, # magic-value-comparison +; R2044, # empty-comment +; R5501, # else-if-used +; R5601, # confusing-consecutive-elif +; R6002, # consider-using-alias +; R6003, # consider-alternative-union-syntax +; R6006, # redundant-typehint-argument +; R6101, # consider-using-namedtuple-or-dataclass +; R6102, # consider-using-tuple +; R6103, # consider-using-assignment-expr +; R6104, # consider-using-augmented-assign +; R6201, # use-set-for-membership +; R6301, # no-self-use + +; # --- INFORMATION --- +; I0001, # raw-checker-failed +; I0010, # bad-inline-option +; I0011, # locally-disabled +; I0013, # file-ignored +; 
I0020, # suppressed-message +; I0021, # useless-suppression +; I0022, # deprecated-pragma +; I0023, # use-symbolic-message-instead +; I1101, # c-extension-no-member + +disable= + E1101, # no-member + + W0141, # bad-builtin + W0149, # while-used + W0160, # consider-ternary-expression + W0201, # attribute-defined-outside-init + W0212, # protected-access + W0511, # fixme + W0601, # global-variable-undefined + W0602, # global-variable-not-assigned + W0603, # global-statement + W0612, # unused-variable + W0621, # redefined-outer-name + W0631, # undefined-loop-variable + W0703, # broad-except + W0717, # too-many-try-statements + W1202, # logging-format-interpolation + W1203, # logging-fstring-interpolation + W1404, # implicit-str-concat + W2901, # redefined-loop-name + W3101, # missing-timeout + W6001, # deprecated-typing-alias + W9016, # missing-type-do + + C0103, # invalid-name + C0114, # missing-module-docstring + C0115, # missing-class-docstring + C0116, # missing-function-docstring + C0199, # docstring-first-line-empty + C0201, # consider-iterating-dictionary + C0206, # consider-using-dict-items + C0301, # line-too-long + C0415, # import-outside-toplevel + C1901, # compare-to-empty-string + C2001, # compare-to-zero + + R0204, # redifined-variable-type + R0401, # cyclic-import + R0801, # duplicate-code + R0903, # too-few-public-methods + R0902, # too-many-instance-attributes + R0911, # too-many-return-statements + R0912, # too-many-branches + R0913, # too-many-arguments + R0914, # too-many-locals + R0915, # too-many-statements + R0916, # too-many-boolean-expressions + R1260, # too-complex + R1702, # too-many-nested-blocks + R1704, # redefined-argument-from-local + R1710, # inconsistent-return-statements + R5501, # else-if-used + R5601, # confusing-consecutive-elif + R6003, # consider-alternative-union-syntax + R6102, # consider-using-tuple + R6103, # consider-using-assignment-expr + + I0011, # locally-disabled + I0020, # suppressed-message + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= diff --git a/TorrentToMedia.py b/TorrentToMedia.py index a71a31f9..b8b367e5 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -60,7 +60,7 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp log.debug(f'Determined Directory: {input_directory} | Name: {input_name} | Category: {input_category}') - # auto-detect section + # auto-detect SECTION section = nzb2media.CFG.findsection(input_category).isenabled() if section is None: # Check for user_scripts for 'ALL' and 'UNCAT' if usercat in nzb2media.CATEGORIES: @@ -122,10 +122,9 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp log.debug(f'Scanning files in directory: {input_directory}') - if section_name in ['HeadPhones', 'Lidarr']: - nzb2media.NOFLATTEN.extend( - input_category, - ) # Make sure we preserve folder structure for HeadPhones. + if section_name in {'HeadPhones', 'Lidarr'}: + # Make sure we preserve folder structure for HeadPhones. 
+ nzb2media.NOFLATTEN.extend(input_category) now = datetime.datetime.now() @@ -138,10 +137,10 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp log.debug(f'Found 1 file to process: {input_directory}') else: log.debug(f'Found {len(input_files)} files in {input_directory}') - for inputFile in input_files: - file_path = os.path.dirname(inputFile) - file_name, file_ext = os.path.splitext(os.path.basename(inputFile)) - full_file_name = os.path.basename(inputFile) + for input_file in input_files: + file_path = os.path.dirname(input_file) + file_name, file_ext = os.path.splitext(os.path.basename(input_file)) + full_file_name = os.path.basename(input_file) target_file = nzb2media.os.path.join(output_destination, full_file_name) if input_category in nzb2media.NOFLATTEN: @@ -152,9 +151,9 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp log.debug(f'Setting outputDestination to {os.path.dirname(target_file)} to preserve folder structure') if root == 1: if not found_file: - log.debug(f'Looking for {input_name} in: {inputFile}') + log.debug(f'Looking for {input_name} in: {input_file}') if any([ - nzb2media.sanitize_name(input_name) in nzb2media.sanitize_name(inputFile), + nzb2media.sanitize_name(input_name) in nzb2media.sanitize_name(input_file), nzb2media.sanitize_name(file_name) in nzb2media.sanitize_name(input_name), ]): found_file = True @@ -163,8 +162,8 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp continue if root == 2: - mtime_lapse = now - datetime.datetime.fromtimestamp(os.path.getmtime(inputFile)) - ctime_lapse = now - datetime.datetime.fromtimestamp(os.path.getctime(inputFile)) + mtime_lapse = now - datetime.datetime.fromtimestamp(os.path.getmtime(input_file)) + ctime_lapse = now - datetime.datetime.fromtimestamp(os.path.getctime(input_file)) if not found_file: log.debug('Looking for files with modified/created dates less than 5 minutes old.') @@ -176,10 +175,10 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp if torrent_no_link == 0: try: - nzb2media.copy_link(inputFile, target_file, nzb2media.USE_LINK) + nzb2media.copy_link(input_file, target_file, nzb2media.USE_LINK) nzb2media.remove_read_only(target_file) except Exception: - log.error(f'Failed to link: {inputFile} to {target_file}') + log.error(f'Failed to link: {input_file} to {target_file}') input_name, output_destination = convert_to_ascii(input_name, output_destination) @@ -192,7 +191,7 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp nzb2media.flatten(output_destination) # Now check if video files exist in destination: - if section_name in ['SickBeard', 'SiCKRAGE', 'NzbDrone', 'Sonarr', 'CouchPotato', 'Radarr', 'Watcher3']: + if section_name in {'SickBeard', 'SiCKRAGE', 'NzbDrone', 'Sonarr', 'CouchPotato', 'Radarr', 'Watcher3'}: num_videos = len( nzb2media.list_media_files(output_destination, media=True, audio=False, meta=False, archives=False), ) @@ -232,7 +231,7 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp 'Mylar': comics.process, 'Gamez': games.process, } - if input_hash and section_name in ['SickBeard', 'SiCKRAGE', 'NzbDrone', 'Sonarr']: + if input_hash and section_name in {'SickBeard', 'SiCKRAGE', 'NzbDrone', 'Sonarr'}: input_hash = input_hash.upper() processor = process_map[section_name] result = processor( @@ -358,4 +357,4 @@ def main(args): if __name__ == '__main__': - exit(main(sys.argv)) + 
sys.exit(main(sys.argv)) diff --git a/nzb2media/__init__.py b/nzb2media/__init__.py index 55b9c90b..a143b621 100644 --- a/nzb2media/__init__.py +++ b/nzb2media/__init__.py @@ -115,7 +115,7 @@ FORCE_CLEAN = None SAFE_MODE = None NOEXTRACTFAILED = None NZB_CLIENT_AGENT = None -SABNZBD_HOST = None +SABNZBD_HOST = '' SABNZBD_PORT = None SABNZBD_APIKEY = None NZB_DEFAULT_DIRECTORY = None @@ -256,7 +256,7 @@ def configure_locale(): except (locale.Error, OSError): pass # For OSes that are poorly configured I'll just randomly force UTF-8 - if not SYS_ENCODING or SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'): + if not SYS_ENCODING or SYS_ENCODING in {'ANSI_X3.4-1968', 'US-ASCII', 'ASCII'}: SYS_ENCODING = 'UTF-8' @@ -275,7 +275,7 @@ def configure_migration(): CFG = Config.addnzbget() else: # load newly migrated config log.info(f'Loading config from [{CONFIG_FILE}]') - CFG = Config() + CFG = Config(None) def configure_logging_part_2(): @@ -605,7 +605,7 @@ def configure_transcoder(): ABITRATE3 = transcode_defaults[DEFAULTS]['ABITRATE3'] SCODEC = transcode_defaults[DEFAULTS]['SCODEC'] transcode_defaults = {} # clear memory - if transcode_defaults in ['mp4-scene-release'] and not OUTPUTQUALITYPERCENT: + if transcode_defaults in {'mp4-scene-release'} and not OUTPUTQUALITYPERCENT: OUTPUTQUALITYPERCENT = 100 if VEXTENSION in allow_subs: ALLOWSUBS = 1 @@ -643,7 +643,7 @@ def configure_passwords_file(): def configure_sections(section): global SECTIONS global CATEGORIES - # check for script-defied section and if None set to allow sections + # check for script-defied SECTION and if None set to allow sections SECTIONS = CFG[tuple(x for x in CFG if CFG[x].sections and CFG[x].isenabled()) if not section else (section,)] for section, subsections in SECTIONS.items(): CATEGORIES.extend([subsection for subsection in subsections if CFG[section][subsection].isenabled()]) @@ -662,11 +662,11 @@ def configure_utility_locations(): FFPROBE = os.path.join(FFMPEG_PATH, 'ffprobe.exe') SEVENZIP = os.path.join(APP_ROOT, 'nzb2media', 'extractor', 'bin', platform.machine(), '7z.exe') SHOWEXTRACT = int(str(CFG['Windows']['show_extraction']), 0) - if not (os.path.isfile(FFMPEG)): # problem + if not os.path.isfile(FFMPEG): # problem FFMPEG = None log.warning('Failed to locate ffmpeg.exe. Transcoding disabled!') log.warning('Install ffmpeg with x264 support to enable this feature ...') - if not (os.path.isfile(FFPROBE)): + if not os.path.isfile(FFPROBE): FFPROBE = None if CHECK_MEDIA: log.warning('Failed to locate ffprobe.exe. 
Video corruption detection disabled!') diff --git a/nzb2media/auto_process/common.py b/nzb2media/auto_process/common.py index 8b134659..47544ee5 100644 --- a/nzb2media/auto_process/common.py +++ b/nzb2media/auto_process/common.py @@ -50,7 +50,7 @@ def command_complete(url, params, headers, section): return None -def completed_download_handling(url2, headers, section='MAIN'): +def completed_download_handling(url2, headers): try: response = requests.get(url2, params={}, headers=headers, stream=True, verify=False, timeout=(30, 60)) except requests.ConnectionError: diff --git a/nzb2media/auto_process/movies.py b/nzb2media/auto_process/movies.py index 0460a34e..7b2144ca 100644 --- a/nzb2media/auto_process/movies.py +++ b/nzb2media/auto_process/movies.py @@ -267,7 +267,8 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = remove_dir(dir_name) if not release_id and not media_id: log.error(f'Could not find a downloaded movie in the database matching {input_name}, exiting!') - return ProcessResult(message='{0}: Failed to post-process - Failed download not found in {0}'.format(section), status_code=1) + msg = f'{section}: Failed to post-process - Failed download not found in {section}' + return ProcessResult(message=msg, status_code=1) if release_id: log.debug(f'Setting failed release {input_name} to ignored ...') url = f'{base_url}release.ignore' @@ -277,7 +278,8 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = response = requests.get(url, params=params, verify=False, timeout=(30, 120)) except requests.ConnectionError: log.error(f'Unable to open URL {url}') - return ProcessResult(message='{0}: Failed to post-process - Unable to connect to {0}'.format(section), status_code=1) + msg = f'{section}: Failed to post-process - Unable to connect to {section}' + return ProcessResult(message=msg, status_code=1) result = response.json() if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]: log.error(f'Server returned status {response.status_code}') @@ -338,12 +340,12 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = command_status = command_complete(url, params, headers, section) if command_status: log.debug(f'The Scan command return status: {command_status}') - if command_status in ['completed']: + if command_status in {'completed'}: log.debug('The Scan command has completed successfully. Renaming was successful.') return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') - if command_status in ['failed']: + if command_status in {'failed'}: log.debug('The Scan command has failed. Renaming was not successful.') - # return ProcessResult(message='{0}: Failed to post-process {1}'.format(section, input_name), status_code=1) + # return ProcessResult(message='{0}: Failed to post-process {1}'.format(SECTION, input_name), status_code=1) if not os.path.isdir(dir_name): log.debug(f'SUCCESS: Input Directory [{dir_name}] has been processed and removed') return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') @@ -353,7 +355,7 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = # pause and let CouchPotatoServer/Radarr catch its breath time.sleep(10 * wait_for) # The status hasn't changed. we have waited wait_for minutes which is more than enough. uTorrent can resume seeding now. 
- if section == 'Radarr' and completed_download_handling(url2, headers, section=section): + if section == 'Radarr' and completed_download_handling(url2, headers): log.debug(f'The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {section}.') return ProcessResult.success(f'{section}: Complete DownLoad Handling is enabled. Passing back to {section}') log.warning(f'{input_name} does not appear to have changed status after {wait_for} minutes, Please check your logs.') diff --git a/nzb2media/auto_process/music.py b/nzb2media/auto_process/music.py index 30082390..8353e375 100644 --- a/nzb2media/auto_process/music.py +++ b/nzb2media/auto_process/music.py @@ -67,16 +67,16 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = extract_files(dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) # if listMediaFiles(dir_name, media=False, audio=True, meta=False, archives=False) and status: - # logger.info('Status shown as failed from Downloader, but valid video files found. Setting as successful.', section) + # logger.info('Status shown as failed from Downloader, but valid video files found. Setting as successful.', SECTION) # status = 0 if status == 0 and section == 'HeadPhones': params = {'apikey': apikey, 'cmd': 'forceProcess', 'dir': remote_dir(dir_name) if remote_path else dir_name} res = force_process(params, url, apikey, input_name, dir_name, section, wait_for) - if res.status_code in [0, 1]: + if res.status_code in {0, 1}: return res params = {'apikey': apikey, 'cmd': 'forceProcess', 'dir': os.path.split(remote_dir(dir_name))[0] if remote_path else os.path.split(dir_name)[0]} res = force_process(params, url, apikey, input_name, dir_name, section, wait_for) - if res.status_code in [0, 1]: + if res.status_code in {0, 1}: return res # The status hasn't changed. uTorrent can resume seeding now. log.warning(f'The music album does not appear to have changed status after {wait_for} minutes. Please check your Logs') @@ -110,7 +110,7 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = while num < 6: # set up wait_for minutes to see if command completes.. time.sleep(10 * wait_for) command_status = command_complete(url, params, headers, section) - if command_status and command_status in ['completed', 'failed']: + if command_status and command_status in {'completed', 'failed'}: break num += 1 if command_status: @@ -118,12 +118,12 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = if not os.path.exists(dir_name): log.debug(f'The directory {dir_name} has been removed. Renaming was successful.') return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') - if command_status and command_status in ['completed']: + if command_status and command_status in {'completed'}: log.debug('The Scan command has completed successfully. Renaming was successful.') return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') - if command_status and command_status in ['failed']: + if command_status and command_status in {'failed'}: log.debug('The Scan command has failed. Renaming was not successful.') - # return ProcessResult.failure(f'{section}: Failed to post-process {input_name}') + # return ProcessResult.failure(f'{SECTION}: Failed to post-process {input_name}') else: log.debug(f'The Scan command did not return status completed. 
Passing back to {section} to attempt complete download handling.') return ProcessResult(message=f'{section}: Passing back to {section} to attempt Complete Download Handling', status_code=status) diff --git a/nzb2media/auto_process/tv.py b/nzb2media/auto_process/tv.py index 1540eb73..66ef4fa6 100644 --- a/nzb2media/auto_process/tv.py +++ b/nzb2media/auto_process/tv.py @@ -105,7 +105,7 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = # Re-raise the error if it wasn't about the directory not existing if error.errno != errno.EEXIST: raise - if 'process_method' not in fork_params or (client_agent in ['nzbget', 'sabnzbd'] and nzb_extraction_by != 'Destination'): + if 'process_method' not in fork_params or (client_agent in {'nzbget', 'sabnzbd'} and nzb_extraction_by != 'Destination'): if input_name: process_all_exceptions(input_name, dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) @@ -185,8 +185,7 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = fork_params['nzbName'] = input_name for param in copy.copy(fork_params): if param == 'failed': - if status > 1: - status = 1 + status = min(status, 1) fork_params[param] = status if 'proc_type' in fork_params: del fork_params['proc_type'] @@ -201,7 +200,7 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = fork_params[param] = 'manual' if 'proc_type' in fork_params: del fork_params['proc_type'] - if param in ['dir_name', 'dir', 'proc_dir', 'process_directory', 'path']: + if param in {'dir_name', 'dir', 'proc_dir', 'process_directory', 'path'}: fork_params[param] = dir_name if remote_path: fork_params[param] = remote_dir(dir_name) @@ -210,12 +209,12 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = fork_params[param] = process_method else: del fork_params[param] - if param in ['force', 'force_replace']: + if param in {'force', 'force_replace'}: if force: fork_params[param] = force else: del fork_params[param] - if param in ['delete_on', 'delete']: + if param in {'delete_on', 'delete'}: if delete_on: fork_params[param] = delete_on else: @@ -228,7 +227,9 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = if param == 'force_next': fork_params[param] = 1 # delete any unused params so we don't pass them to SB by mistake - [fork_params.pop(k) for k, v in list(fork_params.items()) if v is None] + for key, val in list(fork_params.items()): + if val is None: + del fork_params[key] if status == 0: if section == 'NzbDrone' and not apikey: log.info('No Sonarr apikey entered. Processing completed.') @@ -291,7 +292,7 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = login = f'{web_root}/login' login_params = {'username': username, 'password': password} response = session.get(login, verify=False, timeout=(30, 60)) - if response.status_code in [401, 403] and response.cookies.get('_xsrf'): + if response.status_code in {401, 403} and response.cookies.get('_xsrf'): login_params['_xsrf'] = response.cookies.get('_xsrf') session.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60)) response = session.get(url, auth=(username, password), params=fork_params, stream=True, verify=False, timeout=(30, 1800)) @@ -362,7 +363,7 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = while num < 6: # set up wait_for minutes to see if command completes.. 
time.sleep(10 * wait_for) command_status = command_complete(url, params, headers, section) - if command_status and command_status in ['completed', 'failed']: + if command_status and command_status in {'completed', 'failed'}: break num += 1 if command_status: @@ -370,14 +371,14 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = if not os.path.exists(dir_name): log.debug(f'The directory {dir_name} has been removed. Renaming was successful.') return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') - if command_status and command_status in ['completed']: + if command_status and command_status in {'completed'}: log.debug('The Scan command has completed successfully. Renaming was successful.') return ProcessResult.success(f'{section}: Successfully post-processed {input_name}') - if command_status and command_status in ['failed']: + if command_status and command_status in {'failed'}: log.debug('The Scan command has failed. Renaming was not successful.') - # return ProcessResult.failure(f'{section}: Failed to post-process {input_name}') + # return ProcessResult.failure(f'{SECTION}: Failed to post-process {input_name}') url2 = nzb2media.utils.common.create_url(scheme, host, port, route2) - if completed_download_handling(url2, headers, section=section): + if completed_download_handling(url2, headers): log.debug(f'The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {section}.') return ProcessResult(message=f'{section}: Complete DownLoad Handling is enabled. Passing back to {section}', status_code=status) log.warning('The Scan command did not return a valid status. Renaming was not successful.') diff --git a/nzb2media/configuration.py b/nzb2media/configuration.py index 535ca605..ebce7c1b 100644 --- a/nzb2media/configuration.py +++ b/nzb2media/configuration.py @@ -39,6 +39,7 @@ class Section(configobj.Section): for key in [k for (k, v) in to_return.items() if not v]: del to_return[key] return to_return + return None def findsection(self, key): to_return = copy.deepcopy(self) @@ -82,7 +83,7 @@ class Section(configobj.Section): class ConfigObj(configobj.ConfigObj, Section): - def __init__(self, infile=None, *args, **kw): + def __init__(self, infile, *args, **kw): if infile is None: infile = nzb2media.CONFIG_FILE super().__init__(os.fspath(infile), *args, **kw) @@ -130,7 +131,7 @@ class ConfigObj(configobj.ConfigObj, Section): if CFG_OLD[section].sections: subsections.update({section: CFG_OLD[section].sections}) for option, value in CFG_OLD[section].items(): - if option in ['category', 'cpsCategory', 'sbCategory', 'srCategory', 'hpCategory', 'mlCategory', 'gzCategory', 'raCategory', 'ndCategory', 'W3Category']: + if option in {'category', 'cpsCategory', 'sbCategory', 'srCategory', 'hpCategory', 'mlCategory', 'gzCategory', 'raCategory', 'ndCategory', 'W3Category'}: if not isinstance(value, list): value = [value] # add subsection @@ -140,31 +141,31 @@ class ConfigObj(configobj.ConfigObj, Section): def cleanup_values(values, section): for option, value in values.items(): - if section in ['CouchPotato']: + if section in {'CouchPotato'}: if option == ['outputDirectory']: CFG_NEW['Torrent'][option] = os.path.split(os.path.normpath(value))[0] values.pop(option) - if section in ['CouchPotato', 'HeadPhones', 'Gamez', 'Mylar']: - if option in ['username', 'password']: + if section in {'CouchPotato', 'HeadPhones', 'Gamez', 'Mylar'}: + if option in {'username', 'password'}: values.pop(option) - if 
section in ['Mylar']: + if section in {'Mylar'}: if option == 'wait_for': # remove old format values.pop(option) - if section in ['SickBeard', 'NzbDrone']: + if section in {'SickBeard', 'NzbDrone'}: if option == 'failed_fork': # change this old format values['failed'] = 'auto' values.pop(option) if option == 'outputDirectory': # move this to new location format CFG_NEW['Torrent'][option] = os.path.split(os.path.normpath(value))[0] values.pop(option) - if section in ['Torrent']: - if option in ['compressedExtensions', 'mediaExtensions', 'metaExtensions', 'minSampleSize']: + if section in {'Torrent'}: + if option in {'compressedExtensions', 'mediaExtensions', 'metaExtensions', 'minSampleSize'}: CFG_NEW['Extensions'][option] = value values.pop(option) if option == 'useLink': # Sym links supported now as well. - if value in ['1', 1]: + if value in {'1', 1}: value = 'hard' - elif value in ['0', 0]: + elif value in {'0', 0}: value = 'no' values[option] = value if option == 'forceClean': @@ -173,8 +174,8 @@ class ConfigObj(configobj.ConfigObj, Section): if option == 'qBittorrenHost': # We had a typo that is now fixed. CFG_NEW['Torrent']['qBittorrentHost'] = value values.pop(option) - if section in ['Transcoder']: - if option in ['niceness']: + if section in {'Transcoder'}: + if option in {'niceness'}: CFG_NEW['Posix'][option] = value values.pop(option) if option == 'remote_path': @@ -225,7 +226,7 @@ class ConfigObj(configobj.ConfigObj, Section): process_section(section, subsection) elif section in CFG_OLD.keys(): process_section(section, subsection) - # migrate SiCRKAGE settings from SickBeard section to new dedicated SiCRKAGE section + # migrate SiCRKAGE settings from SickBeard SECTION to new dedicated SiCRKAGE SECTION if CFG_OLD['SickBeard']['tv']['enabled'] and CFG_OLD['SickBeard']['tv']['fork'] == 'sickrage-api': for option, value in CFG_OLD['SickBeard']['tv'].items(): if option in CFG_NEW['SiCKRAGE']['tv']: @@ -233,7 +234,7 @@ class ConfigObj(configobj.ConfigObj, Section): # set API version to 1 if API key detected and no SSO username is set if CFG_NEW['SiCKRAGE']['tv']['apikey'] and not CFG_NEW['SiCKRAGE']['tv']['sso_username']: CFG_NEW['SiCKRAGE']['tv']['api_version'] = 1 - # disable SickBeard section + # disable SickBeard SECTION CFG_NEW['SickBeard']['tv']['enabled'] = 0 CFG_NEW['SickBeard']['tv']['fork'] = 'auto' # create a backup of our old config @@ -247,23 +248,33 @@ class ConfigObj(configobj.ConfigObj, Section): @staticmethod def addnzbget(): # load configs into memory - cfg_new = Config() + cfg_new = Config(None) try: if 'NZBPO_NDCATEGORY' in os.environ and 'NZBPO_SBCATEGORY' in os.environ: if os.environ['NZBPO_NDCATEGORY'] == os.environ['NZBPO_SBCATEGORY']: - log.warning('{x} category is set for SickBeard and Sonarr. Please check your config in NZBGet'.format(x=os.environ['NZBPO_NDCATEGORY'])) + _cat = os.environ['NZBPO_NDCATEGORY'] + msg = f'{_cat} category is set for SickBeard and Sonarr. Please check your config in NZBGet' + log.warning(msg) if 'NZBPO_RACATEGORY' in os.environ and 'NZBPO_CPSCATEGORY' in os.environ: if os.environ['NZBPO_RACATEGORY'] == os.environ['NZBPO_CPSCATEGORY']: - log.warning('{x} category is set for CouchPotato and Radarr. Please check your config in NZBGet'.format(x=os.environ['NZBPO_RACATEGORY'])) + _cat = os.environ['NZBPO_RACATEGORY'] + msg = f'{_cat} category is set for CouchPotato and Radarr. 
Please check your config in NZBGet' + log.warning(msg) if 'NZBPO_RACATEGORY' in os.environ and 'NZBPO_W3CATEGORY' in os.environ: if os.environ['NZBPO_RACATEGORY'] == os.environ['NZBPO_W3CATEGORY']: - log.warning('{x} category is set for Watcher3 and Radarr. Please check your config in NZBGet'.format(x=os.environ['NZBPO_RACATEGORY'])) + _cat = os.environ['NZBPO_RACATEGORY'] + msg = f'{_cat} category is set for Watcher3 and Radarr. Please check your config in NZBGet' + log.warning(msg) if 'NZBPO_W3CATEGORY' in os.environ and 'NZBPO_CPSCATEGORY' in os.environ: if os.environ['NZBPO_W3CATEGORY'] == os.environ['NZBPO_CPSCATEGORY']: - log.warning('{x} category is set for CouchPotato and Watcher3. Please check your config in NZBGet'.format(x=os.environ['NZBPO_W3CATEGORY'])) + _cat = os.environ['NZBPO_W3CATEGORY'] + msg = f'{_cat} category is set for CouchPotato and Watcher3. Please check your config in NZBGet' + log.warning(msg) if 'NZBPO_LICATEGORY' in os.environ and 'NZBPO_HPCATEGORY' in os.environ: if os.environ['NZBPO_LICATEGORY'] == os.environ['NZBPO_HPCATEGORY']: - log.warning('{x} category is set for HeadPhones and Lidarr. Please check your config in NZBGet'.format(x=os.environ['NZBPO_LICATEGORY'])) + _cat = os.environ['NZBPO_LICATEGORY'] + msg = f'{_cat} category is set for HeadPhones and Lidarr. Please check your config in NZBGet' + log.warning(msg) section = 'Nzb' key = 'NZBOP_DESTDIR' if key in os.environ: @@ -273,8 +284,8 @@ class ConfigObj(configobj.ConfigObj, Section): section = 'General' env_keys = ['AUTO_UPDATE', 'CHECK_MEDIA', 'REQUIRE_LAN', 'SAFE_MODE', 'NO_EXTRACT_FAILED'] cfg_keys = ['auto_update', 'check_media', 'require_lan', 'safe_mode', 'no_extract_failed'] - for index in range(len(env_keys)): - key = f'NZBPO_{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -282,8 +293,8 @@ class ConfigObj(configobj.ConfigObj, Section): section = 'Network' env_keys = ['MOUNTPOINTS'] cfg_keys = ['mount_points'] - for index in range(len(env_keys)): - key = f'NZBPO_{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -293,8 +304,8 @@ class ConfigObj(configobj.ConfigObj, Section): env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'METHOD', 'DELETE_FAILED', 'REMOTE_PATH', 'WAIT_FOR', 'WATCH_DIR', 'OMDBAPIKEY'] cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'method', 'delete_failed', 'remote_path', 'wait_for', 'watch_dir', 'omdbapikey'] if env_cat_key in os.environ: - for index in range(len(env_keys)): - key = f'NZBPO_CPS{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_CPS{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -311,8 +322,8 @@ class ConfigObj(configobj.ConfigObj, Section): env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'METHOD', 'DELETE_FAILED', 'REMOTE_PATH', 'WAIT_FOR', 'WATCH_DIR', 'OMDBAPIKEY'] cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'method', 'delete_failed', 'remote_path', 'wait_for', 'watch_dir', 'omdbapikey'] if env_cat_key in os.environ: - for index in range(len(env_keys)): - key = f'NZBPO_W3{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_W3{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -329,8 +340,8 @@ class 
ConfigObj(configobj.ConfigObj, Section): env_keys = ['ENABLED', 'HOST', 'PORT', 'APIKEY', 'USERNAME', 'PASSWORD', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'REMOTE_PATH', 'PROCESS_METHOD'] cfg_keys = ['enabled', 'host', 'port', 'apikey', 'username', 'password', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'remote_path', 'process_method'] if env_cat_key in os.environ: - for index in range(len(env_keys)): - key = f'NZBPO_SB{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_SB{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -347,8 +358,8 @@ class ConfigObj(configobj.ConfigObj, Section): env_keys = ['ENABLED', 'HOST', 'PORT', 'APIKEY', 'API_VERSION', 'SSO_USERNAME', 'SSO_PASSWORD', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'REMOTE_PATH', 'PROCESS_METHOD'] cfg_keys = ['enabled', 'host', 'port', 'apikey', 'api_version', 'sso_username', 'sso_password', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'remote_path', 'process_method'] if env_cat_key in os.environ: - for index in range(len(env_keys)): - key = f'NZBPO_SR{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_SR{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -365,8 +376,8 @@ class ConfigObj(configobj.ConfigObj, Section): env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WAIT_FOR', 'WATCH_DIR', 'REMOTE_PATH', 'DELETE_FAILED'] cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'wait_for', 'watch_dir', 'remote_path', 'delete_failed'] if env_cat_key in os.environ: - for index in range(len(env_keys)): - key = f'NZBPO_HP{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_HP{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -381,8 +392,8 @@ class ConfigObj(configobj.ConfigObj, Section): env_keys = ['ENABLED', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'APIKEY', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'REMOTE_PATH'] cfg_keys = ['enabled', 'host', 'port', 'username', 'password', 'apikey', 'ssl', 'web_root', 'watch_dir', 'remote_path'] if env_cat_key in os.environ: - for index in range(len(env_keys)): - key = f'NZBPO_MY{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_MY{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -395,8 +406,8 @@ class ConfigObj(configobj.ConfigObj, Section): env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'LIBRARY', 'REMOTE_PATH'] cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'watch_dir', 'library', 'remote_path'] if env_cat_key in os.environ: - for index in range(len(env_keys)): - key = f'NZBPO_GZ{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_GZ{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -409,8 +420,8 @@ class ConfigObj(configobj.ConfigObj, Section): env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'REMOTE_PATH'] cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'watch_dir', 'remote_path'] if env_cat_key in os.environ: - for index in range(len(env_keys)): - key = f'NZBPO_LL{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_LL{env_key}' if key in 
os.environ: option = cfg_keys[index] value = os.environ[key] @@ -424,8 +435,8 @@ class ConfigObj(configobj.ConfigObj, Section): # new cfgKey added for importMode cfg_keys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 'remote_path', 'importMode'] if env_cat_key in os.environ: - for index in range(len(env_keys)): - key = f'NZBPO_ND{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_ND{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -443,8 +454,8 @@ class ConfigObj(configobj.ConfigObj, Section): # new cfgKey added for importMode cfg_keys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 'remote_path', 'omdbapikey', 'importMode'] if env_cat_key in os.environ: - for index in range(len(env_keys)): - key = f'NZBPO_RA{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_RA{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -461,8 +472,8 @@ class ConfigObj(configobj.ConfigObj, Section): env_keys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH'] cfg_keys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 'remote_path'] if env_cat_key in os.environ: - for index in range(len(env_keys)): - key = f'NZBPO_LI{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_LI{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -475,8 +486,8 @@ class ConfigObj(configobj.ConfigObj, Section): section = 'Extensions' env_keys = ['COMPRESSEDEXTENSIONS', 'MEDIAEXTENSIONS', 'METAEXTENSIONS'] cfg_keys = ['compressedExtensions', 'mediaExtensions', 'metaExtensions'] - for index in range(len(env_keys)): - key = f'NZBPO_{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -484,8 +495,8 @@ class ConfigObj(configobj.ConfigObj, Section): section = 'Posix' env_keys = ['NICENESS', 'IONICE_CLASS', 'IONICE_CLASSDATA'] cfg_keys = ['niceness', 'ionice_class', 'ionice_classdata'] - for index in range(len(env_keys)): - key = f'NZBPO_{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -493,8 +504,8 @@ class ConfigObj(configobj.ConfigObj, Section): section = 'Transcoder' env_keys = ['TRANSCODE', 'DUPLICATE', 'IGNOREEXTENSIONS', 'OUTPUTFASTSTART', 'OUTPUTVIDEOPATH', 'PROCESSOUTPUT', 'AUDIOLANGUAGE', 'ALLAUDIOLANGUAGES', 'SUBLANGUAGES', 'ALLSUBLANGUAGES', 'EMBEDSUBS', 'BURNINSUBTITLE', 'EXTRACTSUBS', 'EXTERNALSUBDIR', 'OUTPUTDEFAULT', 'OUTPUTVIDEOEXTENSION', 'OUTPUTVIDEOCODEC', 'VIDEOCODECALLOW', 'OUTPUTVIDEOPRESET', 'OUTPUTVIDEOFRAMERATE', 'OUTPUTVIDEOBITRATE', 'OUTPUTAUDIOCODEC', 'AUDIOCODECALLOW', 'OUTPUTAUDIOBITRATE', 'OUTPUTQUALITYPERCENT', 'GETSUBS', 'OUTPUTAUDIOTRACK2CODEC', 'AUDIOCODEC2ALLOW', 'OUTPUTAUDIOTRACK2BITRATE', 'OUTPUTAUDIOOTHERCODEC', 'AUDIOOTHERCODECALLOW', 'OUTPUTAUDIOOTHERBITRATE', 'OUTPUTSUBTITLECODEC', 'OUTPUTAUDIOCHANNELS', 'OUTPUTAUDIOTRACK2CHANNELS', 
'OUTPUTAUDIOOTHERCHANNELS', 'OUTPUTVIDEORESOLUTION'] cfg_keys = ['transcode', 'duplicate', 'ignoreExtensions', 'outputFastStart', 'outputVideoPath', 'processOutput', 'audioLanguage', 'allAudioLanguages', 'subLanguages', 'allSubLanguages', 'embedSubs', 'burnInSubtitle', 'extractSubs', 'externalSubDir', 'outputDefault', 'outputVideoExtension', 'outputVideoCodec', 'VideoCodecAllow', 'outputVideoPreset', 'outputVideoFramerate', 'outputVideoBitrate', 'outputAudioCodec', 'AudioCodecAllow', 'outputAudioBitrate', 'outputQualityPercent', 'getSubs', 'outputAudioTrack2Codec', 'AudioCodec2Allow', 'outputAudioTrack2Bitrate', 'outputAudioOtherCodec', 'AudioOtherCodecAllow', 'outputAudioOtherBitrate', 'outputSubtitleCodec', 'outputAudioChannels', 'outputAudioTrack2Channels', 'outputAudioOtherChannels', 'outputVideoResolution'] - for index in range(len(env_keys)): - key = f'NZBPO_{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -502,8 +513,8 @@ class ConfigObj(configobj.ConfigObj, Section): section = 'WakeOnLan' env_keys = ['WAKE', 'HOST', 'PORT', 'MAC'] cfg_keys = ['wake', 'host', 'port', 'mac'] - for index in range(len(env_keys)): - key = f'NZBPO_WOL{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_WOL{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] @@ -513,8 +524,8 @@ class ConfigObj(configobj.ConfigObj, Section): env_keys = ['USER_SCRIPT_MEDIAEXTENSIONS', 'USER_SCRIPT_PATH', 'USER_SCRIPT_PARAM', 'USER_SCRIPT_RUNONCE', 'USER_SCRIPT_SUCCESSCODES', 'USER_SCRIPT_CLEAN', 'USDELAY', 'USREMOTE_PATH'] cfg_keys = ['user_script_mediaExtensions', 'user_script_path', 'user_script_param', 'user_script_runOnce', 'user_script_successCodes', 'user_script_clean', 'delay', 'remote_path'] if env_cat_key in os.environ: - for index in range(len(env_keys)): - key = f'NZBPO_{env_keys[index]}' + for index, env_key in enumerate(env_keys): + key = f'NZBPO_{env_key}' if key in os.environ: option = cfg_keys[index] value = os.environ[key] diff --git a/nzb2media/extractor/__init__.py b/nzb2media/extractor/__init__.py index 78a07163..6954c6d6 100644 --- a/nzb2media/extractor/__init__.py +++ b/nzb2media/extractor/__init__.py @@ -61,15 +61,15 @@ def extract(file_path, output_destination): log.warning('EXTRACTOR: No archive extracting programs found, plugin will be disabled') ext = os.path.splitext(file_path) cmd = [] - if ext[1] in ('.gz', '.bz2', '.lzma'): + if ext[1] in {'.gz', '.bz2', '.lzma'}: # Check if this is a tar if os.path.splitext(ext[0])[1] == '.tar': cmd = extract_commands[f'.tar{ext[1]}'] else: # Try gunzip cmd = extract_commands[ext[1]] - elif ext[1] in ('.1', '.01', '.001') and os.path.splitext(ext[0])[1] in ('.rar', '.zip', '.7z'): + elif ext[1] in {'.1', '.01', '.001'} and os.path.splitext(ext[0])[1] in {'.rar', '.zip', '.7z'}: cmd = extract_commands[os.path.splitext(ext[0])[1]] - elif ext[1] in ('.cb7', '.cba', '.cbr', '.cbt', '.cbz'): + elif ext[1] in {'.cb7', '.cba', '.cbr', '.cbt', '.cbz'}: # don't extract these comic book archives. 
return False else: @@ -81,7 +81,7 @@ def extract(file_path, output_destination): # Create outputDestination folder nzb2media.make_dir(output_destination) if nzb2media.PASSWORDS_FILE and os.path.isfile(os.path.normpath(nzb2media.PASSWORDS_FILE)): - with open(os.path.normpath(nzb2media.PASSWORDS_FILE)) as fin: + with open(os.path.normpath(nzb2media.PASSWORDS_FILE), encoding='utf-8') as fin: passwords = [line.strip() for line in fin] else: passwords = [] @@ -128,8 +128,6 @@ def extract(file_path, output_destination): log.info(f'EXTRACTOR: Extraction was successful for {file_path} to {output_destination} using password: {password}') success = 1 break - else: - continue except Exception: log.error(f'EXTRACTOR: Extraction failed for {file_path}. Could not call command {cmd}') os.chdir(pwd) diff --git a/nzb2media/github_api.py b/nzb2media/github_api.py index c6890fdd..beeced0b 100644 --- a/nzb2media/github_api.py +++ b/nzb2media/github_api.py @@ -11,9 +11,11 @@ class GitHub: self.github_repo = github_repo self.branch = branch - def _access_api(self, path, params=None): + @staticmethod + def _access_api(path, params=None): """Access API at given an API path and optional parameters.""" - url = 'https://api.github.com/{path}'.format(path='/'.join(path)) + route = '/'.join(path) + url = f'https://api.github.com/{route}' data = requests.get(url, params=params, verify=False) return data.json() if data.ok else [] diff --git a/nzb2media/main_db.py b/nzb2media/main_db.py index b8ff7c75..373de1b6 100644 --- a/nzb2media/main_db.py +++ b/nzb2media/main_db.py @@ -25,7 +25,7 @@ def db_filename(filename='nzbtomedia.db', suffix=None): class DBConnection: - def __init__(self, filename='nzbtomedia.db', suffix=None, row_type=None): + def __init__(self, filename='nzbtomedia.db'): self.filename = filename self.connection = sqlite3.connect(db_filename(filename), 20) self.connection.row_factory = sqlite3.Row @@ -151,9 +151,14 @@ class DBConnection: changes_before = self.connection.total_changes items = list(value_dict.values()) + list(key_dict.values()) - self.action('UPDATE {table} SET {params} WHERE {conditions}'.format(table=table_name, params=', '.join(gen_params(value_dict)), conditions=' AND '.join(gen_params(key_dict))), items) + _params = ', '.join(gen_params(value_dict)) + _conditions = ' AND '.join(gen_params(key_dict)) + self.action(f'UPDATE {table_name} SET {_params} WHERE {_conditions}', items) if self.connection.total_changes == changes_before: - self.action('INSERT OR IGNORE INTO {table} ({columns}) VALUES ({values})'.format(table=table_name, columns=', '.join(map(str, value_dict.keys())), values=', '.join(['?'] * len(value_dict.values()))), list(value_dict.values())) + _cols = ', '.join(map(str, value_dict.keys())) + values = list(value_dict.values()) + _vals = ', '.join(['?'] * len(values)) + self.action(f'INSERT OR IGNORE INTO {table_name} ({_cols}) VALUES ({_vals})', values) def table_info(self, table_name): # FIXME ? 
binding is not supported here, but I cannot find a way to escape a string manually diff --git a/nzb2media/managers/pymedusa.py b/nzb2media/managers/pymedusa.py index 841f80b0..a1354f06 100644 --- a/nzb2media/managers/pymedusa.py +++ b/nzb2media/managers/pymedusa.py @@ -58,7 +58,7 @@ class PyMedusaApiV2(SickBeard): # Check for an apikey # This is required with using fork = medusa-apiv2 if not sb_init.apikey: - log.error('For the section SickBeard `fork = medusa-apiv2` you also ' 'need to configure an `apikey`') + log.error('For the SECTION SickBeard `fork = medusa-apiv2` you also ' 'need to configure an `apikey`') raise ValueError('Missing apikey for fork: medusa-apiv2') @property diff --git a/nzb2media/managers/sickbeard.py b/nzb2media/managers/sickbeard.py index ce9d5ce7..f245afcd 100644 --- a/nzb2media/managers/sickbeard.py +++ b/nzb2media/managers/sickbeard.py @@ -49,7 +49,7 @@ class InitSickBeard: self.protocol = 'https://' if self.ssl else 'http://' def auto_fork(self): - # auto-detect correct section + # auto-detect correct SECTION # config settings if nzb2media.FORK_SET: # keep using determined fork for multiple (manual) post-processing @@ -159,7 +159,7 @@ class InitSickBeard: login = nzb2media.utils.common.create_url(scheme=self.protocol, host=self.host, port=self.port, path=f'{self.web_root}/login') login_params = {'username': self.username, 'password': self.password} response = session.get(login, verify=False, timeout=(30, 60)) - if response.status_code in [401, 403] and response.cookies.get('_xsrf'): + if response.status_code in {401, 403} and response.cookies.get('_xsrf'): login_params['_xsrf'] = response.cookies.get('_xsrf') session.post(login, data=login_params, stream=True, verify=False) response = session.get(url, auth=(self.username, self.password), params=api_params, verify=False) @@ -274,8 +274,7 @@ class SickBeard: fork_params['nzbName'] = self.input_name for param in copy.copy(fork_params): if param == 'failed': - if self.failed > 1: - self.failed = 1 + self.failed = min(self.failed, 1) fork_params[param] = self.failed if 'proc_type' in fork_params: del fork_params['proc_type'] @@ -291,7 +290,7 @@ class SickBeard: fork_params[param] = 'manual' if 'proc_type' in fork_params: del fork_params['proc_type'] - if param in ['dir_name', 'dir', 'proc_dir', 'process_directory', 'path']: + if param in {'dir_name', 'dir', 'proc_dir', 'process_directory', 'path'}: fork_params[param] = self.dir_name if self.remote_path: fork_params[param] = remote_dir(self.dir_name) @@ -303,12 +302,12 @@ class SickBeard: fork_params[param] = self.process_method else: del fork_params[param] - if param in ['force', 'force_replace']: + if param in {'force', 'force_replace'}: if self.force: fork_params[param] = self.force else: del fork_params[param] - if param in ['delete_on', 'delete']: + if param in {'delete_on', 'delete'}: if self.delete_on: fork_params[param] = self.delete_on else: @@ -326,7 +325,9 @@ class SickBeard: if param == 'force_next': fork_params[param] = 1 # delete any unused params so we don't pass them to SB by mistake - [fork_params.pop(k) for k, v in list(fork_params.items()) if v is None] + for key, value in list(fork_params.items()): + if value is None: + del fork_params[key] def api_call(self) -> ProcessResult: """Perform a base sickbeard api call.""" @@ -339,7 +340,7 @@ class SickBeard: login = nzb2media.utils.common.create_url(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, route) login_params = {'username': self.sb_init.username, 'password': self.sb_init.password} 
response = self.session.get(login, verify=False, timeout=(30, 60)) - if response.status_code in [401, 403] and response.cookies.get('_xsrf'): + if response.status_code in {401, 403} and response.cookies.get('_xsrf'): login_params['_xsrf'] = response.cookies.get('_xsrf') self.session.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60)) response = self.session.get(self.url, auth=(self.sb_init.username, self.sb_init.password), params=self.sb_init.fork_params, stream=True, verify=False, timeout=(30, 1800)) diff --git a/nzb2media/plugins/plex.py b/nzb2media/plugins/plex.py index 6585e273..b955f599 100644 --- a/nzb2media/plugins/plex.py +++ b/nzb2media/plugins/plex.py @@ -26,7 +26,8 @@ def configure_plex(config): def plex_update(category): if nzb2media.FAILED: return - url = '{scheme}://{host}:{port}/library/sections/'.format(scheme='https' if nzb2media.PLEX_SSL else 'http', host=nzb2media.PLEX_HOST, port=nzb2media.PLEX_PORT) + scheme = 'https' if nzb2media.PLEX_SSL else 'http' + url = f'{scheme}://{nzb2media.PLEX_HOST}:{nzb2media.PLEX_PORT}/library/sections/' section = None if not nzb2media.PLEX_SECTION: return @@ -39,4 +40,4 @@ def plex_update(category): requests.get(url, timeout=(60, 120), verify=False) log.debug('Plex Library has been refreshed.') else: - log.debug('Could not identify section for plex update') + log.debug('Could not identify SECTION for plex update') diff --git a/nzb2media/processor/nzb.py b/nzb2media/processor/nzb.py index 0e70ed65..156fee7c 100644 --- a/nzb2media/processor/nzb.py +++ b/nzb2media/processor/nzb.py @@ -44,7 +44,7 @@ def process(input_directory, input_name=None, status=0, client_agent='manual', d control_value_dict = {'input_directory': input_directory1} new_value_dict = {'input_name': input_name1, 'input_hash': download_id, 'input_id': download_id, 'client_agent': client_agent, 'status': 0, 'last_update': datetime.date.today().toordinal()} my_db.upsert('downloads', new_value_dict, control_value_dict) - # auto-detect section + # auto-detect SECTION if input_category is None: input_category = 'UNCAT' usercat = input_category diff --git a/nzb2media/processor/nzbget.py b/nzb2media/processor/nzbget.py index cd5a57a6..d382cb0c 100644 --- a/nzb2media/processor/nzbget.py +++ b/nzb2media/processor/nzbget.py @@ -19,8 +19,7 @@ def parse_download_id(): return os.environ[download_id_key] except KeyError: pass - else: - return '' + return '' def parse_failure_link(): @@ -40,7 +39,7 @@ def _parse_total_status(): def _parse_par_status(): """Parse nzbget par status from environment.""" par_status = os.environ['NZBPP_PARSTATUS'] - if par_status == '1' or par_status == '4': + if par_status in {'1', '4'}: log.warning('Par-repair failed, setting status \'failed\'') return 1 return 0 diff --git a/nzb2media/scene_exceptions.py b/nzb2media/scene_exceptions.py index fea2486c..47079fe5 100644 --- a/nzb2media/scene_exceptions.py +++ b/nzb2media/scene_exceptions.py @@ -84,7 +84,7 @@ def replace_filename(filename, dirname, name): return newfile_path -def reverse_filename(filename, dirname, name): +def reverse_filename(filename, dirname): head, file_extension = os.path.splitext(os.path.basename(filename)) na_parts = season_pattern.search(head) if na_parts is not None: @@ -118,7 +118,7 @@ def rename_script(dirname): dirname = directory break if rename_file: - with open(rename_file) as fin: + with open(rename_file, encoding='utf-8') as fin: rename_lines = [line.strip() for line in fin] for line in rename_lines: if re.search('^(mv|Move)', line, re.IGNORECASE): diff 
--git a/nzb2media/torrent/utorrent.py b/nzb2media/torrent/utorrent.py index 328693fc..58f14478 100644 --- a/nzb2media/torrent/utorrent.py +++ b/nzb2media/torrent/utorrent.py @@ -20,5 +20,6 @@ def configure_client(): client = UTorrentClient(web_ui, user, password) except Exception: log.error('Failed to connect to uTorrent') + return None else: return client diff --git a/nzb2media/transcoder.py b/nzb2media/transcoder.py index 6a2b6700..755f34b7 100644 --- a/nzb2media/transcoder.py +++ b/nzb2media/transcoder.py @@ -26,7 +26,7 @@ __author__ = 'Justin' def is_video_good(video: pathlib.Path, status, require_lan=None): file_ext = video.suffix disable = False - if file_ext not in nzb2media.MEDIA_CONTAINER or not nzb2media.FFPROBE or not nzb2media.CHECK_MEDIA or file_ext in ['.iso'] or (status > 0 and nzb2media.NOEXTRACTFAILED): + if file_ext not in nzb2media.MEDIA_CONTAINER or not nzb2media.FFPROBE or not nzb2media.CHECK_MEDIA or file_ext in {'.iso'} or (status > 0 and nzb2media.NOEXTRACTFAILED): disable = True else: test_details, res = get_video_details(nzb2media.TEST_FILE) @@ -205,7 +205,7 @@ def build_commands(file, new_dir, movie_name): video_cmd.extend(['-level', str(nzb2media.VLEVEL)]) if nzb2media.ACODEC: audio_cmd.extend(['-c:a', nzb2media.ACODEC]) - if nzb2media.ACODEC in ['aac', 'dts']: + if nzb2media.ACODEC in {'aac', 'dts'}: # Allow users to use the experimental AAC codec that's built into recent versions of ffmpeg audio_cmd.extend(['-strict', '-2']) else: @@ -240,17 +240,21 @@ def build_commands(file, new_dir, movie_name): video_cmd.extend(['-c:v', 'copy']) else: video_cmd.extend(['-c:v', nzb2media.VCODEC]) - if nzb2media.VFRAMERATE and not (nzb2media.VFRAMERATE * 0.999 <= frame_rate <= nzb2media.VFRAMERATE * 1.001): + if nzb2media.VFRAMERATE and not nzb2media.VFRAMERATE * 0.999 <= frame_rate <= nzb2media.VFRAMERATE * 1.001: video_cmd.extend(['-r', str(nzb2media.VFRAMERATE)]) if scale: w_scale = width / float(scale.split(':')[0]) h_scale = height / float(scale.split(':')[1]) if w_scale > h_scale: # widescreen, Scale by width only. - scale = '{width}:{height}'.format(width=scale.split(':')[0], height=int((height / w_scale) / 2) * 2) + _width = scale.split(':')[0] + _height = int((height / w_scale) / 2) * 2 + scale = f'{_width}:{_height}' if w_scale > 1: video_cmd.extend(['-vf', f'scale={scale}']) else: # lower or matching ratio, scale by height only. - scale = '{width}:{height}'.format(width=int((width / h_scale) / 2) * 2, height=scale.split(':')[1]) + _width = int((width / h_scale) / 2) * 2 + _height = scale.split(':')[1] + scale = f'{_width}:{_height}' if h_scale > 1: video_cmd.extend(['-vf', f'scale={scale}']) if nzb2media.VBITRATE: @@ -266,7 +270,8 @@ def build_commands(file, new_dir, movie_name): video_cmd[1] = nzb2media.VCODEC if nzb2media.VCODEC == 'copy': # force copy. therefore ignore all other video transcoding. video_cmd = ['-c:v', 'copy'] - map_cmd.extend(['-map', '0:{index}'.format(index=video['index'])]) + _index = video['index'] + map_cmd.extend(['-map', f'0:{_index}']) break # Only one video needed used_audio = 0 a_mapped = [] @@ -297,27 +302,31 @@ def build_commands(file, new_dir, movie_name): except Exception: audio4 = [] if audio2: # right (or only) language and codec... 
- map_cmd.extend(['-map', '0:{index}'.format(index=audio2[0]['index'])]) + _index = audio2[0]['index'] + map_cmd.extend(['-map', f'0:{_index}']) a_mapped.extend([audio2[0]['index']]) bitrate = int(float(audio2[0].get('bit_rate', 0))) / 1000 channels = int(float(audio2[0].get('channels', 0))) audio_cmd.extend([f'-c:a:{used_audio}', 'copy']) elif audio1: # right (or only) language, wrong codec. - map_cmd.extend(['-map', '0:{index}'.format(index=audio1[0]['index'])]) + _index = audio1[0]['index'] + map_cmd.extend(['-map', f'0:{_index}']) a_mapped.extend([audio1[0]['index']]) bitrate = int(float(audio1[0].get('bit_rate', 0))) / 1000 channels = int(float(audio1[0].get('channels', 0))) audio_cmd.extend([f'-c:a:{used_audio}', nzb2media.ACODEC if nzb2media.ACODEC else 'copy']) elif audio4: # wrong language, right codec. - map_cmd.extend(['-map', '0:{index}'.format(index=audio4[0]['index'])]) + _index = audio4[0]['index'] + map_cmd.extend(['-map', f'0:{_index}']) a_mapped.extend([audio4[0]['index']]) bitrate = int(float(audio4[0].get('bit_rate', 0))) / 1000 channels = int(float(audio4[0].get('channels', 0))) audio_cmd.extend([f'-c:a:{used_audio}', 'copy']) elif audio3: # wrong language, wrong codec. just pick the default audio track - map_cmd.extend(['-map', '0:{index}'.format(index=audio3[0]['index'])]) + _index = audio3[0]['index'] + map_cmd.extend(['-map', f'0:{_index}']) a_mapped.extend([audio3[0]['index']]) bitrate = int(float(audio3[0].get('bit_rate', 0))) / 1000 channels = int(float(audio3[0].get('channels', 0))) @@ -326,7 +335,7 @@ def build_commands(file, new_dir, movie_name): audio_cmd.extend([f'-ac:a:{used_audio}', str(nzb2media.ACHANNELS)]) if audio_cmd[1] == 'copy': audio_cmd[1] = nzb2media.ACODEC - if nzb2media.ABITRATE and not (nzb2media.ABITRATE * 0.9 < bitrate < nzb2media.ABITRATE * 1.1): + if nzb2media.ABITRATE and not nzb2media.ABITRATE * 0.9 < bitrate < nzb2media.ABITRATE * 1.1: audio_cmd.extend([f'-b:a:{used_audio}', str(nzb2media.ABITRATE)]) if audio_cmd[1] == 'copy': audio_cmd[1] = nzb2media.ACODEC @@ -334,7 +343,7 @@ def build_commands(file, new_dir, movie_name): audio_cmd.extend([f'-q:a:{used_audio}', str(nzb2media.OUTPUTQUALITYPERCENT)]) if audio_cmd[1] == 'copy': audio_cmd[1] = nzb2media.ACODEC - if audio_cmd[1] in ['aac', 'dts']: + if audio_cmd[1] in {'aac', 'dts'}: audio_cmd[2:2] = ['-strict', '-2'] if nzb2media.ACODEC2_ALLOW: used_audio += 1 @@ -347,13 +356,15 @@ def build_commands(file, new_dir, movie_name): except Exception: audio6 = [] if audio5: # right language and codec. - map_cmd.extend(['-map', '0:{index}'.format(index=audio5[0]['index'])]) + _index = audio5[0]['index'] + map_cmd.extend(['-map', f'0:{_index}']) a_mapped.extend([audio5[0]['index']]) bitrate = int(float(audio5[0].get('bit_rate', 0))) / 1000 channels = int(float(audio5[0].get('channels', 0))) audio_cmd2.extend([f'-c:a:{used_audio}', 'copy']) elif audio1: # right language wrong codec. 
- map_cmd.extend(['-map', '0:{index}'.format(index=audio1[0]['index'])]) + _index = audio1[0]['index'] + map_cmd.extend(['-map', f'0:{_index}']) a_mapped.extend([audio1[0]['index']]) bitrate = int(float(audio1[0].get('bit_rate', 0))) / 1000 channels = int(float(audio1[0].get('channels', 0))) @@ -362,14 +373,16 @@ else: audio_cmd2.extend([f'-c:a:{used_audio}', 'copy']) elif audio6: # wrong language, right codec - map_cmd.extend(['-map', '0:{index}'.format(index=audio6[0]['index'])]) + _index = audio6[0]['index'] + map_cmd.extend(['-map', f'0:{_index}']) a_mapped.extend([audio6[0]['index']]) bitrate = int(float(audio6[0].get('bit_rate', 0))) / 1000 channels = int(float(audio6[0].get('channels', 0))) audio_cmd2.extend([f'-c:a:{used_audio}', 'copy']) elif audio3: # wrong language, wrong codec just pick the default audio track - map_cmd.extend(['-map', '0:{index}'.format(index=audio3[0]['index'])]) + _index = audio3[0]['index'] + map_cmd.extend(['-map', f'0:{_index}']) a_mapped.extend([audio3[0]['index']]) bitrate = int(float(audio3[0].get('bit_rate', 0))) / 1000 channels = int(float(audio3[0].get('channels', 0))) @@ -381,7 +394,7 @@ audio_cmd2.extend([f'-ac:a:{used_audio}', str(nzb2media.ACHANNELS2)]) if audio_cmd2[1] == 'copy': audio_cmd2[1] = nzb2media.ACODEC2 - if nzb2media.ABITRATE2 and not (nzb2media.ABITRATE2 * 0.9 < bitrate < nzb2media.ABITRATE2 * 1.1): + if nzb2media.ABITRATE2 and not nzb2media.ABITRATE2 * 0.9 < bitrate < nzb2media.ABITRATE2 * 1.1: audio_cmd2.extend([f'-b:a:{used_audio}', str(nzb2media.ABITRATE2)]) if audio_cmd2[1] == 'copy': audio_cmd2[1] = nzb2media.ACODEC2 @@ -389,7 +402,7 @@ audio_cmd2.extend([f'-q:a:{used_audio}', str(nzb2media.OUTPUTQUALITYPERCENT)]) if audio_cmd2[1] == 'copy': audio_cmd2[1] = nzb2media.ACODEC2 - if audio_cmd2[1] in ['aac', 'dts']: + if audio_cmd2[1] in {'aac', 'dts'}: audio_cmd2[2:2] = ['-strict', '-2'] if a_mapped[1] == a_mapped[0] and audio_cmd2[1:] == audio_cmd[1:]: # check for duplicate output track.
@@ -403,7 +416,8 @@ def build_commands(file, new_dir, movie_name): if audio['index'] in a_mapped: continue used_audio += 1 - map_cmd.extend(['-map', '0:{index}'.format(index=audio['index'])]) + _index = audio['index'] + map_cmd.extend(['-map', f'0:{_index}']) audio_cmd3 = [] bitrate = int(float(audio.get('bit_rate', 0))) / 1000 channels = int(float(audio.get('channels', 0))) @@ -418,7 +432,7 @@ def build_commands(file, new_dir, movie_name): audio_cmd3.extend([f'-ac:a:{used_audio}', str(nzb2media.ACHANNELS3)]) if audio_cmd3[1] == 'copy': audio_cmd3[1] = nzb2media.ACODEC3 - if nzb2media.ABITRATE3 and not (nzb2media.ABITRATE3 * 0.9 < bitrate < nzb2media.ABITRATE3 * 1.1): + if nzb2media.ABITRATE3 and not nzb2media.ABITRATE3 * 0.9 < bitrate < nzb2media.ABITRATE3 * 1.1: audio_cmd3.extend([f'-b:a:{used_audio}', str(nzb2media.ABITRATE3)]) if audio_cmd3[1] == 'copy': audio_cmd3[1] = nzb2media.ACODEC3 @@ -426,7 +440,7 @@ audio_cmd3.extend([f'-q:a:{used_audio}', str(nzb2media.OUTPUTQUALITYPERCENT)]) if audio_cmd3[1] == 'copy': audio_cmd3[1] = nzb2media.ACODEC3 - if audio_cmd3[1] in ['aac', 'dts']: + if audio_cmd3[1] in {'aac', 'dts'}: audio_cmd3[2:2] = ['-strict', '-2'] audio_cmd.extend(audio_cmd3) s_mapped = [] @@ -445,17 +459,18 @@ for sub in subs1: if nzb2media.BURN and not burnt and os.path.isfile(input_file): subloc = 0 - for index in range(len(sub_streams)): - if sub_streams[index]['index'] == sub['index']: + for index, sub_stream in enumerate(sub_streams): + if sub_stream['index'] == sub['index']: subloc = index break video_cmd.extend(['-vf', f'subtitles={input_file}:si={subloc}']) burnt = 1 if not nzb2media.ALLOWSUBS: break - if sub['codec_name'] in ['dvd_subtitle', 'VobSub'] and nzb2media.SCODEC == 'mov_text': # We can't convert these. - continue - map_cmd.extend(['-map', '0:{index}'.format(index=sub['index'])]) + if sub['codec_name'] in {'dvd_subtitle', 'VobSub'} and nzb2media.SCODEC == 'mov_text': + continue # We can't convert these. + _index = sub['index'] + map_cmd.extend(['-map', f'0:{_index}']) s_mapped.extend([sub['index']]) if nzb2media.SINCLUDE: for sub in sub_streams: @@ -463,9 +478,10 @@ break if sub['index'] in s_mapped: continue if sub['codec_name'] in {'dvd_subtitle', 'VobSub'} and nzb2media.SCODEC == 'mov_text': # We can't convert these.
continue - map_cmd.extend(['-map', '0:{index}'.format(index=sub['index'])]) + _index = sub['index'] + map_cmd.extend(['-map', f'0:{_index}']) s_mapped.extend([sub['index']]) if nzb2media.OUTPUTFASTSTART: other_cmd.extend(['-movflags', '+faststart']) @@ -580,7 +596,7 @@ def extract_subs(file, newfile_path): log.error('Extracting subtitles has failed') -def process_list(iterable, new_dir): +def process_list(iterable): rem_list = [] new_list = [] combine = [] @@ -589,9 +605,9 @@ def process_list(iterable, new_dir): success = True for item in iterable: ext = os.path.splitext(item)[1].lower() - if ext in ['.iso', '.bin', '.img'] and ext not in nzb2media.IGNOREEXTENSIONS: + if ext in {'.iso', '.bin', '.img'} and ext not in nzb2media.IGNOREEXTENSIONS: log.debug(f'Attempting to rip disk image: {item}') - new_list.extend(rip_iso(item, new_dir)) + new_list.extend(rip_iso(item)) rem_list.append(item) elif re.match('.+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]', item) and '.vob' not in nzb2media.IGNOREEXTENSIONS: log.debug(f'Found VIDEO_TS image file: {item}') @@ -638,7 +654,7 @@ def process_list(iterable, new_dir): return iterable, rem_list, new_list, success -def mount_iso(item, new_dir): # Currently only supports Linux Mount when permissions allow. +def mount_iso(item): # Currently only supports Linux Mount when permissions allow. if platform.system() == 'Windows': log.error(f'No mounting options available under Windows for image file {item}') return [] @@ -670,7 +686,7 @@ def mount_iso(item, new_dir): # Currently only supports Linux Mount when permis return ['failure'] # If we got here, nothing matched our criteria -def rip_iso(item, new_dir): +def rip_iso(item): new_files = [] failure_dir = 'failure' # Mount the ISO in your OS and call combineVTS. @@ -678,7 +694,7 @@ def rip_iso(item, new_dir): log.debug(f'No 7zip installed. Attempting to mount image file {item}') try: # Currently only works for Linux. - new_files = mount_iso(item, new_dir) + new_files = mount_iso(item) except Exception: log.error(f'Failed to mount and extract from image file {item}') new_files = [failure_dir] @@ -730,7 +746,7 @@ def rip_iso(item, new_dir): new_files.append({item: {'name': name, 'files': combined}}) if not new_files: log.error(f'No VIDEO_TS or BDMV/SOURCE folder found in image file. 
Attempting to mount and scan {item}') - new_files = mount_iso(item, new_dir) + new_files = mount_iso(item) except Exception: log.error(f'Failed to extract from image file {item}') new_files = [failure_dir] @@ -832,7 +848,7 @@ def transcode_directory(dir_name): new_dir = dir_name movie_name = os.path.splitext(os.path.split(dir_name)[1])[0] file_list = nzb2media.list_media_files(dir_name, media=True, audio=False, meta=False, archives=False) - file_list, rem_list, new_list, success = process_list(file_list, new_dir) + file_list, rem_list, new_list, success = process_list(file_list) if not success: return 1, dir_name for file in file_list: diff --git a/nzb2media/user_scripts.py b/nzb2media/user_scripts.py index 4929817b..6262ca59 100644 --- a/nzb2media/user_scripts.py +++ b/nzb2media/user_scripts.py @@ -66,24 +66,22 @@ def external_script(output_destination, torrent_name, torrent_label, settings): if param == 'FN': command.append(f'{file}') continue - elif param == 'FP': + if param == 'FP': command.append(f'{file_path}') continue - elif param == 'TN': + if param == 'TN': command.append(f'{torrent_name}') continue - elif param == 'TL': + if param == 'TL': command.append(f'{torrent_label}') continue - elif param == 'DN': + if param == 'DN': if nzb2media.USER_SCRIPT_RUNONCE == 1: command.append(f'{output_destination}') else: command.append(f'{dirpath}') continue - else: - command.append(param) - continue + command.append(param) cmd = '' for item in command: cmd = f'{cmd} {item}' diff --git a/nzb2media/utils/common.py b/nzb2media/utils/common.py index da7ec2b7..85622683 100644 --- a/nzb2media/utils/common.py +++ b/nzb2media/utils/common.py @@ -35,9 +35,9 @@ def process_dir(path, link): dir_contents = os.listdir(path) # search for single files and move them into their own folder for post-processing # Generate list of sync files - sync_files = (item for item in dir_contents if os.path.splitext(item)[1] in ['.!sync', '.bts']) + sync_files = (item for item in dir_contents if os.path.splitext(item)[1] in {'.!sync', '.bts'}) # Generate a list of file paths - filepaths = (os.path.join(path, item) for item in dir_contents if item not in ['Thumbs.db', 'thumbs.db']) + filepaths = (os.path.join(path, item) for item in dir_contents if item not in {'Thumbs.db', 'thumbs.db'}) # Generate a list of media files mediafiles = (item for item in filepaths if os.path.isfile(item)) if not any(sync_files): @@ -53,7 +53,7 @@ def process_dir(path, link): directories = (path for path in path_contents if os.path.isdir(path)) for directory in directories: dir_contents = os.listdir(directory) - sync_files = (item for item in dir_contents if os.path.splitext(item)[1] in ['.!sync', '.bts']) + sync_files = (item for item in dir_contents if os.path.splitext(item)[1] in {'.!sync', '.bts'}) if not any(dir_contents) or any(sync_files): continue folders.append(directory) diff --git a/nzb2media/utils/encoding.py b/nzb2media/utils/encoding.py index 79c2f399..20400066 100644 --- a/nzb2media/utils/encoding.py +++ b/nzb2media/utils/encoding.py @@ -33,7 +33,7 @@ def char_replace(name_in): encoding = 'cp850' break # Detect ISO-8859-15 - elif (character >= 0xA6) & (character <= 0xFF): + if (character >= 0xA6) & (character <= 0xFF): encoding = 'iso-8859-15' break else: @@ -42,11 +42,11 @@ def char_replace(name_in): encoding = 'utf-8' break # Detect CP850 - elif (character >= 0x80) & (character <= 0xA5): + if (character >= 0x80) & (character <= 0xA5): encoding = 'cp850' break # Detect ISO-8859-15 - elif (character >= 0xA6) & (character <= 
0xFF): + if (character >= 0xA6) & (character <= 0xFF): encoding = 'iso-8859-15' break if encoding: diff --git a/nzb2media/utils/files.py b/nzb2media/utils/files.py index e773f869..92ab428e 100644 --- a/nzb2media/utils/files.py +++ b/nzb2media/utils/files.py @@ -67,7 +67,7 @@ def move_file(filename, path, link): copy_link(filename, newfile, link) -def is_min_size(input_name, min_size): +def is_min_size(input_name, min_size) -> bool: file_name, file_ext = os.path.splitext(os.path.basename(input_name)) # audio files we need to check directory size not file size input_size = os.path.getsize(input_name) @@ -80,6 +80,7 @@ def is_min_size(input_name, min_size): # Ignore files under a certain size if input_size > min_size * 1048576: return True + return False def is_archive_file(filename): diff --git a/nzb2media/utils/identification.py b/nzb2media/utils/identification.py index f51e6021..424c31b6 100644 --- a/nzb2media/utils/identification.py +++ b/nzb2media/utils/identification.py @@ -13,8 +13,8 @@ log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) -def find_imdbid(dir_name, input_name, omdb_api_key): - imdbid = None +def find_imdbid(dir_name, input_name, omdb_api_key) -> str: + imdbid = '' log.info(f'Attemping imdbID lookup for {input_name}') # find imdbid in dirName log.info('Searching folder and file names for imdbID ...') @@ -56,13 +56,13 @@ def find_imdbid(dir_name, input_name, omdb_api_key): url = 'http://www.omdbapi.com' if not omdb_api_key: log.info('Unable to determine imdbID: No api key provided for omdbapi.com.') - return + return '' log.debug(f'Opening URL: {url}') try: response = requests.get(url, params={'apikey': omdb_api_key, 'y': year, 't': title}, verify=False, timeout=(60, 300)) except requests.ConnectionError: log.error(f'Unable to open URL {url}') - return + return '' try: results = response.json() except Exception: diff --git a/nzb2media/utils/links.py b/nzb2media/utils/links.py index 789ce3d8..39e98272 100644 --- a/nzb2media/utils/links.py +++ b/nzb2media/utils/links.py @@ -10,14 +10,15 @@ from nzb2media.utils.paths import make_dir log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) + try: from jaraco.windows.filesystem import islink, readlink except ImportError: - if os.name == 'nt': - raise - else: + if os.name != 'nt': from os.path import islink from os import readlink + else: + raise def copy_link(src, target_link, use_link): diff --git a/nzb2media/utils/naming.py b/nzb2media/utils/naming.py index 9f5c3cb3..acecdfd4 100644 --- a/nzb2media/utils/naming.py +++ b/nzb2media/utils/naming.py @@ -39,7 +39,8 @@ def clean_file_name(filename): return filename.strip() -def is_sample(input_name): +def is_sample(input_name) -> bool: # Ignore 'sample' in files if re.search('(^|[\\W_])sample\\d*[\\W_]', input_name.lower()): return True + return False diff --git a/nzb2media/utils/parsers.py b/nzb2media/utils/parsers.py index ca3fd996..b85f1a68 100644 --- a/nzb2media/utils/parsers.py +++ b/nzb2media/utils/parsers.py @@ -67,7 +67,7 @@ def parse_deluge(args): return input_directory, input_name, input_category, input_hash, input_id -def parse_transmission(args): +def parse_transmission(): # Transmission usage: call TorrenToMedia.py (%TR_TORRENT_DIR% %TR_TORRENT_NAME% is passed on as environmental variables) input_directory = os.path.normpath(os.getenv('TR_TORRENT_DIR')) input_name = os.getenv('TR_TORRENT_NAME') @@ -77,7 +77,7 @@ def parse_transmission(args): return input_directory, input_name, input_category, input_hash, input_id -def 
parse_synods(args): +def parse_synods(): # Synology/Transmission usage: call TorrenToMedia.py (%TR_TORRENT_DIR% %TR_TORRENT_NAME% is passed on as environmental variables) input_directory = '' input_id = '' diff --git a/nzb2media/utils/paths.py b/nzb2media/utils/paths.py index 4a7f48ee..b1928a58 100644 --- a/nzb2media/utils/paths.py +++ b/nzb2media/utils/paths.py @@ -26,7 +26,7 @@ def onerror(func, path, exc_info): os.chmod(path, stat.S_IWUSR) func(path) else: - raise Exception + raise Exception(exc_info) def remove_dir(dir_name): diff --git a/nzb2media/utils/processes.py b/nzb2media/utils/processes.py index 7de910d7..6cbe2681 100644 --- a/nzb2media/utils/processes.py +++ b/nzb2media/utils/processes.py @@ -10,9 +10,16 @@ import typing import nzb2media if os.name == 'nt': + # pylint: disable-next=no-name-in-module from win32event import CreateMutex - from win32api import CloseHandle, GetLastError + + # pylint: disable-next=no-name-in-module + from win32api import CloseHandle + + # pylint: disable-next=no-name-in-module + from win32api import GetLastError from winerror import ERROR_ALREADY_EXISTS + log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) @@ -100,7 +107,7 @@ def restart(): install_type = nzb2media.version_check.CheckVersion().install_type status = 0 popen_list = [] - if install_type in ('git', 'source'): + if install_type in {'git', 'source'}: popen_list = [sys.executable, nzb2media.APP_FILENAME] if popen_list: popen_list += nzb2media.SYS_ARGV diff --git a/nzb2media/utils/torrent.py b/nzb2media/utils/torrent.py index a5049a7e..e136efa9 100644 --- a/nzb2media/utils/torrent.py +++ b/nzb2media/utils/torrent.py @@ -15,14 +15,15 @@ log.addHandler(logging.NullHandler()) torrent_clients = {'deluge': deluge, 'qbittorrent': qbittorrent, 'transmission': transmission, 'utorrent': utorrent, 'synods': synology} -def create_torrent_class(client_agent): +def create_torrent_class(client_agent) -> object | None: if nzb2media.APP_NAME != 'TorrentToMedia.py': - return # Skip loading Torrent for NZBs. + return None # Skip loading Torrent for NZBs. try: agent = torrent_clients[client_agent] except KeyError: - return + return None else: + deluge.configure_client() return agent.configure_client() diff --git a/nzb2media/version_check.py b/nzb2media/version_check.py index 079cf5b6..dab73bef 100644 --- a/nzb2media/version_check.py +++ b/nzb2media/version_check.py @@ -38,7 +38,8 @@ class CheckVersion: def run(self): self.check_for_new_version() - def find_install_type(self): + @staticmethod + def find_install_type(): """ Determine how this copy of SB was installed. returns: type of installation. 
Possible values are: @@ -77,13 +78,16 @@ class CheckVersion: class UpdateManager: - def get_github_repo_user(self): + @staticmethod + def get_github_repo_user(): return nzb2media.GIT_USER - def get_github_repo(self): + @staticmethod + def get_github_repo(): return nzb2media.GIT_REPO - def get_github_branch(self): + @staticmethod + def get_github_branch(): return nzb2media.GIT_BRANCH @@ -98,7 +102,8 @@ class GitUpdateManager(UpdateManager): self._num_commits_behind = 0 self._num_commits_ahead = 0 - def _git_error(self): + @staticmethod + def _git_error(): log.debug('Unable to find your git executable - Set git_path in your autoProcessMedia.cfg OR delete your .git folder and run from source to enable updates.') def _find_working_git(self): @@ -134,9 +139,10 @@ class GitUpdateManager(UpdateManager): log.debug('Unable to find your git executable - Set git_path in your autoProcessMedia.cfg OR delete your .git folder and run from source to enable updates.') return None - def _run_git(self, git_path, args): - result = None - proc_err = None + @staticmethod + def _run_git(git_path, args): + result = '' + proc_err = '' if not git_path: log.debug('No git specified, can\'t use git commands') proc_status = 1 @@ -157,7 +163,7 @@ class GitUpdateManager(UpdateManager): if proc_status == 0: log.debug(f'{cmd} : returned successful') proc_status = 0 - elif nzb2media.LOG_GIT and proc_status in (1, 128): + elif nzb2media.LOG_GIT and proc_status in {1, 128}: log.debug(f'{cmd} returned : {result}') else: if nzb2media.LOG_GIT: @@ -234,7 +240,8 @@ class GitUpdateManager(UpdateManager): if self._num_commits_ahead: log.error(f'Local branch is ahead of {self.branch}. Automatic update not possible.') elif self._num_commits_behind: - log.info('There is a newer version available (you\'re {x} commit{s} behind)'.format(x=self._num_commits_behind, s='s' if self._num_commits_behind > 1 else '')) + _plural = 's' if self._num_commits_behind > 1 else '' + log.info(f'There is a newer version available (you\'re {self._num_commits_behind} commit{_plural} behind)') else: return @@ -280,7 +287,7 @@ class SourceUpdateManager(UpdateManager): self._cur_commit_hash = None return try: - with open(version_file) as fin: + with open(version_file, encoding='utf-8') as fin: self._cur_commit_hash = fin.read().strip(' \n\r') except OSError as error: log.debug(f'Unable to open \'version.txt\': {error}') @@ -338,7 +345,8 @@ class SourceUpdateManager(UpdateManager): if not self._cur_commit_hash: log.error('Unknown current version number, don\'t know if we should update or not') elif self._num_commits_behind > 0: - log.info('There is a newer version available (you\'re {x} commit{s} behind)'.format(x=self._num_commits_behind, s='s' if self._num_commits_behind > 1 else '')) + _plural = 's' if self._num_commits_behind > 1 else '' + log.info(f'There is a newer version available (you\'re {self._num_commits_behind} commit{_plural} behind)') else: return @@ -387,7 +395,7 @@ class SourceUpdateManager(UpdateManager): # Avoid DLL access problem on WIN32/64 # These files needing to be updated manually # or find a way to kill the access from memory - if curfile in ('unrar.dll', 'unrar64.dll'): + if curfile in {'unrar.dll', 'unrar64.dll'}: try: os.chmod(new_path, stat.S_IWRITE) os.remove(new_path) @@ -402,7 +410,7 @@ class SourceUpdateManager(UpdateManager): os.renames(old_path, new_path) # update version.txt with commit hash try: - with open(version_path, 'w') as ver_file: + with open(version_path, 'w', encoding='utf-8') as ver_file: 
ver_file.write(self._newest_commit_hash) except OSError as error: log.error(f'Unable to write version file, update not complete: {error}') diff --git a/nzbToCouchPotato.py b/nzbToCouchPotato.py index b7e4efc4..c3b2c2ee 100755 --- a/nzbToCouchPotato.py +++ b/nzbToCouchPotato.py @@ -2,6 +2,6 @@ import sys import nzbToMedia -section = 'CouchPotato' -result = nzbToMedia.main(sys.argv, section) +SECTION = 'CouchPotato' +result = nzbToMedia.main(sys.argv, SECTION) sys.exit(result) diff --git a/nzbToGamez.py b/nzbToGamez.py index 3873a9d5..445c0342 100755 --- a/nzbToGamez.py +++ b/nzbToGamez.py @@ -2,6 +2,6 @@ import sys import nzbToMedia -section = 'Gamez' -result = nzbToMedia.main(sys.argv, section) +SECTION = 'Gamez' +result = nzbToMedia.main(sys.argv, SECTION) sys.exit(result) diff --git a/nzbToHeadPhones.py b/nzbToHeadPhones.py index 8264cfe0..bd52ae4a 100755 --- a/nzbToHeadPhones.py +++ b/nzbToHeadPhones.py @@ -2,6 +2,6 @@ import sys import nzbToMedia -section = 'HeadPhones' -result = nzbToMedia.main(sys.argv, section) +SECTION = 'HeadPhones' +result = nzbToMedia.main(sys.argv, SECTION) sys.exit(result) diff --git a/nzbToLazyLibrarian.py b/nzbToLazyLibrarian.py index 22333ca2..e87dfef1 100755 --- a/nzbToLazyLibrarian.py +++ b/nzbToLazyLibrarian.py @@ -2,6 +2,6 @@ import sys import nzbToMedia -section = 'LazyLibrarian' -result = nzbToMedia.main(sys.argv, section) +SECTION = 'LazyLibrarian' +result = nzbToMedia.main(sys.argv, SECTION) sys.exit(result) diff --git a/nzbToLidarr.py b/nzbToLidarr.py index 92b81417..07ca4ef5 100755 --- a/nzbToLidarr.py +++ b/nzbToLidarr.py @@ -2,6 +2,6 @@ import sys import nzbToMedia -section = 'Lidarr' -result = nzbToMedia.main(sys.argv, section) +SECTION = 'Lidarr' +result = nzbToMedia.main(sys.argv, SECTION) sys.exit(result) diff --git a/nzbToMedia.py b/nzbToMedia.py index 84cb881f..385ca0f3 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -65,4 +65,4 @@ def main(args, section=None): if __name__ == '__main__': - exit(main(sys.argv)) + sys.exit(main(sys.argv)) diff --git a/nzbToMylar.py b/nzbToMylar.py index 9cdb62c2..dfe46e7f 100755 --- a/nzbToMylar.py +++ b/nzbToMylar.py @@ -2,6 +2,6 @@ import sys import nzbToMedia -section = 'Mylar' -result = nzbToMedia.main(sys.argv, section) +SECTION = 'Mylar' +result = nzbToMedia.main(sys.argv, SECTION) sys.exit(result) diff --git a/nzbToNzbDrone.py b/nzbToNzbDrone.py index c04a3f11..7d7af9c8 100755 --- a/nzbToNzbDrone.py +++ b/nzbToNzbDrone.py @@ -2,6 +2,6 @@ import sys import nzbToMedia -section = 'NzbDrone' -result = nzbToMedia.main(sys.argv, section) +SECTION = 'NzbDrone' +result = nzbToMedia.main(sys.argv, SECTION) sys.exit(result) diff --git a/nzbToRadarr.py b/nzbToRadarr.py index 9ac97e8b..b1157a43 100755 --- a/nzbToRadarr.py +++ b/nzbToRadarr.py @@ -2,6 +2,6 @@ import sys import nzbToMedia -section = 'Radarr' -result = nzbToMedia.main(sys.argv, section) +SECTION = 'Radarr' +result = nzbToMedia.main(sys.argv, SECTION) sys.exit(result) diff --git a/nzbToSiCKRAGE.py b/nzbToSiCKRAGE.py index 10f9cd84..987adc86 100755 --- a/nzbToSiCKRAGE.py +++ b/nzbToSiCKRAGE.py @@ -2,6 +2,6 @@ import sys import nzbToMedia -section = 'SiCKRAGE' -result = nzbToMedia.main(sys.argv, section) +SECTION = 'SiCKRAGE' +result = nzbToMedia.main(sys.argv, SECTION) sys.exit(result) diff --git a/nzbToSickBeard.py b/nzbToSickBeard.py index 5fcfd19f..adc0b10a 100755 --- a/nzbToSickBeard.py +++ b/nzbToSickBeard.py @@ -2,6 +2,6 @@ import sys import nzbToMedia -section = 'SickBeard' -result = nzbToMedia.main(sys.argv, section) +SECTION = 'SickBeard' 
+result = nzbToMedia.main(sys.argv, SECTION) sys.exit(result) diff --git a/nzbToWatcher3.py b/nzbToWatcher3.py index fd315392..a4b0a413 100755 --- a/nzbToWatcher3.py +++ b/nzbToWatcher3.py @@ -2,6 +2,6 @@ import sys import nzbToMedia -section = 'Watcher3' -result = nzbToMedia.main(sys.argv, section) +SECTION = 'Watcher3' +result = nzbToMedia.main(sys.argv, SECTION) sys.exit(result) From 5a0ac9dfa228c6c295e5233902bdd157d80fa8cb Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 18 Dec 2022 20:57:44 -0500 Subject: [PATCH 12/14] Fix transcode defaults --- .gitignore | 1 + .pre-commit-config.yaml | 4 ++-- dev-requirements.txt | 7 +++++++ nzb2media/__init__.py | 26 ++++++++++++++++++++------ tests/ffmpeg_test.py | 7 +++++++ 5 files changed, 37 insertions(+), 8 deletions(-) create mode 100644 dev-requirements.txt create mode 100644 tests/ffmpeg_test.py diff --git a/.gitignore b/.gitignore index 9c372b9e..8b85af82 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,4 @@ *.dist-info *.egg-info /.vscode +/htmlcov/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1df66caf..9328ea82 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -10,12 +10,12 @@ repos: - id: name-tests-test - id: requirements-txt-fixer - repo: https://github.com/asottile/add-trailing-comma - rev: v2.3.0 + rev: v2.4.0 hooks: - id: add-trailing-comma args: [--py36-plus] - repo: https://github.com/asottile/pyupgrade - rev: v3.3.0 + rev: v3.3.1 hooks: - id: pyupgrade args: [--py37-plus] diff --git a/dev-requirements.txt b/dev-requirements.txt new file mode 100644 index 00000000..b05affb1 --- /dev/null +++ b/dev-requirements.txt @@ -0,0 +1,7 @@ +black +bump2version +mypy +pre-commit +pylint[spelling] +pytest +tox diff --git a/nzb2media/__init__.py b/nzb2media/__init__.py index a143b621..a60638cf 100644 --- a/nzb2media/__init__.py +++ b/nzb2media/__init__.py @@ -208,8 +208,8 @@ ABITRATE3 = None SCODEC = None OUTPUTFASTSTART = None OUTPUTQUALITYPERCENT = None -FFMPEG = None -SEVENZIP = None +FFMPEG: pathlib.Path | None = None +SEVENZIP: pathlib.Path | None = None SHOWEXTRACT = 0 PAR2CMD = None FFPROBE = None @@ -580,8 +580,24 @@ def configure_transcoder(): HWACCEL = int(CFG['Transcoder']['hwAccel']) allow_subs = ['.mkv', '.mp4', '.m4v', 'asf', 'wma', 'wmv'] codec_alias = {'libx264': ['libx264', 'h264', 'h.264', 'AVC', 'MPEG-4'], 'libmp3lame': ['libmp3lame', 'mp3'], 'libfaac': ['libfaac', 'aac', 'faac']} + transcode_defaults = { + 'iPad': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, + 'iPad-1080p': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '1920:1080', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, + 'iPad-720p': {'VEXTENSION': '.mp4', 'VCODEC': 
'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '1280:720', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, + 'Apple-TV': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '1280:720', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, 'ACODEC2': 'aac', 'ACODEC2_ALLOW': ['libfaac'], 'ABITRATE2': None, 'ACHANNELS2': 2, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, + 'iPod': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '1280:720', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, + 'iPhone': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '460:320', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, + 'PS3': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, 'ACODEC2': 'aac', 'ACODEC2_ALLOW': ['libfaac'], 'ABITRATE2': None, 'ACHANNELS2': 2, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, + 'xbox': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, + 'Roku-480p': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, + 
'Roku-720p': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, + 'Roku-1080p': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 160000, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, + 'mkv': {'VEXTENSION': '.mkv', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], 'ACODEC': 'dts', 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE': None, 'ACHANNELS': 8, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, 'ACHANNELS3': 8, 'SCODEC': 'mov_text'}, + 'mkv-bluray': {'VEXTENSION': '.mkv', 'VCODEC': 'libx265', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'hevc', 'h265', 'libx265', 'h.265', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], 'ACODEC': 'dts', 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE': None, 'ACHANNELS': 8, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, 'ACHANNELS3': 8, 'SCODEC': 'mov_text'}, + 'mp4-scene-release': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': 19, 'VLEVEL': '3.1', 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], 'ACODEC': 'dts', 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE': None, 'ACHANNELS': 8, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, 'ACHANNELS3': 8, 'SCODEC': 'mov_text'}, + 'MKV-SD': {'VEXTENSION': '.mkv', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': '1200k', 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '720: -1', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, + } if DEFAULTS and DEFAULTS in transcode_defaults: - transcode_defaults = {'iPad': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': 
None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'iPad-1080p': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '1920:1080', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'iPad-720p': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '1280:720', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'Apple-TV': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '1280:720', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, 'ACODEC2': 'aac', 'ACODEC2_ALLOW': ['libfaac'], 'ABITRATE2': None, 'ACHANNELS2': 2, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'iPod': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '1280:720', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'iPhone': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '460:320', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'PS3': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, 'ACODEC2': 'aac', 'ACODEC2_ALLOW': ['libfaac'], 'ABITRATE2': None, 'ACHANNELS2': 2, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'xbox': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 
'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'Roku-480p': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'Roku-720p': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'Roku-1080p': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 160000, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}, 'mkv': {'VEXTENSION': '.mkv', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], 'ACODEC': 'dts', 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE': None, 'ACHANNELS': 8, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, 'ACHANNELS3': 8, 'SCODEC': 'mov_text'}, 'mkv-bluray': {'VEXTENSION': '.mkv', 'VCODEC': 'libx265', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'hevc', 'h265', 'libx265', 'h.265', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], 'ACODEC': 'dts', 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE': None, 'ACHANNELS': 8, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, 'ACHANNELS3': 8, 'SCODEC': 'mov_text'}, 'mp4-scene-release': {'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': 19, 'VLEVEL': '3.1', 'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'], 'ACODEC': 'dts', 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE': None, 
'ACHANNELS': 8, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, 'ACHANNELS3': 8, 'SCODEC': 'mov_text'}, 'MKV-SD': {'VEXTENSION': '.mkv', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': '1200k', 'VCRF': None, 'VLEVEL': None, 'VRESOLUTION': '720: -1', 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4'], 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'SCODEC': 'mov_text'}} VEXTENSION = transcode_defaults[DEFAULTS]['VEXTENSION'] VCODEC = transcode_defaults[DEFAULTS]['VCODEC'] VPRESET = transcode_defaults[DEFAULTS]['VPRESET'] @@ -604,9 +620,7 @@ def configure_transcoder(): ACHANNELS3 = transcode_defaults[DEFAULTS]['ACHANNELS3'] ABITRATE3 = transcode_defaults[DEFAULTS]['ABITRATE3'] SCODEC = transcode_defaults[DEFAULTS]['SCODEC'] - transcode_defaults = {} # clear memory - if transcode_defaults in {'mp4-scene-release'} and not OUTPUTQUALITYPERCENT: - OUTPUTQUALITYPERCENT = 100 + del transcode_defaults if VEXTENSION in allow_subs: ALLOWSUBS = 1 if not VCODEC_ALLOW and VCODEC: diff --git a/tests/ffmpeg_test.py b/tests/ffmpeg_test.py new file mode 100644 index 00000000..264e52f2 --- /dev/null +++ b/tests/ffmpeg_test.py @@ -0,0 +1,7 @@ +import nzb2media + + +def test_has_ffmpeg(): + nzb2media.configure_utility_locations() + assert nzb2media.FFMPEG is not None + assert nzb2media.FFMPEG.exists() From a25b15d72f49f06a7fd84c18d61be5dd7cd3c2e2 Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 18 Dec 2022 21:37:22 -0500 Subject: [PATCH 13/14] Fix ffmpeg test --- nzb2media/__init__.py | 69 ++++++++++++++++++++++++------------------- tests/__init__.py | 2 -- 2 files changed, 38 insertions(+), 33 deletions(-) diff --git a/nzb2media/__init__.py b/nzb2media/__init__.py index a60638cf..1e39ef54 100644 --- a/nzb2media/__init__.py +++ b/nzb2media/__init__.py @@ -35,14 +35,15 @@ except ImportError: sys.exit('Please install pywin32') -def which(name): +def which(name) -> pathlib.Path | None: with subprocess.Popen(['which', name], stdout=PIPE) as proc: try: proc_out, proc_err = proc.communicate() except Exception: - return '' + return None else: - return proc_out.strip().decode() + location = proc_out.strip().decode() + return pathlib.Path(location) def module_path(module=__file__): @@ -166,7 +167,7 @@ MOUNTED = None GETSUBS = False TRANSCODE = None CONCAT = None -FFMPEG_PATH = '' +FFMPEG_PATH: pathlib.Path | None = None SYS_PATH = None DUPLICATE = None IGNOREEXTENSIONS = [] @@ -313,7 +314,7 @@ def configure_general(): GIT_USER = CFG['General']['git_user'] or 'clinton-hall' GIT_BRANCH = CFG['General']['git_branch'] or 'master' FORCE_CLEAN = int(CFG['General']['force_clean']) - FFMPEG_PATH = CFG['General']['ffmpeg_path'] + FFMPEG_PATH = pathlib.Path(CFG['General']['ffmpeg_path']) SYS_PATH = CFG['General']['sys_path'] CHECK_MEDIA = int(CFG['General']['check_media']) REQUIRE_LAN = None if not CFG['General']['require_lan'] else CFG['General']['require_lan'].split(',') @@ -672,19 +673,20 @@ def configure_utility_locations(): global PAR2CMD # Setup FFMPEG, FFPROBE and SEVENZIP locations if platform.system() == 'Windows': - FFMPEG = os.path.join(FFMPEG_PATH, 'ffmpeg.exe') - FFPROBE = os.path.join(FFMPEG_PATH, 'ffprobe.exe') - SEVENZIP = 
os.path.join(APP_ROOT, 'nzb2media', 'extractor', 'bin', platform.machine(), '7z.exe') - SHOWEXTRACT = int(str(CFG['Windows']['show_extraction']), 0) - if not os.path.isfile(FFMPEG): # problem - FFMPEG = None - log.warning('Failed to locate ffmpeg.exe. Transcoding disabled!') - log.warning('Install ffmpeg with x264 support to enable this feature ...') - if not os.path.isfile(FFPROBE): - FFPROBE = None - if CHECK_MEDIA: - log.warning('Failed to locate ffprobe.exe. Video corruption detection disabled!') + if FFMPEG_PATH: + FFMPEG = FFMPEG_PATH / 'ffmpeg.exe' + FFPROBE = FFMPEG_PATH / 'ffprobe.exe' + SEVENZIP = APP_ROOT / f'nzb2media/extractor/bin{platform.machine()}/7z.exe' + SHOWEXTRACT = int(str(CFG['Windows']['show_extraction']), 0) + if FFMPEG and FFMPEG.exists(): # problem + FFMPEG = None + log.warning('Failed to locate ffmpeg.exe. Transcoding disabled!') log.warning('Install ffmpeg with x264 support to enable this feature ...') + if not os.path.isfile(FFPROBE): + FFPROBE = None + if CHECK_MEDIA: + log.warning('Failed to locate ffprobe.exe. Video corruption detection disabled!') + log.warning('Install ffmpeg with x264 support to enable this feature ...') else: if SYS_PATH: os.environ['PATH'] += ':' + SYS_PATH @@ -695,26 +697,31 @@ def configure_utility_locations(): if not PAR2CMD: PAR2CMD = None log.warning('Failed to locate par2. Repair and rename using par files will not be possible!') - ffmpeg_bin = os.path.join(FFMPEG_PATH, 'ffmpeg') - avconv_bin = os.path.join(FFMPEG_PATH, 'avconv') - if os.path.isfile(ffmpeg_bin) or os.access(ffmpeg_bin, os.X_OK): - FFMPEG = ffmpeg_bin - elif os.path.isfile(avconv_bin) or os.access(avconv_bin, os.X_OK): - FFMPEG = avconv_bin - else: + if FFMPEG_PATH: + ffmpeg_bin = FFMPEG_PATH / 'ffmpeg' + avconv_bin = FFMPEG_PATH / 'avconv' + if ffmpeg_bin.is_file() or os.access(ffmpeg_bin, os.X_OK): + FFMPEG = ffmpeg_bin + elif avconv_bin.is_file() or os.access(avconv_bin, os.X_OK): + FFMPEG = avconv_bin + if not FFMPEG: FFMPEG = which('ffmpeg') or which('avconv') if not FFMPEG: FFMPEG = None log.warning('Failed to locate ffmpeg. 
Transcoding disabled!') log.warning('Install ffmpeg with x264 support to enable this feature ...') - ffprobe_bin = os.path.join(FFMPEG_PATH, 'ffprobe') - avprobe_bin = os.path.join(FFMPEG_PATH, 'avprobe') - if os.path.isfile(ffprobe_bin) or os.access(ffprobe_bin, os.X_OK): - FFPROBE = ffprobe_bin - elif os.path.isfile(avprobe_bin) or os.access(avprobe_bin, os.X_OK): - FFPROBE = avprobe_bin - else: + + if not FFMPEG_PATH: + ffprobe_bin = FFMPEG_PATH / 'ffprobe' + avprobe_bin = FFMPEG_PATH / 'avprobe' + if ffprobe_bin.is_file() or os.access(ffprobe_bin, os.X_OK): + FFPROBE = ffprobe_bin + elif avprobe_bin.is_file() or os.access(avprobe_bin, os.X_OK): + FFPROBE = avprobe_bin + + if not FFPROBE: FFPROBE = which('ffprobe') or which('avprobe') + if not FFPROBE: FFPROBE = None if CHECK_MEDIA: diff --git a/tests/__init__.py b/tests/__init__.py index fe2164dc..e69de29b 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,2 +0,0 @@ -from __future__ import annotations -__author__ = 'Justin' From 379a86e55577b28f6372927bdf68a2cd0b32a105 Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Mon, 19 Dec 2022 00:39:36 -0500 Subject: [PATCH 14/14] Fix ffmpeg test --- nzb2media/__init__.py | 65 +++--------------- nzb2media/auto_process/movies.py | 6 +- nzb2media/extractor/__init__.py | 7 +- nzb2media/tool.py | 114 +++++++++++++++++++++++++++++++ nzb2media/transcoder.py | 10 +-- nzb2media/version_check.py | 10 +-- tests/ffmpeg_test.py | 7 -- tests/tool_test.py | 7 ++ 8 files changed, 148 insertions(+), 78 deletions(-) create mode 100644 nzb2media/tool.py delete mode 100644 tests/ffmpeg_test.py create mode 100644 tests/tool_test.py diff --git a/nzb2media/__init__.py b/nzb2media/__init__.py index 1e39ef54..d4332f0e 100644 --- a/nzb2media/__init__.py +++ b/nzb2media/__init__.py @@ -13,6 +13,7 @@ import time import typing from subprocess import PIPE, DEVNULL +from nzb2media import tool from nzb2media import databases from nzb2media import main_db from nzb2media import version_check @@ -212,8 +213,8 @@ OUTPUTQUALITYPERCENT = None FFMPEG: pathlib.Path | None = None SEVENZIP: pathlib.Path | None = None SHOWEXTRACT = 0 -PAR2CMD = None -FFPROBE = None +PAR2CMD: pathlib.Path | None = None +FFPROBE: pathlib.Path | None = None CHECK_MEDIA = None REQUIRE_LAN = None NICENESS = [] @@ -671,62 +672,16 @@ def configure_utility_locations(): global FFMPEG global FFPROBE global PAR2CMD + # Setup FFMPEG, FFPROBE and SEVENZIP locations + FFMPEG = tool.find_transcoder(FFMPEG_PATH) + FFPROBE = tool.find_video_corruption_detector(FFMPEG_PATH) + PAR2CMD = tool.find_archive_repairer() if platform.system() == 'Windows': - if FFMPEG_PATH: - FFMPEG = FFMPEG_PATH / 'ffmpeg.exe' - FFPROBE = FFMPEG_PATH / 'ffprobe.exe' - SEVENZIP = APP_ROOT / f'nzb2media/extractor/bin{platform.machine()}/7z.exe' - SHOWEXTRACT = int(str(CFG['Windows']['show_extraction']), 0) - if FFMPEG and FFMPEG.exists(): # problem - FFMPEG = None - log.warning('Failed to locate ffmpeg.exe. Transcoding disabled!') - log.warning('Install ffmpeg with x264 support to enable this feature ...') - if not os.path.isfile(FFPROBE): - FFPROBE = None - if CHECK_MEDIA: - log.warning('Failed to locate ffprobe.exe. Video corruption detection disabled!') - log.warning('Install ffmpeg with x264 support to enable this feature ...') + path = APP_ROOT / f'nzb2media/extractor/bin/{platform.machine()}' else: - if SYS_PATH: - os.environ['PATH'] += ':' + SYS_PATH - SEVENZIP = which('7z') or which('7zr') or which('7za') - if not SEVENZIP: - log.warning('Failed to locate 7zip. 
Transcoding of disk images and extraction of .7z files will not be possible!') - PAR2CMD = which('par2') - if not PAR2CMD: - PAR2CMD = None - log.warning('Failed to locate par2. Repair and rename using par files will not be possible!') - if FFMPEG_PATH: - ffmpeg_bin = FFMPEG_PATH / 'ffmpeg' - avconv_bin = FFMPEG_PATH / 'avconv' - if ffmpeg_bin.is_file() or os.access(ffmpeg_bin, os.X_OK): - FFMPEG = ffmpeg_bin - elif avconv_bin.is_file() or os.access(avconv_bin, os.X_OK): - FFMPEG = avconv_bin - if not FFMPEG: - FFMPEG = which('ffmpeg') or which('avconv') - if not FFMPEG: - FFMPEG = None - log.warning('Failed to locate ffmpeg. Transcoding disabled!') - log.warning('Install ffmpeg with x264 support to enable this feature ...') - - if not FFMPEG_PATH: - ffprobe_bin = FFMPEG_PATH / 'ffprobe' - avprobe_bin = FFMPEG_PATH / 'avprobe' - if ffprobe_bin.is_file() or os.access(ffprobe_bin, os.X_OK): - FFPROBE = ffprobe_bin - elif avprobe_bin.is_file() or os.access(avprobe_bin, os.X_OK): - FFPROBE = avprobe_bin - - if not FFPROBE: - FFPROBE = which('ffprobe') or which('avprobe') - - if not FFPROBE: - FFPROBE = None - if CHECK_MEDIA: - log.warning('Failed to locate ffprobe. Video corruption detection disabled!') - log.warning('Install ffmpeg with x264 support to enable this feature ...') + path = None + SEVENZIP = tool.find_unzip(path) def initialize(section=None): diff --git a/nzb2media/auto_process/movies.py b/nzb2media/auto_process/movies.py index 7b2144ca..5c217677 100644 --- a/nzb2media/auto_process/movies.py +++ b/nzb2media/auto_process/movies.py @@ -144,10 +144,8 @@ def process(*, section: str, dir_name: str, input_name: str = '', status: int = failure_link += '&corrupt=true' elif client_agent == 'manual': log.warning(f'No media files found in directory {dir_name} to manually process.') - return ProcessResult( - message='', - status_code=0, # Success (as far as this script is concerned) - ) + # Success (as far as this script is concerned) + return ProcessResult.success() else: log.warning(f'No media files found in directory {dir_name}. 
Processing this as a failed download') status = 1 diff --git a/nzb2media/extractor/__init__.py b/nzb2media/extractor/__init__.py index 6954c6d6..5e6b8772 100644 --- a/nzb2media/extractor/__init__.py +++ b/nzb2media/extractor/__init__.py @@ -34,8 +34,11 @@ def extract(file_path, output_destination): required_cmds = ['unrar', 'unzip', 'tar', 'unxz', 'unlzma', '7zr', 'bunzip2', 'gunzip'] # ## Possible future suport: # gunzip: gz (cmd will delete original archive) - # ## the following do not extract to dest dir - # '.xz': ['xz', '-d --keep'], # '.lzma': ['xz', '-d --format=lzma --keep'], # '.bz2': ['bzip2', '-d --keep'], extract_commands = { '.rar': ['unrar', 'x', '-o+', '-y'], '.tar': ['tar', '-xf'], '.zip': ['unzip'], '.tar.gz': ['tar', '-xzf'], '.tgz': ['tar', '-xzf'], '.tar.bz2': ['tar', '-xjf'], '.tbz': ['tar', '-xjf'], '.tar.lzma': ['tar', '--lzma', '-xf'], '.tlz': ['tar', '--lzma', '-xf'], '.tar.xz': ['tar', '--xz', '-xf'], '.txz': ['tar', '--xz', '-xf'], '.7z': ['7zr', 'x'], '.gz': ['gunzip'], } + # ## the following do not extract to destination dir + # '.xz': ['xz', '-d --keep'], + # '.lzma': ['xz', '-d --format=lzma --keep'], + # '.bz2': ['bzip2', '-d --keep'] + extract_commands = {'.rar': ['unrar', 'x', '-o+', '-y'], '.tar': ['tar', '-xf'], '.zip': ['unzip'], '.tar.gz': ['tar', '-xzf'], '.tgz': ['tar', '-xzf'], '.tar.bz2': ['tar', '-xjf'], '.tbz': ['tar', '-xjf'], '.tar.lzma': ['tar', '--lzma', '-xf'], '.tlz': ['tar', '--lzma', '-xf'], '.tar.xz': ['tar', '--xz', '-xf'], '.txz': ['tar', '--xz', '-xf'], '.7z': ['7zr', 'x'], '.gz': ['gunzip']} # Test command exists and if not, remove if not os.getenv('TR_TORRENT_DIR'): for cmd in required_cmds: diff --git a/nzb2media/tool.py b/nzb2media/tool.py new file mode 100644 index 00000000..db16213f --- /dev/null +++ b/nzb2media/tool.py @@ -0,0 +1,114 @@ +from __future__ import annotations + +import itertools +import logging +import os +import pathlib +import shutil +import typing + +log = logging.getLogger(__name__) +log.addHandler(logging.NullHandler()) + + +def in_path(name: str) -> pathlib.Path | None: + """Find tool if its on the system loc.""" + log.debug(f'Searching for {name} on system path') + path = shutil.which(name) + if not path: + return None + return pathlib.Path(path) + + +def at_location(root: pathlib.Path, name: str) -> pathlib.Path | None: + """Return tool if its at given loc.""" + log.debug(f'Searching for {name} at {root}') + if not name: + raise ValueError('name is required') + path = root / name + if path.exists() or os.access(path, os.X_OK): + return path + return None + + +def find(root: pathlib.Path | None, *names) -> pathlib.Path | None: + """Try to find a tool. + + Look in target location first, then system path, + and finally check the current working directory. 
+ """ + if not names: + raise ValueError('At least one name is required.') + + # look in target location first + if root: + found_at_location: typing.Iterable[pathlib.Path | None] = (at_location(root, name) for name in names) + else: + found_at_location = [] + + # look on system path second + found_on_path = (in_path(name) for name in names) + + found = itertools.chain(found_at_location, found_on_path) + for path in found: + if path is not None: + log.info(f'Found at {path}') + return path + + # finally check current working directory + cwd = pathlib.Path.cwd() + log.debug(f'Falling back on current working directory: {cwd}') + + found_in_working_directory = (at_location(cwd, name) for name in names) + for path in found_in_working_directory: + if path is not None: + log.info(f'Found {path}') + return path + return None + + +def find_transcoder(root: pathlib.Path | None = None) -> pathlib.Path | None: + """Find a tool for transcoding.""" + log.info('Searching for transcoding tool.') + names = ('ffmpeg', 'avconv') + found = find(root, *names) + if not found: + log.debug(f'Failed to locate any of the following: {names}') + log.warning('Transcoding disabled!') + log.warning('Install ffmpeg with x264 support to enable this feature.') + return found + + +def find_video_corruption_detector(root: pathlib.Path | None = None) -> pathlib.Path | None: + """Find a tool for detecting video corruption.""" + log.info('Searching for video corruption detection tool.') + names = ('ffprobe', 'avprobe') + found = find(root, *names) + if not found: + log.debug(f'Failed to locate any of the following: {names}') + log.warning('Video corruption detection disabled!') + log.warning('Install ffmpeg with x264 support to enable this feature.') + return found + + +def find_archive_repairer(root: pathlib.Path | None = None) -> pathlib.Path | None: + """Find a tool for repairing and renaming archives.""" + log.info('Searching for file repair and renaming tool.') + names = ('par2',) + found = find(root, *names) + if not found: + log.debug(f'Failed to locate any of the following: {names}') + log.warning('Archive repair and renaming disabled!') + log.warning('Install a parity archive repair tool to enable this feature.') + return found + + +def find_unzip(root: pathlib.Path | None = None) -> pathlib.Path | None: + """Find a tool for unzipping archives.""" + log.info('Searching for an unzipping tool.') + names = ('7z', '7zr', '7za') + found = find(root, *names) + if not found: + log.debug(f'Failed to locate any of the following: {names}') + log.warning('Transcoding of disk images and extraction zip files will not be possible!') + return found diff --git a/nzb2media/transcoder.py b/nzb2media/transcoder.py index 755f34b7..b7c9347f 100644 --- a/nzb2media/transcoder.py +++ b/nzb2media/transcoder.py @@ -72,7 +72,7 @@ def zip_out(file, img): if os.path.isfile(file): cmd = ['cat', file] else: - cmd = [nzb2media.SEVENZIP, '-so', 'e', img, file] + cmd = [os.fspath(nzb2media.SEVENZIP), '-so', 'e', img, file] try: with subprocess.Popen(cmd, stdout=PIPE, stderr=DEVNULL) as proc: return proc @@ -87,11 +87,11 @@ def get_video_details(videofile, img=None): file = videofile if not nzb2media.FFPROBE: return video_details, result - print_format = '-of' if 'avprobe' in nzb2media.FFPROBE else '-print_format' + print_format = '-of' if 'avprobe' in nzb2media.FFPROBE.name else '-print_format' try: if img: videofile = '-' - command = [nzb2media.FFPROBE, '-v', 'quiet', print_format, 'json', '-show_format', '-show_streams', '-show_error', videofile] + 
command = [os.fspath(nzb2media.FFPROBE), '-v', 'quiet', print_format, 'json', '-show_format', '-show_streams', '-show_error', videofile] print_cmd(command) if img: procin = zip_out(file, img) @@ -106,7 +106,7 @@ def get_video_details(videofile, img=None): video_details = json.loads(proc_out.decode()) except Exception: try: # try this again without -show error in case of ffmpeg limitation - command = [nzb2media.FFPROBE, '-v', 'quiet', print_format, 'json', '-show_format', '-show_streams', videofile] + command = [os.fspath(nzb2media.FFPROBE), '-v', 'quiet', print_format, 'json', '-show_format', '-show_streams', videofile] print_cmd(command) if img: procin = zip_out(file, img) @@ -469,7 +469,7 @@ def build_commands(file, new_dir, movie_name): break if sub['codec_name'] in {'dvd_subtitle', 'VobSub'} and nzb2media.SCODEC == 'mov_text': continue # We can't convert these. - _inded = sub['index'] + _index = sub['index'] map_cmd.extend(['-map', f'0:{_index}']) s_mapped.extend([sub['index']]) if nzb2media.SINCLUDE: diff --git a/nzb2media/version_check.py b/nzb2media/version_check.py index dab73bef..c4707913 100644 --- a/nzb2media/version_check.py +++ b/nzb2media/version_check.py @@ -261,8 +261,8 @@ class GitUpdateManager(UpdateManager): return False def update(self): - """ - Check git for a new version. + """Check git for a new version. + Calls git pull origin in order to update Sick Beard. Returns a bool depending on the call's success. """ @@ -308,8 +308,8 @@ class SourceUpdateManager(UpdateManager): return False def _check_github_for_update(self): - """ - Check Github for a new version. + """ Check Github for a new version. + Uses pygithub to ask github if there is a newer version than the provided commit hash. If there is a newer version it sets Sick Beard's version text. @@ -388,7 +388,7 @@ class SourceUpdateManager(UpdateManager): # walk temp folder and move files to main folder log.info(f'Moving files from {content_dir} to {nzb2media.APP_ROOT}') for dirname, _, filenames in os.walk(content_dir): - dirname = dirname[len(content_dir) + 1 :] + dirname = dirname[len(content_dir) + 1:] for curfile in filenames: old_path = os.path.join(content_dir, dirname, curfile) new_path = os.path.join(nzb2media.APP_ROOT, dirname, curfile) diff --git a/tests/ffmpeg_test.py b/tests/ffmpeg_test.py deleted file mode 100644 index 264e52f2..00000000 --- a/tests/ffmpeg_test.py +++ /dev/null @@ -1,7 +0,0 @@ -import nzb2media - - -def test_has_ffmpeg(): - nzb2media.configure_utility_locations() - assert nzb2media.FFMPEG is not None - assert nzb2media.FFMPEG.exists() diff --git a/tests/tool_test.py b/tests/tool_test.py new file mode 100644 index 00000000..bb799282 --- /dev/null +++ b/tests/tool_test.py @@ -0,0 +1,7 @@ +import nzb2media.tool + + +def test_tool_in_path(): + ffmpeg = nzb2media.tool.in_path('ffmpeg') + avprobe = nzb2media.tool.in_path('avprobe') + assert ffmpeg or avprobe
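
A minimal usage sketch of the nzb2media.tool helpers introduced in PATCH 14 above, assuming the patch is applied as shown. It uses only the functions and default signatures visible in the diff (find_transcoder, find_video_corruption_detector, find_archive_repairer, find_unzip); the search directory in the example is hypothetical, and passing None simply falls back to the system PATH and the current working directory, as the find() docstring describes.

    import pathlib

    import nzb2media.tool

    # Hypothetical directory to check before the system PATH and the
    # current working directory; use None to skip this first step.
    ffmpeg_dir = pathlib.Path('/usr/local/bin')

    ffmpeg = nzb2media.tool.find_transcoder(ffmpeg_dir)                   # ffmpeg or avconv
    ffprobe = nzb2media.tool.find_video_corruption_detector(ffmpeg_dir)   # ffprobe or avprobe
    par2 = nzb2media.tool.find_archive_repairer()                         # par2
    sevenzip = nzb2media.tool.find_unzip()                                # 7z, 7zr or 7za

    # Each helper returns a pathlib.Path, or None when the tool is missing
    # (after logging the corresponding warning).
    for name, path in [('ffmpeg', ffmpeg), ('ffprobe', ffprobe), ('par2', par2), ('7z', sevenzip)]:
        print(name, path or 'not found')
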