From 97e1ed71b319fe2d5b10a62eb377065c4daf5873 Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 16 Dec 2018 20:00:13 -0500 Subject: [PATCH 1/4] PEP8 Variable in function should be lowercase --- TorrentToMedia.py | 288 +++++++++--------- core/autoProcess/autoProcessComics.py | 16 +- core/autoProcess/autoProcessGames.py | 24 +- core/autoProcess/autoProcessMovie.py | 140 ++++----- core/autoProcess/autoProcessMusic.py | 75 ++--- core/autoProcess/autoProcessTV.py | 134 ++++----- core/extractor/extractor.py | 36 +-- core/nzbToMediaAutoFork.py | 8 +- core/nzbToMediaConfig.py | 304 +++++++++---------- core/nzbToMediaDB.py | 44 +-- core/nzbToMediaSceneExceptions.py | 39 +-- core/nzbToMediaUserScript.py | 14 +- core/nzbToMediaUtil.py | 407 +++++++++++++------------- core/transcoder/transcoder.py | 254 ++++++++-------- nzbToMedia.py | 147 +++++----- 15 files changed, 977 insertions(+), 953 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index 22bac54e..a32c2983 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -12,64 +12,68 @@ from libs.six import text_type def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent): + input_directory = inputDirectory + input_name = inputName + input_category = inputCategory + input_hash = inputHash status = 1 # 1 = failed | 0 = success root = 0 - foundFile = 0 + found_file = 0 if clientAgent != 'manual' and not core.DOWNLOADINFO: - logger.debug('Adding TORRENT download info for directory {0} to database'.format(inputDirectory)) + logger.debug('Adding TORRENT download info for directory {0} to database'.format(input_directory)) - myDB = nzbToMediaDB.DBConnection() + my_db = nzbToMediaDB.DBConnection() - inputDirectory1 = inputDirectory - inputName1 = inputName + input_directory1 = input_directory + input_name1 = input_name try: - encoded, inputDirectory1 = CharReplace(inputDirectory) - encoded, inputName1 = CharReplace(inputName) + encoded, input_directory1 = CharReplace(input_directory) + encoded, input_name1 = CharReplace(input_name) except: pass - controlValueDict = {"input_directory": text_type(inputDirectory1)} - newValueDict = {"input_name": text_type(inputName1), - "input_hash": text_type(inputHash), + control_value_dict = {"input_directory": text_type(input_directory1)} + new_value_dict = {"input_name": text_type(input_name1), + "input_hash": text_type(input_hash), "input_id": text_type(inputID), "client_agent": text_type(clientAgent), "status": 0, "last_update": datetime.date.today().toordinal() } - myDB.upsert("downloads", newValueDict, controlValueDict) + my_db.upsert("downloads", new_value_dict, control_value_dict) - logger.debug("Received Directory: {0} | Name: {1} | Category: {2}".format(inputDirectory, inputName, inputCategory)) + logger.debug("Received Directory: {0} | Name: {1} | Category: {2}".format(input_directory, input_name, input_category)) # Confirm the category by parsing directory structure - inputDirectory, inputName, inputCategory, root = core.category_search(inputDirectory, inputName, inputCategory, - root, core.CATEGORIES) - if inputCategory == "": - inputCategory = "UNCAT" + input_directory, input_name, input_category, root = core.category_search(input_directory, input_name, input_category, + root, core.CATEGORIES) + if input_category == "": + input_category = "UNCAT" - usercat = inputCategory + usercat = input_category try: - inputName = inputName.encode(core.SYS_ENCODING) + input_name = input_name.encode(core.SYS_ENCODING) except UnicodeError: pass try: - 
inputDirectory = inputDirectory.encode(core.SYS_ENCODING) + input_directory = input_directory.encode(core.SYS_ENCODING) except UnicodeError: pass logger.debug("Determined Directory: {0} | Name: {1} | Category: {2}".format - (inputDirectory, inputName, inputCategory)) + (input_directory, input_name, input_category)) # auto-detect section - section = core.CFG.findsection(inputCategory).isenabled() + section = core.CFG.findsection(input_category).isenabled() if section is None: section = core.CFG.findsection("ALL").isenabled() if section is None: logger.error('Category:[{0}] is not defined or is not enabled. ' 'Please rename it or ensure it is enabled for the appropriate section ' 'in your autoProcessMedia.cfg and try again.'.format - (inputCategory)) + (input_category)) return [-1, ""] else: usercat = "ALL" @@ -82,95 +86,95 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, return [-1, ""] if section: - sectionName = section.keys()[0] - logger.info('Auto-detected SECTION:{0}'.format(sectionName)) + section_name = section.keys()[0] + logger.info('Auto-detected SECTION:{0}'.format(section_name)) else: logger.error("Unable to locate a section with subsection:{0} " "enabled in your autoProcessMedia.cfg, exiting!".format - (inputCategory)) + (input_category)) return [-1, ""] - section = dict(section[sectionName][usercat]) # Type cast to dict() to allow effective usage of .get() + section = dict(section[section_name][usercat]) # Type cast to dict() to allow effective usage of .get() - Torrent_NoLink = int(section.get("Torrent_NoLink", 0)) + torrent_no_link = int(section.get("Torrent_NoLink", 0)) keep_archive = int(section.get("keep_archive", 0)) extract = int(section.get('extract', 0)) extensions = section.get('user_script_mediaExtensions', "").lower().split(',') - uniquePath = int(section.get("unique_path", 1)) + unique_path = int(section.get("unique_path", 1)) if clientAgent != 'manual': - core.pause_torrent(clientAgent, inputHash, inputID, inputName) + core.pause_torrent(clientAgent, input_hash, inputID, input_name) # In case input is not directory, make sure to create one. # This way Processing is isolated. 
- if not os.path.isdir(os.path.join(inputDirectory, inputName)): - basename = os.path.basename(inputDirectory) - basename = core.sanitizeName(inputName) \ - if inputName == basename else os.path.splitext(core.sanitizeName(inputName))[0] - outputDestination = os.path.join(core.OUTPUTDIRECTORY, inputCategory, basename) - elif uniquePath: - outputDestination = os.path.normpath( - core.os.path.join(core.OUTPUTDIRECTORY, inputCategory, core.sanitizeName(inputName).replace(" ","."))) + if not os.path.isdir(os.path.join(input_directory, input_name)): + basename = os.path.basename(input_directory) + basename = core.sanitizeName(input_name) \ + if input_name == basename else os.path.splitext(core.sanitizeName(input_name))[0] + output_destination = os.path.join(core.OUTPUTDIRECTORY, input_category, basename) + elif unique_path: + output_destination = os.path.normpath( + core.os.path.join(core.OUTPUTDIRECTORY, input_category, core.sanitizeName(input_name).replace(" ","."))) else: - outputDestination = os.path.normpath( - core.os.path.join(core.OUTPUTDIRECTORY, inputCategory)) + output_destination = os.path.normpath( + core.os.path.join(core.OUTPUTDIRECTORY, input_category)) try: - outputDestination = outputDestination.encode(core.SYS_ENCODING) + output_destination = output_destination.encode(core.SYS_ENCODING) except UnicodeError: pass - if outputDestination in inputDirectory: - outputDestination = inputDirectory + if output_destination in input_directory: + output_destination = input_directory - logger.info("Output directory set to: {0}".format(outputDestination)) + logger.info("Output directory set to: {0}".format(output_destination)) - if core.SAFE_MODE and outputDestination == core.TORRENT_DEFAULTDIR: + if core.SAFE_MODE and output_destination == core.TORRENT_DEFAULTDIR: logger.error('The output directory:[{0}] is the Download Directory. ' 'Edit outputDirectory in autoProcessMedia.cfg. Exiting'.format - (inputDirectory)) + (input_directory)) return [-1, ""] - logger.debug("Scanning files in directory: {0}".format(inputDirectory)) + logger.debug("Scanning files in directory: {0}".format(input_directory)) - if sectionName in ['HeadPhones', 'Lidarr']: + if section_name in ['HeadPhones', 'Lidarr']: core.NOFLATTEN.extend( - inputCategory) # Make sure we preserve folder structure for HeadPhones. + input_category) # Make sure we preserve folder structure for HeadPhones. 
now = datetime.datetime.now() if extract == 1: - inputFiles = core.listMediaFiles(inputDirectory, archives=False, other=True, otherext=extensions) + input_files = core.listMediaFiles(input_directory, archives=False, other=True, otherext=extensions) else: - inputFiles = core.listMediaFiles(inputDirectory, other=True, otherext=extensions) - if len(inputFiles) == 0 and os.path.isfile(inputDirectory): - inputFiles = [inputDirectory] - logger.debug("Found 1 file to process: {0}".format(inputDirectory)) + input_files = core.listMediaFiles(input_directory, other=True, otherext=extensions) + if len(input_files) == 0 and os.path.isfile(input_directory): + input_files = [input_directory] + logger.debug("Found 1 file to process: {0}".format(input_directory)) else: - logger.debug("Found {0} files in {1}".format(len(inputFiles), inputDirectory)) - for inputFile in inputFiles: - filePath = os.path.dirname(inputFile) - fileName, fileExt = os.path.splitext(os.path.basename(inputFile)) - fullFileName = os.path.basename(inputFile) + logger.debug("Found {0} files in {1}".format(len(input_files), input_directory)) + for inputFile in input_files: + file_path = os.path.dirname(inputFile) + file_name, file_ext = os.path.splitext(os.path.basename(inputFile)) + full_file_name = os.path.basename(inputFile) - targetFile = core.os.path.join(outputDestination, fullFileName) - if inputCategory in core.NOFLATTEN: - if not os.path.basename(filePath) in outputDestination: - targetFile = core.os.path.join( - core.os.path.join(outputDestination, os.path.basename(filePath)), fullFileName) + target_file = core.os.path.join(output_destination, full_file_name) + if input_category in core.NOFLATTEN: + if not os.path.basename(file_path) in output_destination: + target_file = core.os.path.join( + core.os.path.join(output_destination, os.path.basename(file_path)), full_file_name) logger.debug("Setting outputDestination to {0} to preserve folder structure".format - (os.path.dirname(targetFile))) + (os.path.dirname(target_file))) try: - targetFile = targetFile.encode(core.SYS_ENCODING) + target_file = target_file.encode(core.SYS_ENCODING) except UnicodeError: pass if root == 1: - if not foundFile: - logger.debug("Looking for {0} in: {1}".format(inputName, inputFile)) - if any([core.sanitizeName(inputName) in core.sanitizeName(inputFile), - core.sanitizeName(fileName) in core.sanitizeName(inputName)]): - foundFile = True + if not found_file: + logger.debug("Looking for {0} in: {1}".format(input_name, inputFile)) + if any([core.sanitizeName(input_name) in core.sanitizeName(inputFile), + core.sanitizeName(file_name) in core.sanitizeName(input_name)]): + found_file = True logger.debug("Found file {0} that matches Torrent Name {1}".format - (fullFileName, inputName)) + (full_file_name, input_name)) else: continue @@ -178,78 +182,78 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, mtime_lapse = now - datetime.datetime.fromtimestamp(os.path.getmtime(inputFile)) ctime_lapse = now - datetime.datetime.fromtimestamp(os.path.getctime(inputFile)) - if not foundFile: + if not found_file: logger.debug("Looking for files with modified/created dates less than 5 minutes old.") if (mtime_lapse < datetime.timedelta(minutes=5)) or (ctime_lapse < datetime.timedelta(minutes=5)): - foundFile = True + found_file = True logger.debug("Found file {0} with date modified/created less than 5 minutes ago.".format - (fullFileName)) + (full_file_name)) else: continue # This file has not been recently moved or created, skip it - if 
Torrent_NoLink == 0: + if torrent_no_link == 0: try: - core.copy_link(inputFile, targetFile, core.USELINK) - core.rmReadOnly(targetFile) + core.copy_link(inputFile, target_file, core.USELINK) + core.rmReadOnly(target_file) except: - logger.error("Failed to link: {0} to {1}".format(inputFile, targetFile)) + logger.error("Failed to link: {0} to {1}".format(inputFile, target_file)) - inputName, outputDestination = convert_to_ascii(inputName, outputDestination) + input_name, output_destination = convert_to_ascii(input_name, output_destination) if extract == 1: - logger.debug('Checking for archives to extract in directory: {0}'.format(inputDirectory)) - core.extractFiles(inputDirectory, outputDestination, keep_archive) + logger.debug('Checking for archives to extract in directory: {0}'.format(input_directory)) + core.extractFiles(input_directory, output_destination, keep_archive) - if inputCategory not in core.NOFLATTEN: + if input_category not in core.NOFLATTEN: # don't flatten hp in case multi cd albums, and we need to copy this back later. - core.flatten(outputDestination) + core.flatten(output_destination) # Now check if video files exist in destination: - if sectionName in ["SickBeard", "NzbDrone", "Sonarr", "CouchPotato", "Radarr"]: - numVideos = len( - core.listMediaFiles(outputDestination, media=True, audio=False, meta=False, archives=False)) - if numVideos > 0: - logger.info("Found {0} media files in {1}".format(numVideos, outputDestination)) + if section_name in ["SickBeard", "NzbDrone", "Sonarr", "CouchPotato", "Radarr"]: + num_videos = len( + core.listMediaFiles(output_destination, media=True, audio=False, meta=False, archives=False)) + if num_videos > 0: + logger.info("Found {0} media files in {1}".format(num_videos, output_destination)) status = 0 elif extract != 1: - logger.info("Found no media files in {0}. Sending to {1} to process".format(outputDestination, sectionName)) + logger.info("Found no media files in {0}. 
Sending to {1} to process".format(output_destination, section_name)) status = 0 else: - logger.warning("Found no media files in {0}".format(outputDestination)) + logger.warning("Found no media files in {0}".format(output_destination)) # Only these sections can handling failed downloads # so make sure everything else gets through without the check for failed - if sectionName not in ['CouchPotato', 'Radarr', 'SickBeard', 'NzbDrone', 'Sonarr']: + if section_name not in ['CouchPotato', 'Radarr', 'SickBeard', 'NzbDrone', 'Sonarr']: status = 0 - logger.info("Calling {0}:{1} to post-process:{2}".format(sectionName, usercat, inputName)) + logger.info("Calling {0}:{1} to post-process:{2}".format(section_name, usercat, input_name)) if core.TORRENT_CHMOD_DIRECTORY: - core.rchmod(outputDestination, core.TORRENT_CHMOD_DIRECTORY) + core.rchmod(output_destination, core.TORRENT_CHMOD_DIRECTORY) result = [0, ""] - if sectionName == 'UserScript': - result = external_script(outputDestination, inputName, inputCategory, section) + if section_name == 'UserScript': + result = external_script(output_destination, input_name, input_category, section) - elif sectionName in ['CouchPotato', 'Radarr']: - result = core.autoProcessMovie().process(sectionName, outputDestination, inputName, - status, clientAgent, inputHash, inputCategory) - elif sectionName in ['SickBeard', 'NzbDrone', 'Sonarr']: - if inputHash: - inputHash = inputHash.upper() - result = core.autoProcessTV().processEpisode(sectionName, outputDestination, inputName, - status, clientAgent, inputHash, inputCategory) - elif sectionName in ['HeadPhones', 'Lidarr']: - result = core.autoProcessMusic().process(sectionName, outputDestination, inputName, - status, clientAgent, inputCategory) - elif sectionName == 'Mylar': - result = core.autoProcessComics().processEpisode(sectionName, outputDestination, inputName, - status, clientAgent, inputCategory) - elif sectionName == 'Gamez': - result = core.autoProcessGames().process(sectionName, outputDestination, inputName, - status, clientAgent, inputCategory) + elif section_name in ['CouchPotato', 'Radarr']: + result = core.autoProcessMovie().process(section_name, output_destination, input_name, + status, clientAgent, input_hash, input_category) + elif section_name in ['SickBeard', 'NzbDrone', 'Sonarr']: + if input_hash: + input_hash = input_hash.upper() + result = core.autoProcessTV().processEpisode(section_name, output_destination, input_name, + status, clientAgent, input_hash, input_category) + elif section_name in ['HeadPhones', 'Lidarr']: + result = core.autoProcessMusic().process(section_name, output_destination, input_name, + status, clientAgent, input_category) + elif section_name == 'Mylar': + result = core.autoProcessComics().processEpisode(section_name, output_destination, input_name, + status, clientAgent, input_category) + elif section_name == 'Gamez': + result = core.autoProcessGames().process(section_name, output_destination, input_name, + status, clientAgent, input_category) - plex_update(inputCategory) + plex_update(input_category) if result[0] != 0: if not core.TORRENT_RESUME_ON_FAILURE: @@ -258,26 +262,26 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, elif clientAgent != 'manual': logger.error("A problem was reported in the autoProcess* script. 
" "If torrent was paused we will resume seeding") - core.resume_torrent(clientAgent, inputHash, inputID, inputName) + core.resume_torrent(clientAgent, input_hash, inputID, input_name) else: if clientAgent != 'manual': # update download status in our DB - core.update_downloadInfoStatus(inputName, 1) + core.update_downloadInfoStatus(input_name, 1) # remove torrent if core.USELINK == 'move-sym' and not core.DELETE_ORIGINAL == 1: - logger.debug('Checking for sym-links to re-direct in: {0}'.format(inputDirectory)) - for dirpath, dirs, files in os.walk(inputDirectory): + logger.debug('Checking for sym-links to re-direct in: {0}'.format(input_directory)) + for dirpath, dirs, files in os.walk(input_directory): for file in files: logger.debug('Checking symlink: {0}'.format(os.path.join(dirpath, file))) replace_links(os.path.join(dirpath, file)) - core.remove_torrent(clientAgent, inputHash, inputID, inputName) + core.remove_torrent(clientAgent, input_hash, inputID, input_name) - if not sectionName == 'UserScript': + if not section_name == 'UserScript': # for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN # cleanup our processing folders of any misc unwanted files and empty directories - core.cleanDir(outputDestination, sectionName, inputCategory) + core.cleanDir(output_destination, section_name, input_category) return result @@ -287,7 +291,7 @@ def main(args): core.initialize() # clientAgent for Torrents - clientAgent = core.TORRENT_CLIENTAGENT + client_agent = core.TORRENT_CLIENTAGENT logger.info("#########################################################") logger.info("## ..::[{0}]::.. ##".format(os.path.basename(__file__))) @@ -300,13 +304,13 @@ def main(args): result = [0, ""] try: - inputDirectory, inputName, inputCategory, inputHash, inputID = core.parse_args(clientAgent, args) + input_directory, input_name, input_category, input_hash, input_id = core.parse_args(client_agent, args) except: logger.error("There was a problem loading variables") return -1 - if inputDirectory and inputName and inputHash and inputID: - result = processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent) + if input_directory and input_name and input_hash and input_id: + result = processTorrent(input_directory, input_name, input_category, input_hash, input_id, client_agent) else: # Perform Manual Post-Processing logger.warning("Invalid number of arguments received from client, Switching to manual run mode ...") @@ -315,42 +319,42 @@ def main(args): for subsection in subsections: if not core.CFG[section][subsection].isenabled(): continue - for dirName in core.getDirs(section, subsection, link='hard'): + for dir_name in core.getDirs(section, subsection, link='hard'): logger.info("Starting manual run for {0}:{1} - Folder:{2}".format - (section, subsection, dirName)) + (section, subsection, dir_name)) logger.info("Checking database for download info for {0} ...".format - (os.path.basename(dirName))) - core.DOWNLOADINFO = core.get_downloadInfo(os.path.basename(dirName), 0) + (os.path.basename(dir_name))) + core.DOWNLOADINFO = core.get_downloadInfo(os.path.basename(dir_name), 0) if core.DOWNLOADINFO: - clientAgent = text_type(core.DOWNLOADINFO[0].get('client_agent', 'manual')) - inputHash = text_type(core.DOWNLOADINFO[0].get('input_hash', '')) - inputID = text_type(core.DOWNLOADINFO[0].get('input_id', '')) + client_agent = text_type(core.DOWNLOADINFO[0].get('client_agent', 'manual')) + input_hash = text_type(core.DOWNLOADINFO[0].get('input_hash', '')) + 
input_id = text_type(core.DOWNLOADINFO[0].get('input_id', '')) logger.info("Found download info for {0}, " - "setting variables now ...".format(os.path.basename(dirName))) + "setting variables now ...".format(os.path.basename(dir_name))) else: logger.info('Unable to locate download info for {0}, ' 'continuing to try and process this release ...'.format - (os.path.basename(dirName))) - clientAgent = 'manual' - inputHash = '' - inputID = '' + (os.path.basename(dir_name))) + client_agent = 'manual' + input_hash = '' + input_id = '' - if clientAgent.lower() not in core.TORRENT_CLIENTS: + if client_agent.lower() not in core.TORRENT_CLIENTS: continue try: - dirName = dirName.encode(core.SYS_ENCODING) + dir_name = dir_name.encode(core.SYS_ENCODING) except UnicodeError: pass - inputName = os.path.basename(dirName) + input_name = os.path.basename(dir_name) try: - inputName = inputName.encode(core.SYS_ENCODING) + input_name = input_name.encode(core.SYS_ENCODING) except UnicodeError: pass - results = processTorrent(dirName, inputName, subsection, inputHash or None, inputID or None, - clientAgent) + results = processTorrent(dir_name, input_name, subsection, input_hash or None, input_id or None, + client_agent) if results[0] != 0: logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format (section, subsection)) diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index 56d65f2e..6d4e87f7 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -13,6 +13,8 @@ requests.packages.urllib3.disable_warnings() class autoProcessComics(object): def processEpisode(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): + dir_name = dirName + input_name = inputName apc_version = "2.04" comicrn_version = "1.01" @@ -32,19 +34,19 @@ class autoProcessComics(object): logger.error("Server did not respond. Exiting", section) return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)] - inputName, dirName = convert_to_ascii(inputName, dirName) - clean_name, ext = os.path.splitext(inputName) + input_name, dir_name = convert_to_ascii(input_name, dir_name) + clean_name, ext = os.path.splitext(input_name) if len(ext) == 4: # we assume this was a standard extension. - inputName = clean_name + input_name = clean_name params = { 'cmd': 'forceProcess', 'apikey': apikey, - 'nzb_folder': remoteDir(dirName) if remote_path else dirName, + 'nzb_folder': remoteDir(dir_name) if remote_path else dir_name, } - if inputName is not None: - params['nzb_name'] = inputName + if input_name is not None: + params['nzb_name'] = input_name params['failed'] = int(status) params['apc_version'] = apc_version params['comicrn_version'] = comicrn_version @@ -72,7 +74,7 @@ class autoProcessComics(object): if success: logger.postprocess("SUCCESS: This issue has been processed successfully", section) - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] else: logger.warning("The issue does not appear to have successfully processed. 
Please check your Logs", section) return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)] diff --git a/core/autoProcess/autoProcessGames.py b/core/autoProcess/autoProcessGames.py index 25b4c12f..c45193c9 100644 --- a/core/autoProcess/autoProcessGames.py +++ b/core/autoProcess/autoProcessGames.py @@ -14,6 +14,8 @@ requests.packages.urllib3.disable_warnings() class autoProcessGames(object): def process(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): + dir_name = dirName + input_name = inputName status = int(status) cfg = dict(core.CFG[section][inputCategory]) @@ -31,19 +33,19 @@ class autoProcessGames(object): logger.error("Server did not respond. Exiting", section) return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)] - inputName, dirName = convert_to_ascii(inputName, dirName) + input_name, dir_name = convert_to_ascii(input_name, dir_name) - fields = inputName.split("-") + fields = input_name.split("-") - gamezID = fields[0].replace("[", "").replace("]", "").replace(" ", "") + gamez_id = fields[0].replace("[", "").replace("]", "").replace(" ", "") - downloadStatus = 'Downloaded' if status == 0 else 'Wanted' + download_status = 'Downloaded' if status == 0 else 'Wanted' params = { 'api_key': apikey, 'mode': 'UPDATEREQUESTEDSTATUS', - 'db_id': gamezID, - 'status': downloadStatus + 'db_id': gamez_id, + 'status': download_status } logger.debug("Opening URL: {0}".format(url), section) @@ -59,9 +61,9 @@ class autoProcessGames(object): if library: logger.postprocess("moving files to library: {0}".format(library), section) try: - shutil.move(dirName, os.path.join(library, inputName)) + shutil.move(dir_name, os.path.join(library, input_name)) except: - logger.error("Unable to move {0} to {1}".format(dirName, os.path.join(library, inputName)), section) + logger.error("Unable to move {0} to {1}".format(dir_name, os.path.join(library, input_name)), section) return [1, "{0}: Failed to post-process - Unable to move files".format(section)] else: logger.error("No library specified to move files to. 
Please edit your configuration.", section) @@ -71,8 +73,8 @@ class autoProcessGames(object): logger.error("Server returned status {0}".format(r.status_code), section) return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)] elif result['success']: - logger.postprocess("SUCCESS: Status for {0} has been set to {1} in Gamez".format(gamezID, downloadStatus), section) - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + logger.postprocess("SUCCESS: Status for {0} has been set to {1} in Gamez".format(gamez_id, download_status), section) + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] else: - logger.error("FAILED: Status for {0} has NOT been updated in Gamez".format(gamezID), section) + logger.error("FAILED: Status for {0} has NOT been updated in Gamez".format(gamez_id), section) return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)] diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index 200d2653..44daf1e7 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -146,6 +146,8 @@ class autoProcessMovie(object): return False def process(self, section, dirName, inputName=None, status=0, clientAgent="manual", download_id="", inputCategory=None, failureLink=None): + dir_name = dirName + input_name = inputName cfg = dict(core.CFG[section][inputCategory]) @@ -158,9 +160,9 @@ class autoProcessMovie(object): method = None #added importMode for Radarr config if section == "Radarr": - importMode = cfg.get("importMode","Move") + import_mode = cfg.get("importMode","Move") else: - importMode = None + import_mode = None delete_failed = int(cfg["delete_failed"]) wait_for = int(cfg["wait_for"]) ssl = int(cfg.get("ssl", 0)) @@ -174,19 +176,19 @@ class autoProcessMovie(object): else: extract = int(cfg.get("extract", 0)) - imdbid = find_imdbid(dirName, inputName, omdbapikey) + imdbid = find_imdbid(dir_name, input_name, omdbapikey) if section == "CouchPotato": - baseURL = "{0}{1}:{2}{3}/api/{4}/".format(protocol, host, port, web_root, apikey) + base_url = "{0}{1}:{2}{3}/api/{4}/".format(protocol, host, port, web_root, apikey) if section == "Radarr": - baseURL = "{0}{1}:{2}{3}/api/command".format(protocol, host, port, web_root) + base_url = "{0}{1}:{2}{3}/api/command".format(protocol, host, port, web_root) url2 = "{0}{1}:{2}{3}/api/config/downloadClient".format(protocol, host, port, web_root) headers = {'X-Api-Key': apikey} if not apikey: logger.info('No CouchPotato or Radarr apikey entered. Performing transcoder functions only') release = None - elif server_responding(baseURL): + elif server_responding(base_url): if section == "CouchPotato": - release = self.get_release(baseURL, imdbid, download_id) + release = self.get_release(base_url, imdbid, download_id) else: release = None else: @@ -208,28 +210,28 @@ class autoProcessMovie(object): except: pass - if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name. - dirName = os.path.split(os.path.normpath(dirName))[0] + if not os.path.isdir(dir_name) and os.path.isfile(dir_name): # If the input directory is a file, assume single file download and split dir/name. 
+ dir_name = os.path.split(os.path.normpath(dir_name))[0] - SpecificPath = os.path.join(dirName, str(inputName)) - cleanName = os.path.splitext(SpecificPath) - if cleanName[1] == ".nzb": - SpecificPath = cleanName[0] - if os.path.isdir(SpecificPath): - dirName = SpecificPath + specific_path = os.path.join(dir_name, str(input_name)) + clean_name = os.path.splitext(specific_path) + if clean_name[1] == ".nzb": + specific_path = clean_name[0] + if os.path.isdir(specific_path): + dir_name = specific_path - process_all_exceptions(inputName, dirName) - inputName, dirName = convert_to_ascii(inputName, dirName) + process_all_exceptions(input_name, dir_name) + input_name, dir_name = convert_to_ascii(input_name, dir_name) - if not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False) and listMediaFiles(dirName, media=False, audio=False, meta=False, archives=True) and extract: - logger.debug('Checking for archives to extract in directory: {0}'.format(dirName)) - core.extractFiles(dirName) - inputName, dirName = convert_to_ascii(inputName, dirName) + if not listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False) and listMediaFiles(dir_name, media=False, audio=False, meta=False, archives=True) and extract: + logger.debug('Checking for archives to extract in directory: {0}'.format(dir_name)) + core.extractFiles(dir_name) + input_name, dir_name = convert_to_ascii(input_name, dir_name) good_files = 0 num_files = 0 # Check video files for corruption - for video in listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): + for video in listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False): num_files += 1 if transcoder.isVideoGood(video, status): import_subs(video) @@ -246,47 +248,47 @@ class autoProcessMovie(object): failureLink += '&corrupt=true' status = 1 elif clientAgent == "manual": - logger.warning("No media files found in directory {0} to manually process.".format(dirName), section) + logger.warning("No media files found in directory {0} to manually process.".format(dir_name), section) return [0, ""] # Success (as far as this script is concerned) else: - logger.warning("No media files found in directory {0}. Processing this as a failed download".format(dirName), section) + logger.warning("No media files found in directory {0}. 
Processing this as a failed download".format(dir_name), section) status = 1 if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') if status == 0: if core.TRANSCODE == 1: - result, newDirName = transcoder.Transcode_directory(dirName) + result, new_dir_name = transcoder.Transcode_directory(dir_name) if result == 0: - logger.debug("Transcoding succeeded for files in {0}".format(dirName), section) - dirName = newDirName + logger.debug("Transcoding succeeded for files in {0}".format(dir_name), section) + dir_name = new_dir_name chmod_directory = int(str(cfg.get("chmodDirectory", "0")), 8) logger.debug("Config setting 'chmodDirectory' currently set to {0}".format(oct(chmod_directory)), section) if chmod_directory: - logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dirName), section) - core.rchmod(dirName, chmod_directory) + logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dir_name), section) + core.rchmod(dir_name, chmod_directory) else: - logger.error("Transcoding failed for files in {0}".format(dirName), section) + logger.error("Transcoding failed for files in {0}".format(dir_name), section) return [1, "{0}: Failed to post-process - Transcoding failed".format(section)] - for video in listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): + for video in listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False): if not release and ".cp(tt" not in video and imdbid: - videoName, videoExt = os.path.splitext(video) - video2 = "{0}.cp({1}){2}".format(videoName, imdbid, videoExt) + video_name, video_ext = os.path.splitext(video) + video2 = "{0}.cp({1}){2}".format(video_name, imdbid, video_ext) if not (clientAgent in [core.TORRENT_CLIENTAGENT, 'manual'] and core.USELINK == 'move-sym'): logger.debug('Renaming: {0} to: {1}'.format(video, video2)) os.rename(video, video2) if not apikey: #If only using Transcoder functions, exit here. logger.info('No CouchPotato or Radarr apikey entered. 
Processing completed.') - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] params = {} if download_id and release_id: params['downloader'] = downloader or clientAgent params['download_id'] = download_id - params['media_folder'] = remoteDir(dirName) if remote_path else dirName + params['media_folder'] = remoteDir(dir_name) if remote_path else dir_name if section == "CouchPotato": if method == "manage": @@ -295,22 +297,22 @@ class autoProcessMovie(object): else: command = "renamer.scan" - url = "{0}{1}".format(baseURL, command) + url = "{0}{1}".format(base_url, command) logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params), section) - logger.postprocess("Starting {0} scan for {1}".format(method, inputName), section) + logger.postprocess("Starting {0} scan for {1}".format(method, input_name), section) if section == "Radarr": - payload = {'name': 'DownloadedMoviesScan', 'path': params['media_folder'], 'downloadClientId': download_id,'importMode' : importMode} + payload = {'name': 'DownloadedMoviesScan', 'path': params['media_folder'], 'downloadClientId': download_id,'importMode' : import_mode} if not download_id: payload.pop("downloadClientId") - logger.debug("Opening URL: {0} with PARAMS: {1}".format(baseURL, payload), section) - logger.postprocess("Starting DownloadedMoviesScan scan for {0}".format(inputName), section) + logger.debug("Opening URL: {0} with PARAMS: {1}".format(base_url, payload), section) + logger.postprocess("Starting DownloadedMoviesScan scan for {0}".format(input_name), section) try: if section == "CouchPotato": r = requests.get(url, params=params, verify=False, timeout=(30, 1800)) else: - r = requests.post(baseURL, data=json.dumps(payload), headers=headers, stream=True, verify=False, timeout=(30, 1800)) + r = requests.post(base_url, data=json.dumps(payload), headers=headers, stream=True, verify=False, timeout=(30, 1800)) except requests.ConnectionError: logger.error("Unable to open URL", section) return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)] @@ -320,27 +322,27 @@ class autoProcessMovie(object): logger.error("Server returned status {0}".format(r.status_code), section) return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)] elif section == "CouchPotato" and result['success']: - logger.postprocess("SUCCESS: Finished {0} scan for folder {1}".format(method, dirName), section) + logger.postprocess("SUCCESS: Finished {0} scan for folder {1}".format(method, dir_name), section) if method == "manage": - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] elif section == "Radarr": logger.postprocess("Radarr response: {0}".format(result['state'])) try: res = json.loads(r.content) scan_id = int(res['id']) logger.debug("Scan started with id: {0}".format(scan_id), section) - Started = True + started = True except Exception as e: logger.warning("No scan id was returned due to: {0}".format(e), section) scan_id = None else: - logger.error("FAILED: {0} scan was unable to finish for folder {1}. exiting!".format(method, dirName), + logger.error("FAILED: {0} scan was unable to finish for folder {1}. 
exiting!".format(method, dir_name), section) return [1, "{0}: Failed to post-process - Server did not return success".format(section)] else: core.FAILED = True - logger.postprocess("FAILED DOWNLOAD DETECTED FOR {0}".format(inputName), section) + logger.postprocess("FAILED DOWNLOAD DETECTED FOR {0}".format(input_name), section) if failureLink: reportNzb(failureLink, clientAgent) @@ -348,19 +350,19 @@ class autoProcessMovie(object): logger.postprocess("FAILED: The download failed. Sending failed download to {0} for CDH processing".format(section), section) return [1, "{0}: Download Failed. Sending back to {1}".format(section, section)] # Return as failed to flag this in the downloader. - if delete_failed and os.path.isdir(dirName) and not os.path.dirname(dirName) == dirName: - logger.postprocess("Deleting failed files and folder {0}".format(dirName), section) - rmDir(dirName) + if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name: + logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section) + rmDir(dir_name) if not release_id and not media_id: - logger.error("Could not find a downloaded movie in the database matching {0}, exiting!".format(inputName), + logger.error("Could not find a downloaded movie in the database matching {0}, exiting!".format(input_name), section) return [1, "{0}: Failed to post-process - Failed download not found in {1}".format(section, section)] if release_id: - logger.postprocess("Setting failed release {0} to ignored ...".format(inputName), section) + logger.postprocess("Setting failed release {0} to ignored ...".format(input_name), section) - url = "{url}release.ignore".format(url=baseURL) + url = "{url}release.ignore".format(url=base_url) params = {'id': release_id} logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params), section) @@ -376,14 +378,14 @@ class autoProcessMovie(object): logger.error("Server returned status {0}".format(r.status_code), section) return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)] elif result['success']: - logger.postprocess("SUCCESS: {0} has been set to ignored ...".format(inputName), section) + logger.postprocess("SUCCESS: {0} has been set to ignored ...".format(input_name), section) else: - logger.warning("FAILED: Unable to set {0} to ignored!".format(inputName), section) - return [1, "{0}: Failed to post-process - Unable to set {1} to ignored".format(section, inputName)] + logger.warning("FAILED: Unable to set {0} to ignored!".format(input_name), section) + return [1, "{0}: Failed to post-process - Unable to set {1} to ignored".format(section, input_name)] logger.postprocess("Trying to snatch the next highest ranked release.", section) - url = "{0}movie.searcher.try_next".format(baseURL) + url = "{0}movie.searcher.try_next".format(base_url) logger.debug("Opening URL: {0}".format(url), section) try: @@ -412,7 +414,7 @@ class autoProcessMovie(object): while time.time() < timeout: # only wait 2 (default) minutes, then return. logger.postprocess("Checking for status change, please stand by ...", section) if section == "CouchPotato": - release = self.get_release(baseURL, imdbid, download_id, release_id) + release = self.get_release(base_url, imdbid, download_id, release_id) scan_id = None else: release = None @@ -424,35 +426,35 @@ class autoProcessMovie(object): if release_status_old is None: # we didn't have a release before, but now we do. 
logger.postprocess("SUCCESS: Movie {0} has now been added to CouchPotato with release status of [{1}]".format( title, str(release_status_new).upper()), section) - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] if release_status_new != release_status_old: logger.postprocess("SUCCESS: Release for {0} has now been marked with a status of [{1}]".format( title, str(release_status_new).upper()), section) - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] except: pass elif scan_id: - url = "{0}/{1}".format(baseURL, scan_id) + url = "{0}/{1}".format(base_url, scan_id) command_status = self.command_complete(url, params, headers, section) if command_status: logger.debug("The Scan command return status: {0}".format(command_status), section) if command_status in ['completed']: logger.debug("The Scan command has completed successfully. Renaming was successful.", section) - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] elif command_status in ['failed']: logger.debug("The Scan command has failed. Renaming was not successful.", section) - # return [1, "%s: Failed to post-process %s" % (section, inputName) ] + # return [1, "%s: Failed to post-process %s" % (section, input_name) ] - if not os.path.isdir(dirName): + if not os.path.isdir(dir_name): logger.postprocess("SUCCESS: Input Directory [{0}] has been processed and removed".format( - dirName), section) - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + dir_name), section) + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] - elif not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=True): + elif not listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=True): logger.postprocess("SUCCESS: Input Directory [{0}] has no remaining media files. This has been fully processed.".format( - dirName), section) - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + dir_name), section) + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] # pause and let CouchPotatoServer/Radarr catch its breath time.sleep(10 * wait_for) @@ -462,6 +464,6 @@ class autoProcessMovie(object): logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.".format(section), section) return [status, "{0}: Complete DownLoad Handling is enabled. 
Passing back to {1}".format(section, section)] logger.warning( - "{0} does not appear to have changed status after {1} minutes, Please check your logs.".format(inputName, wait_for), + "{0} does not appear to have changed status after {1} minutes, Please check your logs.".format(input_name, wait_for), section) return [1, "{0}: Failed to post-process - No change in status".format(section)] diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index 54192d58..f931c42e 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -97,6 +97,9 @@ class autoProcessMusic(object): return [2, "no change"] def process(self, section, dirName, inputName=None, status=0, clientAgent="manual", inputCategory=None): + dir_name = dirName + input_name = inputName + status = int(status) cfg = dict(core.CFG[section][inputCategory]) @@ -124,25 +127,25 @@ class autoProcessMusic(object): logger.error("Server did not respond. Exiting", section) return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)] - if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name. - dirName = os.path.split(os.path.normpath(dirName))[0] + if not os.path.isdir(dir_name) and os.path.isfile(dir_name): # If the input directory is a file, assume single file download and split dir/name. + dir_name = os.path.split(os.path.normpath(dir_name))[0] - SpecificPath = os.path.join(dirName, str(inputName)) - cleanName = os.path.splitext(SpecificPath) - if cleanName[1] == ".nzb": - SpecificPath = cleanName[0] - if os.path.isdir(SpecificPath): - dirName = SpecificPath + specific_path = os.path.join(dir_name, str(input_name)) + clean_name = os.path.splitext(specific_path) + if clean_name[1] == ".nzb": + specific_path = clean_name[0] + if os.path.isdir(specific_path): + dir_name = specific_path - process_all_exceptions(inputName, dirName) - inputName, dirName = convert_to_ascii(inputName, dirName) + process_all_exceptions(input_name, dir_name) + input_name, dir_name = convert_to_ascii(input_name, dir_name) - if not listMediaFiles(dirName, media=False, audio=True, meta=False, archives=False) and listMediaFiles(dirName, media=False, audio=False, meta=False, archives=True) and extract: - logger.debug('Checking for archives to extract in directory: {0}'.format(dirName)) - core.extractFiles(dirName) - inputName, dirName = convert_to_ascii(inputName, dirName) + if not listMediaFiles(dir_name, media=False, audio=True, meta=False, archives=False) and listMediaFiles(dir_name, media=False, audio=False, meta=False, archives=True) and extract: + logger.debug('Checking for archives to extract in directory: {0}'.format(dir_name)) + core.extractFiles(dir_name) + input_name, dir_name = convert_to_ascii(input_name, dir_name) - #if listMediaFiles(dirName, media=False, audio=True, meta=False, archives=False) and status: + #if listMediaFiles(dir_name, media=False, audio=True, meta=False, archives=False) and status: # logger.info("Status shown as failed from Downloader, but valid video files found. 
Setting as successful.", section) # status = 0 @@ -151,20 +154,20 @@ class autoProcessMusic(object): params = { 'apikey': apikey, 'cmd': "forceProcess", - 'dir': remoteDir(dirName) if remote_path else dirName + 'dir': remoteDir(dir_name) if remote_path else dir_name } - res = self.forceProcess(params, url, apikey, inputName, dirName, section, wait_for) + res = self.forceProcess(params, url, apikey, input_name, dir_name, section, wait_for) if res[0] in [0, 1]: return res params = { 'apikey': apikey, 'cmd': "forceProcess", - 'dir': os.path.split(remoteDir(dirName))[0] if remote_path else os.path.split(dirName)[0] + 'dir': os.path.split(remoteDir(dir_name))[0] if remote_path else os.path.split(dir_name)[0] } - res = self.forceProcess(params, url, apikey, inputName, dirName, section, wait_for) + res = self.forceProcess(params, url, apikey, input_name, dir_name, section, wait_for) if res[0] in [0, 1]: return res @@ -176,11 +179,11 @@ class autoProcessMusic(object): url = "{0}{1}:{2}{3}/api/v1/command".format(protocol, host, port, web_root) headers = {"X-Api-Key": apikey} if remote_path: - logger.debug("remote_path: {0}".format(remoteDir(dirName)), section) - data = {"name": "Rename", "path": remoteDir(dirName)} + logger.debug("remote_path: {0}".format(remoteDir(dir_name)), section) + data = {"name": "Rename", "path": remoteDir(dir_name)} else: - logger.debug("path: {0}".format(dirName), section) - data = {"name": "Rename", "path": dirName} + logger.debug("path: {0}".format(dir_name), section) + data = {"name": "Rename", "path": dir_name} data = json.dumps(data) try: logger.debug("Opening URL: {0} with data: {1}".format(url, data), section) @@ -189,18 +192,18 @@ class autoProcessMusic(object): logger.error("Unable to open URL: {0}".format(url), section) return [1, "{0}: Failed to post-process - Unable to connect to {1}".format(section, section)] - Success = False - Queued = False - Started = False + success = False + queued = False + started = False try: res = json.loads(r.content) scan_id = int(res['id']) logger.debug("Scan started with id: {0}".format(scan_id), section) - Started = True + started = True except Exception as e: logger.warning("No scan id was returned due to: {0}".format(e), section) scan_id = None - Started = False + started = False return [1, "{0}: Failed to post-process - Unable to start scan".format(section)] n = 0 @@ -214,15 +217,15 @@ class autoProcessMusic(object): n += 1 if command_status: logger.debug("The Scan command return status: {0}".format(command_status), section) - if not os.path.exists(dirName): - logger.debug("The directory {0} has been removed. Renaming was successful.".format(dirName), section) - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + if not os.path.exists(dir_name): + logger.debug("The directory {0} has been removed. Renaming was successful.".format(dir_name), section) + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] elif command_status and command_status in ['completed']: logger.debug("The Scan command has completed successfully. Renaming was successful.", section) - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] elif command_status and command_status in ['failed']: logger.debug("The Scan command has failed. 
Renaming was not successful.", section) - # return [1, "%s: Failed to post-process %s" % (section, inputName) ] + # return [1, "%s: Failed to post-process %s" % (section, input_name) ] else: logger.debug("The Scan command did not return status completed. Passing back to {0} to attempt complete download handling.".format(section), section) return [status, "{0}: Passing back to {1} to attempt Complete Download Handling".format(section, section)] @@ -233,7 +236,7 @@ class autoProcessMusic(object): return [1, "{0}: Download Failed. Sending back to {1}".format(section, section)] # Return as failed to flag this in the downloader. else: logger.warning("FAILED DOWNLOAD DETECTED", section) - if delete_failed and os.path.isdir(dirName) and not os.path.dirname(dirName) == dirName: - logger.postprocess("Deleting failed files and folder {0}".format(dirName), section) - rmDir(dirName) + if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name: + logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section) + rmDir(dir_name) return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)] # Return as failed to flag this in the downloader. \ No newline at end of file diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 473dad91..3175a852 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -76,7 +76,7 @@ class autoProcessTV(object): return [1, "{0}: Failed to post-process - {1} did not respond.".format(section, section)] delete_failed = int(cfg.get("delete_failed", 0)) - nzbExtractionBy = cfg.get("nzbExtractionBy", "Downloader") + nzb_extraction_by = cfg.get("nzbExtractionBy", "Downloader") process_method = cfg.get("process_method") if clientAgent == core.TORRENT_CLIENTAGENT and core.USELINK == "move-sym": process_method = "symlink" @@ -91,47 +91,47 @@ class autoProcessTV(object): else: extract = int(cfg.get("extract", 0)) #get importmode, default to "Move" for consistency with legacy - importMode = cfg.get("importMode","Move") + import_mode = cfg.get("importMode","Move") - if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name. - dirName = os.path.split(os.path.normpath(dirName))[0] + if not os.path.isdir(dir_name) and os.path.isfile(dir_name): # If the input directory is a file, assume single file download and split dir/name. + dir_name = os.path.split(os.path.normpath(dir_name))[0] - SpecificPath = os.path.join(dirName, str(inputName)) - cleanName = os.path.splitext(SpecificPath) - if cleanName[1] == ".nzb": - SpecificPath = cleanName[0] - if os.path.isdir(SpecificPath): - dirName = SpecificPath + specific_path = os.path.join(dir_name, str(input_name)) + clean_name = os.path.splitext(specific_path) + if clean_name[1] == ".nzb": + specific_path = clean_name[0] + if os.path.isdir(specific_path): + dir_name = specific_path # Attempt to create the directory if it doesn't exist and ignore any # error stating that it already exists. This fixes a bug where SickRage # won't process the directory because it doesn't exist. 
try: - os.makedirs(dirName) # Attempt to create the directory + os.makedirs(dir_name) # Attempt to create the directory except OSError as e: # Re-raise the error if it wasn't about the directory not existing if e.errno != errno.EEXIST: raise - if 'process_method' not in fork_params or (clientAgent in ['nzbget', 'sabnzbd'] and nzbExtractionBy != "Destination"): - if inputName: - process_all_exceptions(inputName, dirName) - inputName, dirName = convert_to_ascii(inputName, dirName) + if 'process_method' not in fork_params or (clientAgent in ['nzbget', 'sabnzbd'] and nzb_extraction_by != "Destination"): + if input_name: + process_all_exceptions(input_name, dir_name) + input_name, dir_name = convert_to_ascii(input_name, dir_name) # Now check if tv files exist in destination. - if not listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): - if listMediaFiles(dirName, media=False, audio=False, meta=False, archives=True) and extract: - logger.debug('Checking for archives to extract in directory: {0}'.format(dirName)) - core.extractFiles(dirName) - inputName, dirName = convert_to_ascii(inputName, dirName) + if not listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False): + if listMediaFiles(dir_name, media=False, audio=False, meta=False, archives=True) and extract: + logger.debug('Checking for archives to extract in directory: {0}'.format(dir_name)) + core.extractFiles(dir_name) + input_name, dir_name = convert_to_ascii(input_name, dir_name) - if listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): # Check that a video exists. if not, assume failed. - flatten(dirName) + if listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False): # Check that a video exists. if not, assume failed. + flatten(dir_name) # Check video files for corruption good_files = 0 num_files = 0 - for video in listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False): + for video in listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False): num_files += 1 if transcoder.isVideoGood(video, status): good_files += 1 @@ -150,9 +150,9 @@ class autoProcessTV(object): if failureLink: failureLink += '&corrupt=true' elif clientAgent == "manual": - logger.warning("No media files found in directory {0} to manually process.".format(dirName), section) + logger.warning("No media files found in directory {0} to manually process.".format(dir_name), section) return [0, ""] # Success (as far as this script is concerned) - elif nzbExtractionBy == "Destination": + elif nzb_extraction_by == "Destination": logger.info("Check for media files ignored because nzbExtractionBy is set to Destination.") if int(failed) == 0: logger.info("Setting Status Success.") @@ -163,32 +163,32 @@ class autoProcessTV(object): status = 1 failed = 1 else: - logger.warning("No media files found in directory {0}. Processing this as a failed download".format(dirName), section) + logger.warning("No media files found in directory {0}. 
Processing this as a failed download".format(dir_name), section) status = 1 failed = 1 if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') if status == 0 and core.TRANSCODE == 1: # only transcode successful downloads - result, newDirName = transcoder.Transcode_directory(dirName) + result, new_dir_name = transcoder.Transcode_directory(dir_name) if result == 0: - logger.debug("SUCCESS: Transcoding succeeded for files in {0}".format(dirName), section) - dirName = newDirName + logger.debug("SUCCESS: Transcoding succeeded for files in {0}".format(dir_name), section) + dir_name = new_dir_name chmod_directory = int(str(cfg.get("chmodDirectory", "0")), 8) logger.debug("Config setting 'chmodDirectory' currently set to {0}".format(oct(chmod_directory)), section) if chmod_directory: - logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dirName), section) - core.rchmod(dirName, chmod_directory) + logger.info("Attempting to set the octal permission of '{0}' on directory '{1}'".format(oct(chmod_directory), dir_name), section) + core.rchmod(dir_name, chmod_directory) else: - logger.error("FAILED: Transcoding failed for files in {0}".format(dirName), section) + logger.error("FAILED: Transcoding failed for files in {0}".format(dir_name), section) return [1, "{0}: Failed to post-process - Transcoding failed".format(section)] # configure SB params to pass fork_params['quiet'] = 1 fork_params['proc_type'] = 'manual' - if inputName is not None: - fork_params['nzbName'] = inputName + if input_name is not None: + fork_params['nzbName'] = input_name for param in copy.copy(fork_params): if param == "failed": @@ -206,10 +206,10 @@ class autoProcessTV(object): if "proc_type" in fork_params: del fork_params['proc_type'] - if param in ["dirName", "dir", "proc_dir", "process_directory", "path"]: - fork_params[param] = dirName + if param in ["dir_name", "dir", "proc_dir", "process_directory", "path"]: + fork_params[param] = dir_name if remote_path: - fork_params[param] = remoteDir(dirName) + fork_params[param] = remoteDir(dir_name) if param == "process_method": if process_method: @@ -244,7 +244,7 @@ class autoProcessTV(object): if status == 0: if section == "NzbDrone" and not apikey: logger.info('No Sonarr apikey entered. Processing completed.') - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] logger.postprocess("SUCCESS: The download succeeded, sending a post-process request", section) else: core.FAILED = True @@ -257,9 +257,9 @@ class autoProcessTV(object): return [1, "{0}: Download Failed. Sending back to {1}".format(section, section)] # Return as failed to flag this in the downloader. else: logger.postprocess("FAILED: The download failed. {0} branch does not handle failed downloads. Nothing to process".format(fork), section) - if delete_failed and os.path.isdir(dirName) and not os.path.dirname(dirName) == dirName: - logger.postprocess("Deleting failed files and folder {0}".format(dirName), section) - rmDir(dirName) + if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name: + logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section) + rmDir(dir_name) return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)] # Return as failed to flag this in the downloader. 
url = None @@ -274,11 +274,11 @@ class autoProcessTV(object): headers = {"X-Api-Key": apikey} # params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'} if remote_path: - logger.debug("remote_path: {0}".format(remoteDir(dirName)), section) - data = {"name": "DownloadedEpisodesScan", "path": remoteDir(dirName), "downloadClientId": download_id, "importMode": importMode} + logger.debug("remote_path: {0}".format(remoteDir(dir_name)), section) + data = {"name": "DownloadedEpisodesScan", "path": remoteDir(dir_name), "downloadClientId": download_id, "importMode": import_mode} else: - logger.debug("path: {0}".format(dirName), section) - data = {"name": "DownloadedEpisodesScan", "path": dirName, "downloadClientId": download_id, "importMode": importMode} + logger.debug("path: {0}".format(dir_name), section) + data = {"name": "DownloadedEpisodesScan", "path": dir_name, "downloadClientId": download_id, "importMode": import_mode} if not download_id: data.pop("downloadClientId") data = json.dumps(data) @@ -306,45 +306,45 @@ class autoProcessTV(object): logger.error("Server returned status {0}".format(r.status_code), section) return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)] - Success = False - Queued = False - Started = False + success = False + queued = False + started = False if section == "SickBeard": if apikey: if r.json()['result'] == 'success': - Success = True + success = True else: for line in r.iter_lines(): if line: line = line.decode('utf-8') logger.postprocess("{0}".format(line), section) if "Moving file from" in line: - inputName = os.path.split(line)[1] + input_name = os.path.split(line)[1] if "added to the queue" in line: - Queued = True + queued = True if "Processing succeeded" in line or "Successfully processed" in line: - Success = True + success = True - if Queued: + if queued: time.sleep(60) elif section == "NzbDrone": try: res = json.loads(r.content) scan_id = int(res['id']) logger.debug("Scan started with id: {0}".format(scan_id), section) - Started = True + started = True except Exception as e: logger.warning("No scan id was returned due to: {0}".format(e), section) scan_id = None - Started = False + started = False - if status != 0 and delete_failed and not os.path.dirname(dirName) == dirName: - logger.postprocess("Deleting failed files and folder {0}".format(dirName), section) - rmDir(dirName) + if status != 0 and delete_failed and not os.path.dirname(dir_name) == dir_name: + logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section) + rmDir(dir_name) - if Success: - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] - elif section == "NzbDrone" and Started: + if success: + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] + elif section == "NzbDrone" and started: n = 0 params = {} url = "{0}/{1}".format(url, scan_id) @@ -356,20 +356,20 @@ class autoProcessTV(object): n += 1 if command_status: logger.debug("The Scan command return status: {0}".format(command_status), section) - if not os.path.exists(dirName): - logger.debug("The directory {0} has been removed. Renaming was successful.".format(dirName), section) - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + if not os.path.exists(dir_name): + logger.debug("The directory {0} has been removed. 
Renaming was successful.".format(dir_name), section) + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] elif command_status and command_status in ['completed']: logger.debug("The Scan command has completed successfully. Renaming was successful.", section) - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] elif command_status and command_status in ['failed']: logger.debug("The Scan command has failed. Renaming was not successful.", section) - # return [1, "%s: Failed to post-process %s" % (section, inputName) ] + # return [1, "%s: Failed to post-process %s" % (section, input_name) ] if self.CDH(url2, headers, section=section): logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.".format(section), section) return [status, "{0}: Complete DownLoad Handling is enabled. Passing back to {1}".format(section, section)] else: logger.warning("The Scan command did not return a valid status. Renaming was not successful.", section) - return [1, "{0}: Failed to post-process {1}".format(section, inputName)] + return [1, "{0}: Failed to post-process {1}".format(section, input_name)] else: return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)] # We did not receive Success confirmation. diff --git a/core/extractor/extractor.py b/core/extractor/extractor.py index c32d9c9a..4f3b4454 100644 --- a/core/extractor/extractor.py +++ b/core/extractor/extractor.py @@ -22,7 +22,7 @@ def extract(filePath, outputDestination): invislocation = os.path.join(core.PROGRAM_DIR, 'core', 'extractor', 'bin', 'invisible.vbs') cmd_7zip = [wscriptlocation, invislocation, str(core.SHOWEXTRACT), core.SEVENZIP, "x", "-y"] ext_7zip = [".rar", ".zip", ".tar.gz", "tgz", ".tar.bz2", ".tbz", ".tar.lzma", ".tlz", ".7z", ".xz"] - EXTRACT_COMMANDS = dict.fromkeys(ext_7zip, cmd_7zip) + extract_commands = dict.fromkeys(ext_7zip, cmd_7zip) # Using unix else: required_cmds = ["unrar", "unzip", "tar", "unxz", "unlzma", "7zr", "bunzip2"] @@ -33,7 +33,7 @@ def extract(filePath, outputDestination): # ".lzma": ["xz", "-d --format=lzma --keep"], # ".bz2": ["bzip2", "-d --keep"], - EXTRACT_COMMANDS = { + extract_commands = { ".rar": ["unrar", "x", "-o+", "-y"], ".tar": ["tar", "-xf"], ".zip": ["unzip"], @@ -49,24 +49,24 @@ def extract(filePath, outputDestination): for cmd in required_cmds: if call(['which', cmd], stdout=devnull, stderr=devnull): # note, returns 0 if exists, or 1 if doesn't exist. 
- for k, v in EXTRACT_COMMANDS.items(): + for k, v in extract_commands.items(): if cmd in v[0]: if not call(["which", "7zr"], stdout=devnull, stderr=devnull): # we do have "7zr" - EXTRACT_COMMANDS[k] = ["7zr", "x", "-y"] + extract_commands[k] = ["7zr", "x", "-y"] elif not call(["which", "7z"], stdout=devnull, stderr=devnull): # we do have "7z" - EXTRACT_COMMANDS[k] = ["7z", "x", "-y"] + extract_commands[k] = ["7z", "x", "-y"] elif not call(["which", "7za"], stdout=devnull, stderr=devnull): # we do have "7za" - EXTRACT_COMMANDS[k] = ["7za", "x", "-y"] + extract_commands[k] = ["7za", "x", "-y"] else: core.logger.error("EXTRACTOR: {cmd} not found, " "disabling support for {feature}".format (cmd=cmd, feature=k)) - del EXTRACT_COMMANDS[k] + del extract_commands[k] devnull.close() else: core.logger.warning("EXTRACTOR: Cannot determine which tool to use when called from Transmission") - if not EXTRACT_COMMANDS: + if not extract_commands: core.logger.warning("EXTRACTOR: No archive extracting programs found, plugin will be disabled") ext = os.path.splitext(filePath) @@ -74,14 +74,14 @@ def extract(filePath, outputDestination): if ext[1] in (".gz", ".bz2", ".lzma"): # Check if this is a tar if os.path.splitext(ext[0])[1] == ".tar": - cmd = EXTRACT_COMMANDS[".tar{ext}".format(ext=ext[1])] + cmd = extract_commands[".tar{ext}".format(ext=ext[1])] elif ext[1] in (".1", ".01", ".001") and os.path.splitext(ext[0])[1] in (".rar", ".zip", ".7z"): - cmd = EXTRACT_COMMANDS[os.path.splitext(ext[0])[1]] + cmd = extract_commands[os.path.splitext(ext[0])[1]] elif ext[1] in (".cb7", ".cba", ".cbr", ".cbt", ".cbz"): # don't extract these comic book archives. return False else: - if ext[1] in EXTRACT_COMMANDS: - cmd = EXTRACT_COMMANDS[ext[1]] + if ext[1] in extract_commands: + cmd = extract_commands[ext[1]] else: core.logger.debug("EXTRACTOR: Unknown file type: {ext}".format (ext=ext[1])) @@ -100,13 +100,13 @@ def extract(filePath, outputDestination): core.logger.debug("Extracting {cmd} {file} {destination}".format (cmd=cmd, file=filePath, destination=outputDestination)) - origFiles = [] - origDirs = [] + orig_files = [] + orig_dirs = [] for dir, subdirs, files in os.walk(outputDestination): for subdir in subdirs: - origDirs.append(os.path.join(dir, subdir)) + orig_dirs.append(os.path.join(dir, subdir)) for file in files: - origFiles.append(os.path.join(dir, file)) + orig_files.append(os.path.join(dir, file)) pwd = os.getcwd() # Get our Present Working Directory os.chdir(outputDestination) # Not all unpack commands accept full paths, so just extract into this directory @@ -162,13 +162,13 @@ def extract(filePath, outputDestination): perms = stat.S_IMODE(os.lstat(os.path.split(filePath)[0]).st_mode) for dir, subdirs, files in os.walk(outputDestination): for subdir in subdirs: - if not os.path.join(dir, subdir) in origFiles: + if not os.path.join(dir, subdir) in orig_files: try: os.chmod(os.path.join(dir, subdir), perms) except: pass for file in files: - if not os.path.join(dir, file) in origFiles: + if not os.path.join(dir, file) in orig_files: try: shutil.copymode(filePath, os.path.join(dir, file)) except: diff --git a/core/nzbToMediaAutoFork.py b/core/nzbToMediaAutoFork.py index 7dfdc3b6..8df8c313 100644 --- a/core/nzbToMediaAutoFork.py +++ b/core/nzbToMediaAutoFork.py @@ -79,13 +79,13 @@ def autoFork(section, inputCategory): r = [] if r and r.ok: if apikey: - optionalParameters = [] + optional_parameters = [] try: - optionalParameters = r.json()['data']['optionalParameters'].keys() + optional_parameters = 
r.json()['data']['optionalParameters'].keys() except: - optionalParameters = r.json()['data']['data']['optionalParameters'].keys() + optional_parameters = r.json()['data']['data']['optionalParameters'].keys() for param in params: - if param not in optionalParameters: + if param not in optional_parameters: rem_params.append(param) else: for param in params: diff --git a/core/nzbToMediaConfig.py b/core/nzbToMediaConfig.py index 7f915936..6ac42645 100644 --- a/core/nzbToMediaConfig.py +++ b/core/nzbToMediaConfig.py @@ -251,7 +251,7 @@ class ConfigObj(configobj.ConfigObj, Section): @staticmethod def addnzbget(): # load configs into memory - CFG_NEW = config() + cfg_new = config() try: if 'NZBPO_NDCATEGORY' in os.environ and 'NZBPO_SBCATEGORY' in os.environ: @@ -274,196 +274,196 @@ class ConfigObj(configobj.ConfigObj, Section): if key in os.environ: option = 'default_downloadDirectory' value = os.environ[key] - CFG_NEW[section][option] = value + cfg_new[section][option] = value section = "General" - envKeys = ['AUTO_UPDATE', 'CHECK_MEDIA', 'SAFE_MODE', 'NO_EXTRACT_FAILED'] - cfgKeys = ['auto_update', 'check_media', 'safe_mode', 'no_extract_failed'] - for index in range(len(envKeys)): - key = 'NZBPO_{index}'.format(index=envKeys[index]) + env_keys = ['AUTO_UPDATE', 'CHECK_MEDIA', 'SAFE_MODE', 'NO_EXTRACT_FAILED'] + cfg_keys = ['auto_update', 'check_media', 'safe_mode', 'no_extract_failed'] + for index in range(len(env_keys)): + key = 'NZBPO_{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - CFG_NEW[section][option] = value + cfg_new[section][option] = value section = "Network" - envKeys = ['MOUNTPOINTS'] - cfgKeys = ['mount_points'] - for index in range(len(envKeys)): - key = 'NZBPO_{index}'.format(index=envKeys[index]) + env_keys = ['MOUNTPOINTS'] + cfg_keys = ['mount_points'] + for index in range(len(env_keys)): + key = 'NZBPO_{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - CFG_NEW[section][option] = value + cfg_new[section][option] = value section = "CouchPotato" - envCatKey = 'NZBPO_CPSCATEGORY' - envKeys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'METHOD', 'DELETE_FAILED', 'REMOTE_PATH', + env_cat_key = 'NZBPO_CPSCATEGORY' + env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'METHOD', 'DELETE_FAILED', 'REMOTE_PATH', 'WAIT_FOR', 'WATCH_DIR', 'OMDBAPIKEY'] - cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'method', 'delete_failed', 'remote_path', + cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'method', 'delete_failed', 'remote_path', 'wait_for', 'watch_dir', 'omdbapikey'] - if envCatKey in os.environ: - for index in range(len(envKeys)): - key = 'NZBPO_CPS{index}'.format(index=envKeys[index]) + if env_cat_key in os.environ: + for index in range(len(env_keys)): + key = 'NZBPO_CPS{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - if os.environ[envCatKey] not in CFG_NEW[section].sections: - CFG_NEW[section][os.environ[envCatKey]] = {} - CFG_NEW[section][os.environ[envCatKey]][option] = value - CFG_NEW[section][os.environ[envCatKey]]['enabled'] = 1 - if os.environ[envCatKey] in CFG_NEW['Radarr'].sections: - CFG_NEW['Radarr'][envCatKey]['enabled'] = 0 + if os.environ[env_cat_key] not in cfg_new[section].sections: + cfg_new[section][os.environ[env_cat_key]] = 
{} + cfg_new[section][os.environ[env_cat_key]][option] = value + cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 + if os.environ[env_cat_key] in cfg_new['Radarr'].sections: + cfg_new['Radarr'][env_cat_key]['enabled'] = 0 section = "SickBeard" - envCatKey = 'NZBPO_SBCATEGORY' - envKeys = ['ENABLED', 'HOST', 'PORT', 'APIKEY', 'USERNAME', 'PASSWORD', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', + env_cat_key = 'NZBPO_SBCATEGORY' + env_keys = ['ENABLED', 'HOST', 'PORT', 'APIKEY', 'USERNAME', 'PASSWORD', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'REMOTE_PATH', 'PROCESS_METHOD'] - cfgKeys = ['enabled', 'host', 'port', 'apikey', 'username', 'password', 'ssl', 'web_root', 'watch_dir', 'fork', + cfg_keys = ['enabled', 'host', 'port', 'apikey', 'username', 'password', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'remote_path', 'process_method'] - if envCatKey in os.environ: - for index in range(len(envKeys)): - key = 'NZBPO_SB{index}'.format(index=envKeys[index]) + if env_cat_key in os.environ: + for index in range(len(env_keys)): + key = 'NZBPO_SB{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - if os.environ[envCatKey] not in CFG_NEW[section].sections: - CFG_NEW[section][os.environ[envCatKey]] = {} - CFG_NEW[section][os.environ[envCatKey]][option] = value - CFG_NEW[section][os.environ[envCatKey]]['enabled'] = 1 - if os.environ[envCatKey] in CFG_NEW['NzbDrone'].sections: - CFG_NEW['NzbDrone'][envCatKey]['enabled'] = 0 + if os.environ[env_cat_key] not in cfg_new[section].sections: + cfg_new[section][os.environ[env_cat_key]] = {} + cfg_new[section][os.environ[env_cat_key]][option] = value + cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 + if os.environ[env_cat_key] in cfg_new['NzbDrone'].sections: + cfg_new['NzbDrone'][env_cat_key]['enabled'] = 0 section = "HeadPhones" - envCatKey = 'NZBPO_HPCATEGORY' - envKeys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WAIT_FOR', 'WATCH_DIR', 'REMOTE_PATH', 'DELETE_FAILED'] - cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'wait_for', 'watch_dir', 'remote_path', 'delete_failed'] - if envCatKey in os.environ: - for index in range(len(envKeys)): - key = 'NZBPO_HP{index}'.format(index=envKeys[index]) + env_cat_key = 'NZBPO_HPCATEGORY' + env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WAIT_FOR', 'WATCH_DIR', 'REMOTE_PATH', 'DELETE_FAILED'] + cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'wait_for', 'watch_dir', 'remote_path', 'delete_failed'] + if env_cat_key in os.environ: + for index in range(len(env_keys)): + key = 'NZBPO_HP{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - if os.environ[envCatKey] not in CFG_NEW[section].sections: - CFG_NEW[section][os.environ[envCatKey]] = {} - CFG_NEW[section][os.environ[envCatKey]][option] = value - CFG_NEW[section][os.environ[envCatKey]]['enabled'] = 1 - if os.environ[envCatKey] in CFG_NEW['Lidarr'].sections: - CFG_NEW['Lidarr'][envCatKey]['enabled'] = 0 + if os.environ[env_cat_key] not in cfg_new[section].sections: + cfg_new[section][os.environ[env_cat_key]] = {} + cfg_new[section][os.environ[env_cat_key]][option] = value + cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 + if os.environ[env_cat_key] in cfg_new['Lidarr'].sections: + 
cfg_new['Lidarr'][env_cat_key]['enabled'] = 0 section = "Mylar" - envCatKey = 'NZBPO_MYCATEGORY' - envKeys = ['ENABLED', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'APIKEY', 'SSL', 'WEB_ROOT', 'WATCH_DIR', + env_cat_key = 'NZBPO_MYCATEGORY' + env_keys = ['ENABLED', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'APIKEY', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'REMOTE_PATH'] - cfgKeys = ['enabled', 'host', 'port', 'username', 'password', 'apikey', 'ssl', 'web_root', 'watch_dir', + cfg_keys = ['enabled', 'host', 'port', 'username', 'password', 'apikey', 'ssl', 'web_root', 'watch_dir', 'remote_path'] - if envCatKey in os.environ: - for index in range(len(envKeys)): - key = 'NZBPO_MY{index}'.format(index=envKeys[index]) + if env_cat_key in os.environ: + for index in range(len(env_keys)): + key = 'NZBPO_MY{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - if os.environ[envCatKey] not in CFG_NEW[section].sections: - CFG_NEW[section][os.environ[envCatKey]] = {} - CFG_NEW[section][os.environ[envCatKey]][option] = value - CFG_NEW[section][os.environ[envCatKey]]['enabled'] = 1 + if os.environ[env_cat_key] not in cfg_new[section].sections: + cfg_new[section][os.environ[env_cat_key]] = {} + cfg_new[section][os.environ[env_cat_key]][option] = value + cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 section = "Gamez" - envCatKey = 'NZBPO_GZCATEGORY' - envKeys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'LIBRARY', 'REMOTE_PATH'] - cfgKeys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'watch_dir', 'library', 'remote_path'] - if envCatKey in os.environ: - for index in range(len(envKeys)): - key = 'NZBPO_GZ{index}'.format(index=envKeys[index]) + env_cat_key = 'NZBPO_GZCATEGORY' + env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'LIBRARY', 'REMOTE_PATH'] + cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'watch_dir', 'library', 'remote_path'] + if env_cat_key in os.environ: + for index in range(len(env_keys)): + key = 'NZBPO_GZ{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - if os.environ[envCatKey] not in CFG_NEW[section].sections: - CFG_NEW[section][os.environ[envCatKey]] = {} - CFG_NEW[section][os.environ[envCatKey]][option] = value - CFG_NEW[section][os.environ[envCatKey]]['enabled'] = 1 + if os.environ[env_cat_key] not in cfg_new[section].sections: + cfg_new[section][os.environ[env_cat_key]] = {} + cfg_new[section][os.environ[env_cat_key]][option] = value + cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 section = "NzbDrone" - envCatKey = 'NZBPO_NDCATEGORY' - envKeys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', + env_cat_key = 'NZBPO_NDCATEGORY' + env_keys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH', 'IMPORTMODE'] #new cfgKey added for importMode - cfgKeys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', + cfg_keys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 'remote_path','importMode'] - if envCatKey in os.environ: - for index in range(len(envKeys)): - key = 
'NZBPO_ND{index}'.format(index=envKeys[index]) + if env_cat_key in os.environ: + for index in range(len(env_keys)): + key = 'NZBPO_ND{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - if os.environ[envCatKey] not in CFG_NEW[section].sections: - CFG_NEW[section][os.environ[envCatKey]] = {} - CFG_NEW[section][os.environ[envCatKey]][option] = value - CFG_NEW[section][os.environ[envCatKey]]['enabled'] = 1 - if os.environ[envCatKey] in CFG_NEW['SickBeard'].sections: - CFG_NEW['SickBeard'][envCatKey]['enabled'] = 0 + if os.environ[env_cat_key] not in cfg_new[section].sections: + cfg_new[section][os.environ[env_cat_key]] = {} + cfg_new[section][os.environ[env_cat_key]][option] = value + cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 + if os.environ[env_cat_key] in cfg_new['SickBeard'].sections: + cfg_new['SickBeard'][env_cat_key]['enabled'] = 0 section = "Radarr" - envCatKey = 'NZBPO_RACATEGORY' - envKeys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', + env_cat_key = 'NZBPO_RACATEGORY' + env_keys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH', 'OMDBAPIKEY', 'IMPORTMODE'] #new cfgKey added for importMode - cfgKeys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', + cfg_keys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 'remote_path', 'omdbapikey','importMode'] - if envCatKey in os.environ: - for index in range(len(envKeys)): - key = 'NZBPO_RA{index}'.format(index=envKeys[index]) + if env_cat_key in os.environ: + for index in range(len(env_keys)): + key = 'NZBPO_RA{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - if os.environ[envCatKey] not in CFG_NEW[section].sections: - CFG_NEW[section][os.environ[envCatKey]] = {} - CFG_NEW[section][os.environ[envCatKey]][option] = value - CFG_NEW[section][os.environ[envCatKey]]['enabled'] = 1 - if os.environ[envCatKey] in CFG_NEW['CouchPotato'].sections: - CFG_NEW['CouchPotato'][envCatKey]['enabled'] = 0 + if os.environ[env_cat_key] not in cfg_new[section].sections: + cfg_new[section][os.environ[env_cat_key]] = {} + cfg_new[section][os.environ[env_cat_key]][option] = value + cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 + if os.environ[env_cat_key] in cfg_new['CouchPotato'].sections: + cfg_new['CouchPotato'][env_cat_key]['enabled'] = 0 section = "Lidarr" - envCatKey = 'NZBPO_LICATEGORY' - envKeys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', + env_cat_key = 'NZBPO_LICATEGORY' + env_keys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'WAIT_FOR', 'DELETE_FAILED', 'REMOTE_PATH'] - cfgKeys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', + cfg_keys = ['enabled', 'host', 'apikey', 'port', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'wait_for', 'delete_failed', 'remote_path'] - if envCatKey in os.environ: - for index in range(len(envKeys)): - key = 'NZBPO_LI{index}'.format(index=envKeys[index]) + if 
env_cat_key in os.environ: + for index in range(len(env_keys)): + key = 'NZBPO_LI{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - if os.environ[envCatKey] not in CFG_NEW[section].sections: - CFG_NEW[section][os.environ[envCatKey]] = {} - CFG_NEW[section][os.environ[envCatKey]][option] = value - CFG_NEW[section][os.environ[envCatKey]]['enabled'] = 1 - if os.environ[envCatKey] in CFG_NEW['HeadPhones'].sections: - CFG_NEW['HeadPhones'][envCatKey]['enabled'] = 0 + if os.environ[env_cat_key] not in cfg_new[section].sections: + cfg_new[section][os.environ[env_cat_key]] = {} + cfg_new[section][os.environ[env_cat_key]][option] = value + cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 + if os.environ[env_cat_key] in cfg_new['HeadPhones'].sections: + cfg_new['HeadPhones'][env_cat_key]['enabled'] = 0 section = "Extensions" - envKeys = ['COMPRESSEDEXTENSIONS', 'MEDIAEXTENSIONS', 'METAEXTENSIONS'] - cfgKeys = ['compressedExtensions', 'mediaExtensions', 'metaExtensions'] - for index in range(len(envKeys)): - key = 'NZBPO_{index}'.format(index=envKeys[index]) + env_keys = ['COMPRESSEDEXTENSIONS', 'MEDIAEXTENSIONS', 'METAEXTENSIONS'] + cfg_keys = ['compressedExtensions', 'mediaExtensions', 'metaExtensions'] + for index in range(len(env_keys)): + key = 'NZBPO_{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - CFG_NEW[section][option] = value + cfg_new[section][option] = value section = "Posix" - envKeys = ['NICENESS', 'IONICE_CLASS', 'IONICE_CLASSDATA'] - cfgKeys = ['niceness', 'ionice_class', 'ionice_classdata'] - for index in range(len(envKeys)): - key = 'NZBPO_{index}'.format(index=envKeys[index]) + env_keys = ['NICENESS', 'IONICE_CLASS', 'IONICE_CLASSDATA'] + cfg_keys = ['niceness', 'ionice_class', 'ionice_classdata'] + for index in range(len(env_keys)): + key = 'NZBPO_{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - CFG_NEW[section][option] = value + cfg_new[section][option] = value section = "Transcoder" - envKeys = ['TRANSCODE', 'DUPLICATE', 'IGNOREEXTENSIONS', 'OUTPUTFASTSTART', 'OUTPUTVIDEOPATH', + env_keys = ['TRANSCODE', 'DUPLICATE', 'IGNOREEXTENSIONS', 'OUTPUTFASTSTART', 'OUTPUTVIDEOPATH', 'PROCESSOUTPUT', 'AUDIOLANGUAGE', 'ALLAUDIOLANGUAGES', 'SUBLANGUAGES', 'ALLSUBLANGUAGES', 'EMBEDSUBS', 'BURNINSUBTITLE', 'EXTRACTSUBS', 'EXTERNALSUBDIR', 'OUTPUTDEFAULT', 'OUTPUTVIDEOEXTENSION', 'OUTPUTVIDEOCODEC', 'VIDEOCODECALLOW', @@ -473,7 +473,7 @@ class ConfigObj(configobj.ConfigObj, Section): 'OUTPUTAUDIOOTHERCODEC', 'AUDIOOTHERCODECALLOW', 'OUTPUTAUDIOOTHERBITRATE', 'OUTPUTSUBTITLECODEC', 'OUTPUTAUDIOCHANNELS', 'OUTPUTAUDIOTRACK2CHANNELS', 'OUTPUTAUDIOOTHERCHANNELS','OUTPUTVIDEORESOLUTION'] - cfgKeys = ['transcode', 'duplicate', 'ignoreExtensions', 'outputFastStart', 'outputVideoPath', + cfg_keys = ['transcode', 'duplicate', 'ignoreExtensions', 'outputFastStart', 'outputVideoPath', 'processOutput', 'audioLanguage', 'allAudioLanguages', 'subLanguages', 'allSubLanguages', 'embedSubs', 'burnInSubtitle', 'extractSubs', 'externalSubDir', 'outputDefault', 'outputVideoExtension', 'outputVideoCodec', 'VideoCodecAllow', @@ -483,51 +483,51 @@ class ConfigObj(configobj.ConfigObj, Section): 'outputAudioOtherCodec', 'AudioOtherCodecAllow', 'outputAudioOtherBitrate', 'outputSubtitleCodec', 'outputAudioChannels', 'outputAudioTrack2Channels', 
'outputAudioOtherChannels', 'outputVideoResolution'] - for index in range(len(envKeys)): - key = 'NZBPO_{index}'.format(index=envKeys[index]) + for index in range(len(env_keys)): + key = 'NZBPO_{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - CFG_NEW[section][option] = value + cfg_new[section][option] = value section = "WakeOnLan" - envKeys = ['WAKE', 'HOST', 'PORT', 'MAC'] - cfgKeys = ['wake', 'host', 'port', 'mac'] - for index in range(len(envKeys)): - key = 'NZBPO_WOL{index}'.format(index=envKeys[index]) + env_keys = ['WAKE', 'HOST', 'PORT', 'MAC'] + cfg_keys = ['wake', 'host', 'port', 'mac'] + for index in range(len(env_keys)): + key = 'NZBPO_WOL{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - CFG_NEW[section][option] = value + cfg_new[section][option] = value section = "UserScript" - envCatKey = 'NZBPO_USCATEGORY' - envKeys = ['USER_SCRIPT_MEDIAEXTENSIONS', 'USER_SCRIPT_PATH', 'USER_SCRIPT_PARAM', 'USER_SCRIPT_RUNONCE', + env_cat_key = 'NZBPO_USCATEGORY' + env_keys = ['USER_SCRIPT_MEDIAEXTENSIONS', 'USER_SCRIPT_PATH', 'USER_SCRIPT_PARAM', 'USER_SCRIPT_RUNONCE', 'USER_SCRIPT_SUCCESSCODES', 'USER_SCRIPT_CLEAN', 'USDELAY', 'USREMOTE_PATH'] - cfgKeys = ['user_script_mediaExtensions', 'user_script_path', 'user_script_param', 'user_script_runOnce', + cfg_keys = ['user_script_mediaExtensions', 'user_script_path', 'user_script_param', 'user_script_runOnce', 'user_script_successCodes', 'user_script_clean', 'delay', 'remote_path'] - if envCatKey in os.environ: - for index in range(len(envKeys)): - key = 'NZBPO_{index}'.format(index=envKeys[index]) + if env_cat_key in os.environ: + for index in range(len(env_keys)): + key = 'NZBPO_{index}'.format(index=env_keys[index]) if key in os.environ: - option = cfgKeys[index] + option = cfg_keys[index] value = os.environ[key] - if os.environ[envCatKey] not in CFG_NEW[section].sections: - CFG_NEW[section][os.environ[envCatKey]] = {} - CFG_NEW[section][os.environ[envCatKey]][option] = value - CFG_NEW[section][os.environ[envCatKey]]['enabled'] = 1 + if os.environ[env_cat_key] not in cfg_new[section].sections: + cfg_new[section][os.environ[env_cat_key]] = {} + cfg_new[section][os.environ[env_cat_key]][option] = value + cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 except Exception as error: logger.debug("Error {msg} when applying NZBGet config".format(msg=error)) try: # write our new config to autoProcessMedia.cfg - CFG_NEW.filename = core.CONFIG_FILE - CFG_NEW.write() + cfg_new.filename = core.CONFIG_FILE + cfg_new.write() except Exception as error: logger.debug("Error {msg} when writing changes to .cfg".format(msg=error)) - return CFG_NEW + return cfg_new configobj.Section = Section diff --git a/core/nzbToMediaDB.py b/core/nzbToMediaDB.py index 83761663..2b555203 100644 --- a/core/nzbToMediaDB.py +++ b/core/nzbToMediaDB.py @@ -52,7 +52,7 @@ class DBConnection(object): if query is None: return - sqlResult = None + sql_result = None attempt = 0 while attempt < 5: @@ -61,13 +61,13 @@ class DBConnection(object): logger.log("{name}: {query}".format(name=self.filename, query=query), logger.DB) cursor = self.connection.cursor() cursor.execute(query) - sqlResult = cursor.fetchone()[0] + sql_result = cursor.fetchone()[0] else: logger.log("{name}: {query} with args {args}".format (name=self.filename, query=query, args=args), logger.DB) cursor = self.connection.cursor() 
cursor.execute(query, args) - sqlResult = cursor.fetchone()[0] + sql_result = cursor.fetchone()[0] # get out of the connection attempt loop since we were successful break @@ -83,13 +83,13 @@ class DBConnection(object): logger.log(u"Fatal error executing query: {msg}".format(msg=error), logger.ERROR) raise - return sqlResult + return sql_result def mass_action(self, querylist, logTransaction=False): if querylist is None: return - sqlResult = [] + sql_result = [] attempt = 0 while attempt < 5: @@ -98,16 +98,16 @@ class DBConnection(object): if len(qu) == 1: if logTransaction: logger.log(qu[0], logger.DEBUG) - sqlResult.append(self.connection.execute(qu[0])) + sql_result.append(self.connection.execute(qu[0])) elif len(qu) > 1: if logTransaction: logger.log(u"{query} with args {args}".format(query=qu[0], args=qu[1]), logger.DEBUG) - sqlResult.append(self.connection.execute(qu[0], qu[1])) + sql_result.append(self.connection.execute(qu[0], qu[1])) self.connection.commit() logger.log(u"Transaction with {x} query's executed".format(x=len(querylist)), logger.DEBUG) - return sqlResult + return sql_result except sqlite3.OperationalError as error: - sqlResult = [] + sql_result = [] if self.connection: self.connection.rollback() if "unable to open database file" in error.args[0] or "database is locked" in error.args[0]: @@ -123,24 +123,24 @@ class DBConnection(object): logger.log(u"Fatal error executing query: {msg}".format(msg=error), logger.ERROR) raise - return sqlResult + return sql_result def action(self, query, args=None): if query is None: return - sqlResult = None + sql_result = None attempt = 0 while attempt < 5: try: if args is None: logger.log(u"{name}: {query}".format(name=self.filename, query=query), logger.DB) - sqlResult = self.connection.execute(query) + sql_result = self.connection.execute(query) else: logger.log(u"{name}: {query} with args {args}".format (name=self.filename, query=query, args=args), logger.DB) - sqlResult = self.connection.execute(query, args) + sql_result = self.connection.execute(query, args) self.connection.commit() # get out of the connection attempt loop since we were successful break @@ -156,22 +156,22 @@ class DBConnection(object): logger.log(u"Fatal error executing query: {msg}".format(msg=error), logger.ERROR) raise - return sqlResult + return sql_result def select(self, query, args=None): - sqlResults = self.action(query, args).fetchall() + sql_results = self.action(query, args).fetchall() - if sqlResults is None: + if sql_results is None: return [] - return sqlResults + return sql_results def upsert(self, tableName, valueDict, keyDict): - changesBefore = self.connection.total_changes + changes_before = self.connection.total_changes - genParams = lambda myDict: ["{key} = ?".format(key=k) for k in myDict.keys()] + gen_params = lambda myDict: ["{key} = ?".format(key=k) for k in myDict.keys()] items = list(valueDict.values()) + list(keyDict.values()) self.action( @@ -179,13 +179,13 @@ class DBConnection(object): "SET {params} " "WHERE {conditions}".format( table=tableName, - params=", ".join(genParams(valueDict)), - conditions=" AND ".join(genParams(keyDict)) + params=", ".join(gen_params(valueDict)), + conditions=" AND ".join(gen_params(keyDict)) ), items ) - if self.connection.total_changes == changesBefore: + if self.connection.total_changes == changes_before: self.action( "INSERT OR IGNORE INTO {table} ({columns}) " "VALUES ({values})".format( diff --git a/core/nzbToMediaSceneExceptions.py b/core/nzbToMediaSceneExceptions.py index 7a9e1dd5..45cb6fce 100644 
--- a/core/nzbToMediaSceneExceptions.py +++ b/core/nzbToMediaSceneExceptions.py @@ -34,8 +34,8 @@ def process_all_exceptions(name, dirname): rename_script(dirname) for filename in listMediaFiles(dirname): newfilename = None - parentDir = os.path.dirname(filename) - head, fileExtension = os.path.splitext(os.path.basename(filename)) + parent_dir = os.path.dirname(filename) + head, file_extension = os.path.splitext(os.path.basename(filename)) if reverse_pattern.search(head) is not None: exception = reverse_filename elif garbage_name.search(head) is not None: @@ -44,7 +44,7 @@ def process_all_exceptions(name, dirname): exception = None newfilename = filename if not newfilename: - newfilename = exception(filename, parentDir, name) + newfilename = exception(filename, parent_dir, name) if core.GROUPS: newfilename = strip_groups(newfilename) if newfilename != filename: @@ -55,29 +55,30 @@ def strip_groups(filename): if not core.GROUPS: return filename dirname, file = os.path.split(filename) - head, fileExtension = os.path.splitext(file) + head, file_extension = os.path.splitext(file) newname = head.replace(' ', '.') for group in core.GROUPS: newname = newname.replace(group, '') newname = newname.replace('[]', '') - newfile = newname + fileExtension - newfilePath = os.path.join(dirname, newfile) - return newfilePath + newfile = newname + file_extension + newfile_path = os.path.join(dirname, newfile) + return newfile_path def rename_file(filename, newfilePath): - if os.path.isfile(newfilePath): - newfilePath = os.path.splitext(newfilePath)[0] + ".NTM" + os.path.splitext(newfilePath)[1] + newfile_path = newfilePath + if os.path.isfile(newfile_path): + newfile_path = os.path.splitext(newfile_path)[0] + ".NTM" + os.path.splitext(newfile_path)[1] logger.debug("Replacing file name {old} with download name {new}".format - (old=filename, new=newfilePath), "EXCEPTION") + (old=filename, new=newfile_path), "EXCEPTION") try: - os.rename(filename, newfilePath) + os.rename(filename, newfile_path) except Exception as error: logger.error("Unable to rename file due to: {error}".format(error=error), "EXCEPTION") def replace_filename(filename, dirname, name): - head, fileExtension = os.path.splitext(os.path.basename(filename)) + head, file_extension = os.path.splitext(os.path.basename(filename)) if media_pattern.search(os.path.basename(dirname).replace(' ', '.')) is not None: newname = os.path.basename(dirname).replace(' ', '.') logger.debug("Replacing file name {old} with directory name {new}".format(old=head, new=newname), "EXCEPTION") @@ -88,13 +89,13 @@ def replace_filename(filename, dirname, name): else: logger.warning("No name replacement determined for {name}".format(name=head), "EXCEPTION") newname = name - newfile = newname + fileExtension - newfilePath = os.path.join(dirname, newfile) - return newfilePath + newfile = newname + file_extension + newfile_path = os.path.join(dirname, newfile) + return newfile_path def reverse_filename(filename, dirname, name): - head, fileExtension = os.path.splitext(os.path.basename(filename)) + head, file_extension = os.path.splitext(os.path.basename(filename)) na_parts = season_pattern.search(head) if na_parts is not None: word_p = word_pattern.findall(na_parts.group(2)) @@ -114,9 +115,9 @@ def reverse_filename(filename, dirname, name): newname = newname.replace(' ', '.') logger.debug("Reversing filename {old} to {new}".format (old=head, new=newname), "EXCEPTION") - newfile = newname + fileExtension - newfilePath = os.path.join(dirname, newfile) - return newfilePath + 
newfile = newname + file_extension + newfile_path = os.path.join(dirname, newfile) + return newfile_path def rename_script(dirname): diff --git a/core/nzbToMediaUserScript.py b/core/nzbToMediaUserScript.py index 046e0a69..da353892 100644 --- a/core/nzbToMediaUserScript.py +++ b/core/nzbToMediaUserScript.py @@ -50,10 +50,10 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): for dirpath, dirnames, filenames in os.walk(outputDestination): for file in filenames: - filePath = core.os.path.join(dirpath, file) - fileName, fileExtension = os.path.splitext(file) + file_path = core.os.path.join(dirpath, file) + file_name, file_extension = os.path.splitext(file) - if fileExtension in core.USER_SCRIPT_MEDIAEXTENSIONS or "all" in core.USER_SCRIPT_MEDIAEXTENSIONS: + if file_extension in core.USER_SCRIPT_MEDIAEXTENSIONS or "all" in core.USER_SCRIPT_MEDIAEXTENSIONS: num_files += 1 if core.USER_SCRIPT_RUNONCE == 1 and num_files > 1: # we have already run once, so just continue to get number of files. continue @@ -63,7 +63,7 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): command.append('{0}'.format(file)) continue elif param == "FP": - command.append('{0}'.format(filePath)) + command.append('{0}'.format(file_path)) continue elif param == "TN": command.append('{0}'.format(torrentName)) @@ -83,7 +83,7 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): cmd = "" for item in command: cmd = "{cmd} {item}".format(cmd=cmd, item=item) - logger.info("Running script {cmd} on file {path}.".format(cmd=cmd, path=filePath), "USERSCRIPT") + logger.info("Running script {cmd} on file {path}.".format(cmd=cmd, path=file_path), "USERSCRIPT") try: p = Popen(command) res = p.wait() @@ -104,9 +104,9 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): num_files_new = 0 for dirpath, dirnames, filenames in os.walk(outputDestination): for file in filenames: - fileName, fileExtension = os.path.splitext(file) + file_name, file_extension = os.path.splitext(file) - if fileExtension in core.USER_SCRIPT_MEDIAEXTENSIONS or core.USER_SCRIPT_MEDIAEXTENSIONS == "ALL": + if file_extension in core.USER_SCRIPT_MEDIAEXTENSIONS or core.USER_SCRIPT_MEDIAEXTENSIONS == "ALL": num_files_new += 1 if core.USER_SCRIPT_CLEAN == int(1) and num_files_new == 0 and final_result == 0: diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 3b2b68fb..7d8bdcda 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -106,86 +106,89 @@ def remoteDir(path): def category_search(inputDirectory, inputName, inputCategory, root, categories): + input_directory = inputDirectory + input_category = inputCategory + input_name = inputName tordir = False try: - inputName = inputName.encode(core.SYS_ENCODING) + input_name = input_name.encode(core.SYS_ENCODING) except: pass try: - inputDirectory = inputDirectory.encode(core.SYS_ENCODING) + input_directory = input_directory.encode(core.SYS_ENCODING) except: pass - if inputDirectory is None: # =Nothing to process here. - return inputDirectory, inputName, inputCategory, root + if input_directory is None: # =Nothing to process here. 
+ return input_directory, input_name, input_category, root - pathlist = os.path.normpath(inputDirectory).split(os.sep) + pathlist = os.path.normpath(input_directory).split(os.sep) - if inputCategory and inputCategory in pathlist: - logger.debug("SEARCH: Found the Category: {0} in directory structure".format(inputCategory)) - elif inputCategory: - logger.debug("SEARCH: Could not find the category: {0} in the directory structure".format(inputCategory)) + if input_category and input_category in pathlist: + logger.debug("SEARCH: Found the Category: {0} in directory structure".format(input_category)) + elif input_category: + logger.debug("SEARCH: Could not find the category: {0} in the directory structure".format(input_category)) else: try: - inputCategory = list(set(pathlist) & set(categories))[-1] # assume last match is most relevant category. - logger.debug("SEARCH: Found Category: {0} in directory structure".format(inputCategory)) + input_category = list(set(pathlist) & set(categories))[-1] # assume last match is most relevant category. + logger.debug("SEARCH: Found Category: {0} in directory structure".format(input_category)) except IndexError: - inputCategory = "" + input_category = "" logger.debug("SEARCH: Could not find a category in the directory structure") - if not os.path.isdir(inputDirectory) and os.path.isfile(inputDirectory): # If the input directory is a file - if not inputName: - inputName = os.path.split(os.path.normpath(inputDirectory))[1] - return inputDirectory, inputName, inputCategory, root + if not os.path.isdir(input_directory) and os.path.isfile(input_directory): # If the input directory is a file + if not input_name: + input_name = os.path.split(os.path.normpath(input_directory))[1] + return input_directory, input_name, input_category, root - if inputCategory and os.path.isdir(os.path.join(inputDirectory, inputCategory)): + if input_category and os.path.isdir(os.path.join(input_directory, input_category)): logger.info( - "SEARCH: Found category directory {0} in input directory directory {1}".format(inputCategory, inputDirectory)) - inputDirectory = os.path.join(inputDirectory, inputCategory) - logger.info("SEARCH: Setting inputDirectory to {0}".format(inputDirectory)) - if inputName and os.path.isdir(os.path.join(inputDirectory, inputName)): - logger.info("SEARCH: Found torrent directory {0} in input directory directory {1}".format(inputName, inputDirectory)) - inputDirectory = os.path.join(inputDirectory, inputName) - logger.info("SEARCH: Setting inputDirectory to {0}".format(inputDirectory)) + "SEARCH: Found category directory {0} in input directory directory {1}".format(input_category, input_directory)) + input_directory = os.path.join(input_directory, input_category) + logger.info("SEARCH: Setting input_directory to {0}".format(input_directory)) + if input_name and os.path.isdir(os.path.join(input_directory, input_name)): + logger.info("SEARCH: Found torrent directory {0} in input directory directory {1}".format(input_name, input_directory)) + input_directory = os.path.join(input_directory, input_name) + logger.info("SEARCH: Setting input_directory to {0}".format(input_directory)) tordir = True - elif inputName and os.path.isdir(os.path.join(inputDirectory, sanitizeName(inputName))): + elif input_name and os.path.isdir(os.path.join(input_directory, sanitizeName(input_name))): logger.info("SEARCH: Found torrent directory {0} in input directory directory {1}".format( - sanitizeName(inputName), inputDirectory)) - inputDirectory = os.path.join(inputDirectory, 
sanitizeName(inputName)) - logger.info("SEARCH: Setting inputDirectory to {0}".format(inputDirectory)) + sanitizeName(input_name), input_directory)) + input_directory = os.path.join(input_directory, sanitizeName(input_name)) + logger.info("SEARCH: Setting input_directory to {0}".format(input_directory)) tordir = True - elif inputName and os.path.isfile(os.path.join(inputDirectory, inputName)): - logger.info("SEARCH: Found torrent file {0} in input directory directory {1}".format(inputName, inputDirectory)) - inputDirectory = os.path.join(inputDirectory, inputName) - logger.info("SEARCH: Setting inputDirectory to {0}".format(inputDirectory)) + elif input_name and os.path.isfile(os.path.join(input_directory, input_name)): + logger.info("SEARCH: Found torrent file {0} in input directory directory {1}".format(input_name, input_directory)) + input_directory = os.path.join(input_directory, input_name) + logger.info("SEARCH: Setting input_directory to {0}".format(input_directory)) tordir = True - elif inputName and os.path.isfile(os.path.join(inputDirectory, sanitizeName(inputName))): + elif input_name and os.path.isfile(os.path.join(input_directory, sanitizeName(input_name))): logger.info("SEARCH: Found torrent file {0} in input directory directory {1}".format( - sanitizeName(inputName), inputDirectory)) - inputDirectory = os.path.join(inputDirectory, sanitizeName(inputName)) - logger.info("SEARCH: Setting inputDirectory to {0}".format(inputDirectory)) + sanitizeName(input_name), input_directory)) + input_directory = os.path.join(input_directory, sanitizeName(input_name)) + logger.info("SEARCH: Setting input_directory to {0}".format(input_directory)) tordir = True imdbid = [item for item in pathlist if '.cp(tt' in item] # This looks for the .cp(tt imdb id in the path. - if imdbid and '.cp(tt' not in inputName: - inputName = imdbid[0] # This ensures the imdb id is preserved and passed to CP + if imdbid and '.cp(tt' not in input_name: + input_name = imdbid[0] # This ensures the imdb id is preserved and passed to CP tordir = True - if inputCategory and not tordir: + if input_category and not tordir: try: - index = pathlist.index(inputCategory) + index = pathlist.index(input_category) if index + 1 < len(pathlist): tordir = True logger.info("SEARCH: Found a unique directory {0} in the category directory".format (pathlist[index + 1])) - if not inputName: - inputName = pathlist[index + 1] + if not input_name: + input_name = pathlist[index + 1] except ValueError: pass - if inputName and not tordir: - if inputName in pathlist or sanitizeName(inputName) in pathlist: - logger.info("SEARCH: Found torrent directory {0} in the directory structure".format(inputName)) + if input_name and not tordir: + if input_name in pathlist or sanitizeName(input_name) in pathlist: + logger.info("SEARCH: Found torrent directory {0} in the directory structure".format(input_name)) tordir = True else: root = 1 @@ -196,7 +199,7 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories): logger.info("SEARCH: Could not find a unique directory for this download. 
Assume a common directory.") logger.info("SEARCH: We will try and determine which files to process, individually") - return inputDirectory, inputName, inputCategory, root + return input_directory, input_name, input_category, root def getDirSize(inputPath): @@ -209,19 +212,19 @@ def getDirSize(inputPath): def is_minSize(inputName, minSize): - fileName, fileExt = os.path.splitext(os.path.basename(inputName)) + file_name, file_ext = os.path.splitext(os.path.basename(inputName)) # audio files we need to check directory size not file size - inputSize = os.path.getsize(inputName) - if fileExt in core.AUDIOCONTAINER: + input_size = os.path.getsize(inputName) + if file_ext in core.AUDIOCONTAINER: try: - inputSize = getDirSize(os.path.dirname(inputName)) + input_size = getDirSize(os.path.dirname(inputName)) except: logger.error("Failed to get file size for {0}".format(inputName), 'MINSIZE') return True # Ignore files under a certain size - if inputSize > minSize * 1048576: + if input_size > minSize * 1048576: return True @@ -309,13 +312,13 @@ def replace_links(link): def flatten(outputDestination): logger.info("FLATTEN: Flattening directory: {0}".format(outputDestination)) for outputFile in listMediaFiles(outputDestination): - dirPath = os.path.dirname(outputFile) - fileName = os.path.basename(outputFile) + dir_path = os.path.dirname(outputFile) + file_name = os.path.basename(outputFile) - if dirPath == outputDestination: + if dir_path == outputDestination: continue - target = os.path.join(outputDestination, fileName) + target = os.path.join(outputDestination, file_name) try: shutil.move(outputFile, target) @@ -411,6 +414,7 @@ def WakeUp(): def CharReplace(Name): + name = Name # Special character hex range: # CP850: 0x80-0xA5 (fortunately not used in ISO-8859-15) # UTF-8: 1st hex code 0xC2-0xC3 followed by a 2nd hex code 0xA1-0xFF @@ -419,73 +423,76 @@ def CharReplace(Name): # If there is special character, detects if it is a UTF-8, CP850 or ISO-8859-15 encoding encoded = False encoding = None - if isinstance(Name, text_type): - return encoded, Name.encode(core.SYS_ENCODING) - for Idx in range(len(Name)): + if isinstance(name, text_type): + return encoded, name.encode(core.SYS_ENCODING) + for Idx in range(len(name)): # /!\ detection is done 2char by 2char for UTF-8 special character - if (len(Name) != 1) & (Idx < (len(Name) - 1)): + if (len(name) != 1) & (Idx < (len(name) - 1)): # Detect UTF-8 - if ((Name[Idx] == '\xC2') | (Name[Idx] == '\xC3')) & ( - (Name[Idx + 1] >= '\xA0') & (Name[Idx + 1] <= '\xFF')): + if ((name[Idx] == '\xC2') | (name[Idx] == '\xC3')) & ( + (name[Idx + 1] >= '\xA0') & (name[Idx + 1] <= '\xFF')): encoding = 'utf-8' break # Detect CP850 - elif (Name[Idx] >= '\x80') & (Name[Idx] <= '\xA5'): + elif (name[Idx] >= '\x80') & (name[Idx] <= '\xA5'): encoding = 'cp850' break # Detect ISO-8859-15 - elif (Name[Idx] >= '\xA6') & (Name[Idx] <= '\xFF'): + elif (name[Idx] >= '\xA6') & (name[Idx] <= '\xFF'): encoding = 'iso-8859-15' break else: # Detect CP850 - if (Name[Idx] >= '\x80') & (Name[Idx] <= '\xA5'): + if (name[Idx] >= '\x80') & (name[Idx] <= '\xA5'): encoding = 'cp850' break # Detect ISO-8859-15 - elif (Name[Idx] >= '\xA6') & (Name[Idx] <= '\xFF'): + elif (name[Idx] >= '\xA6') & (name[Idx] <= '\xFF'): encoding = 'iso-8859-15' break if encoding and not encoding == core.SYS_ENCODING: encoded = True - Name = Name.decode(encoding).encode(core.SYS_ENCODING) - return encoded, Name + name = name.decode(encoding).encode(core.SYS_ENCODING) + return encoded, name def 
convert_to_ascii(inputName, dirName): + input_name = inputName + dir_name = dirName + ascii_convert = int(core.CFG["ASCII"]["convert"]) if ascii_convert == 0 or os.name == 'nt': # just return if we don't want to convert or on windows os and "\" is replaced!. - return inputName, dirName + return input_name, dir_name - encoded, inputName = CharReplace(inputName) + encoded, input_name = CharReplace(input_name) - dir, base = os.path.split(dirName) + dir, base = os.path.split(dir_name) if not base: # ended with "/" dir, base = os.path.split(dir) encoded, base2 = CharReplace(base) if encoded: - dirName = os.path.join(dir, base2) + dir_name = os.path.join(dir, base2) logger.info("Renaming directory to: {0}.".format(base2), 'ENCODER') - os.rename(os.path.join(dir, base), dirName) + os.rename(os.path.join(dir, base), dir_name) if 'NZBOP_SCRIPTDIR' in os.environ: - print("[NZB] DIRECTORY={0}".format(dirName)) + print("[NZB] DIRECTORY={0}".format(dir_name)) - for dirname, dirnames, filenames in os.walk(dirName, topdown=False): + for dirname, dirnames, filenames in os.walk(dir_name, topdown=False): for subdirname in dirnames: encoded, subdirname2 = CharReplace(subdirname) if encoded: logger.info("Renaming directory to: {0}.".format(subdirname2), 'ENCODER') os.rename(os.path.join(dirname, subdirname), os.path.join(dirname, subdirname2)) - for dirname, dirnames, filenames in os.walk(dirName): + for dirname, dirnames, filenames in os.walk(dir_name): for filename in filenames: encoded, filename2 = CharReplace(filename) if encoded: logger.info("Renaming file to: {0}.".format(filename2), 'ENCODER') os.rename(os.path.join(dirname, filename), os.path.join(dirname, filename2)) - return inputName, dirName + return input_name, dir_name def parse_other(args): @@ -495,68 +502,68 @@ def parse_other(args): def parse_rtorrent(args): # rtorrent usage: system.method.set_key = event.download.finished,TorrentToMedia, # "execute={/path/to/nzbToMedia/TorrentToMedia.py,\"$d.get_base_path=\",\"$d.get_name=\",\"$d.get_custom1=\",\"$d.get_hash=\"}" - inputDirectory = os.path.normpath(args[1]) + input_directory = os.path.normpath(args[1]) try: - inputName = args[2] + input_name = args[2] except: - inputName = '' + input_name = '' try: - inputCategory = args[3] + input_category = args[3] except: - inputCategory = '' + input_category = '' try: - inputHash = args[4] + input_hash = args[4] except: - inputHash = '' + input_hash = '' try: - inputID = args[4] + input_id = args[4] except: - inputID = '' + input_id = '' - return inputDirectory, inputName, inputCategory, inputHash, inputID + return input_directory, input_name, input_category, input_hash, input_id def parse_utorrent(args): # uTorrent usage: call TorrentToMedia.py "%D" "%N" "%L" "%I" - inputDirectory = os.path.normpath(args[1]) - inputName = args[2] + input_directory = os.path.normpath(args[1]) + input_name = args[2] try: - inputCategory = args[3] + input_category = args[3] except: - inputCategory = '' + input_category = '' try: - inputHash = args[4] + input_hash = args[4] except: - inputHash = '' + input_hash = '' try: - inputID = args[4] + input_id = args[4] except: - inputID = '' + input_id = '' - return inputDirectory, inputName, inputCategory, inputHash, inputID + return input_directory, input_name, input_category, input_hash, input_id def parse_deluge(args): # Deluge usage: call TorrentToMedia.py TORRENT_ID TORRENT_NAME TORRENT_DIR - inputDirectory = os.path.normpath(args[3]) - inputName = args[2] - inputHash = args[1] - inputID = args[1] + input_directory = 
os.path.normpath(args[3]) + input_name = args[2] + input_hash = args[1] + input_id = args[1] try: - inputCategory = core.TORRENT_CLASS.core.get_torrent_status(inputID, ['label']).get()['label'] + input_category = core.TORRENT_CLASS.core.get_torrent_status(input_id, ['label']).get()['label'] except: - inputCategory = '' - return inputDirectory, inputName, inputCategory, inputHash, inputID + input_category = '' + return input_directory, input_name, input_category, input_hash, input_id def parse_transmission(args): # Transmission usage: call TorrenToMedia.py (%TR_TORRENT_DIR% %TR_TORRENT_NAME% is passed on as environmental variables) - inputDirectory = os.path.normpath(os.getenv('TR_TORRENT_DIR')) - inputName = os.getenv('TR_TORRENT_NAME') - inputCategory = '' # We dont have a category yet - inputHash = os.getenv('TR_TORRENT_HASH') - inputID = os.getenv('TR_TORRENT_ID') - return inputDirectory, inputName, inputCategory, inputHash, inputID + input_directory = os.path.normpath(os.getenv('TR_TORRENT_DIR')) + input_name = os.getenv('TR_TORRENT_NAME') + input_category = '' # We dont have a category yet + input_hash = os.getenv('TR_TORRENT_HASH') + input_id = os.getenv('TR_TORRENT_ID') + return input_directory, input_name, input_category, input_hash, input_id def parse_vuze(args): @@ -566,32 +573,32 @@ def parse_vuze(args): except: input = [] try: - inputDirectory = os.path.normpath(input[0]) + input_directory = os.path.normpath(input[0]) except: - inputDirectory = '' + input_directory = '' try: - inputName = input[1] + input_name = input[1] except: - inputName = '' + input_name = '' try: - inputCategory = input[2] + input_category = input[2] except: - inputCategory = '' + input_category = '' try: - inputHash = input[3] + input_hash = input[3] except: - inputHash = '' + input_hash = '' try: - inputID = input[3] + input_id = input[3] except: - inputID = '' + input_id = '' try: if input[4] == 'single': - inputName = input[5] + input_name = input[5] except: pass - return inputDirectory, inputName, inputCategory, inputHash, inputID + return input_directory, input_name, input_category, input_hash, input_id def parse_qbittorrent(args): # qbittorrent usage: C:\full\path\to\nzbToMedia\TorrentToMedia.py "%D|%N|%L|%I" @@ -600,27 +607,27 @@ def parse_qbittorrent(args): except: input = [] try: - inputDirectory = os.path.normpath(input[0].replace('"','')) + input_directory = os.path.normpath(input[0].replace('"','')) except: - inputDirectory = '' + input_directory = '' try: - inputName = input[1].replace('"','') + input_name = input[1].replace('"','') except: - inputName = '' + input_name = '' try: - inputCategory = input[2].replace('"','') + input_category = input[2].replace('"','') except: - inputCategory = '' + input_category = '' try: - inputHash = input[3].replace('"','') + input_hash = input[3].replace('"','') except: - inputHash = '' + input_hash = '' try: - inputID = input[3].replace('"','') + input_id = input[3].replace('"','') except: - inputID = '' + input_id = '' - return inputDirectory, inputName, inputCategory, inputHash, inputID + return input_directory, input_name, input_category, input_hash, input_id def parse_args(clientAgent, args): clients = { @@ -656,10 +663,10 @@ def getDirs(section, subsection, link='hard'): continue try: logger.debug("Found file {0} in root directory {1}.".format(os.path.split(mediafile)[1], path)) - newPath = None - fileExt = os.path.splitext(mediafile)[1] + new_path = None + file_ext = os.path.splitext(mediafile)[1] try: - if fileExt in core.AUDIOCONTAINER: + if 
file_ext in core.AUDIOCONTAINER: f = beets.mediafile.MediaFile(mediafile) # get artist and album info @@ -667,8 +674,8 @@ def getDirs(section, subsection, link='hard'): album = f.album # create new path - newPath = os.path.join(path, "{0} - {1}".format(sanitizeName(artist), sanitizeName(album))) - elif fileExt in core.MEDIACONTAINER: + new_path = os.path.join(path, "{0} - {1}".format(sanitizeName(artist), sanitizeName(album))) + elif file_ext in core.MEDIACONTAINER: f = guessit.guessit(mediafile) # get title @@ -677,29 +684,29 @@ def getDirs(section, subsection, link='hard'): if not title: title = os.path.splitext(os.path.basename(mediafile))[0] - newPath = os.path.join(path, sanitizeName(title)) + new_path = os.path.join(path, sanitizeName(title)) except Exception as e: logger.error("Exception parsing name for media file: {0}: {1}".format(os.path.split(mediafile)[1], e)) - if not newPath: + if not new_path: title = os.path.splitext(os.path.basename(mediafile))[0] - newPath = os.path.join(path, sanitizeName(title)) + new_path = os.path.join(path, sanitizeName(title)) try: - newPath = newPath.encode(core.SYS_ENCODING) + new_path = new_path.encode(core.SYS_ENCODING) except: pass # Just fail-safe incase we already have afile with this clean-name (was actually a bug from earlier code, but let's be safe). - if os.path.isfile(newPath): - newPath2 = os.path.join(os.path.join(os.path.split(newPath)[0], 'new'), os.path.split(newPath)[1]) - newPath = newPath2 + if os.path.isfile(new_path): + new_path2 = os.path.join(os.path.join(os.path.split(new_path)[0], 'new'), os.path.split(new_path)[1]) + new_path = new_path2 # create new path if it does not exist - if not os.path.exists(newPath): - makeDir(newPath) + if not os.path.exists(new_path): + makeDir(new_path) - newfile = os.path.join(newPath, sanitizeName(os.path.split(mediafile)[1])) + newfile = os.path.join(new_path, sanitizeName(os.path.split(mediafile)[1])) try: newfile = newfile.encode(core.SYS_ENCODING) except: @@ -733,9 +740,9 @@ def getDirs(section, subsection, link='hard'): if core.USELINK == 'move': try: - outputDirectory = os.path.join(core.OUTPUTDIRECTORY, subsection) - if os.path.exists(outputDirectory): - to_return.extend(processDir(outputDirectory)) + output_directory = os.path.join(core.OUTPUTDIRECTORY, subsection) + if os.path.exists(output_directory): + to_return.extend(processDir(output_directory)) except Exception as e: logger.error("Failed to add directories from {0} for post-processing: {1}".format(core.OUTPUTDIRECTORY, e)) @@ -781,10 +788,10 @@ def cleanDir(path, section, subsection): logger.info('Doing Forceful Clean of {0}'.format(path), 'CLEANDIR') rmDir(path) return - minSize = int(cfg.get('minSize', 0)) + min_size = int(cfg.get('minSize', 0)) delete_ignored = int(cfg.get('delete_ignored', 0)) try: - num_files = len(listMediaFiles(path, minSize=minSize, delete_ignored=delete_ignored)) + num_files = len(listMediaFiles(path, minSize=min_size, delete_ignored=delete_ignored)) except: num_files = 'unknown' if num_files > 0: @@ -917,10 +924,10 @@ def find_download(clientAgent, download_id): return True if clientAgent == 'sabnzbd': if "http" in core.SABNZBDHOST: - baseURL = "{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT) + base_url = "{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT) else: - baseURL = "http://{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT) - url = baseURL + base_url = "http://{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT) + url = base_url params = { 'apikey': core.SABNZBDAPIKEY, 
'mode': "get_files", @@ -944,10 +951,10 @@ def get_nzoid(inputName): slots = [] logger.debug("Searching for nzoid from SAbnzbd ...") if "http" in core.SABNZBDHOST: - baseURL = "{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT) + base_url = "{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT) else: - baseURL = "http://{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT) - url = baseURL + base_url = "http://{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT) + url = base_url params = { 'apikey': core.SABNZBDAPIKEY, 'mode': "queue", @@ -960,7 +967,7 @@ def get_nzoid(inputName): return nzoid # failure try: result = r.json() - cleanName = os.path.splitext(os.path.split(inputName)[1])[0] + clean_name = os.path.splitext(os.path.split(inputName)[1])[0] slots.extend([(slot['nzo_id'], slot['filename']) for slot in result['queue']['slots']]) except: logger.warning("Data from SABnzbd queue could not be parsed") @@ -972,13 +979,13 @@ def get_nzoid(inputName): return nzoid # failure try: result = r.json() - cleanName = os.path.splitext(os.path.split(inputName)[1])[0] + clean_name = os.path.splitext(os.path.split(inputName)[1])[0] slots.extend([(slot['nzo_id'], slot['name']) for slot in result['history']['slots']]) except: logger.warning("Data from SABnzbd history could not be parsed") try: for nzo_id, name in slots: - if name in [inputName, cleanName]: + if name in [inputName, clean_name]: nzoid = nzo_id logger.debug("Found nzoid: {0}".format(nzoid)) break @@ -1014,19 +1021,19 @@ def is_archive_file(filename): def isMediaFile(mediafile, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]): - fileName, fileExt = os.path.splitext(mediafile) + file_name, file_ext = os.path.splitext(mediafile) try: # ignore MAC OS's "resource fork" files - if fileName.startswith('._'): + if file_name.startswith('._'): return False except: pass - if (media and fileExt.lower() in core.MEDIACONTAINER) \ - or (audio and fileExt.lower() in core.AUDIOCONTAINER) \ - or (meta and fileExt.lower() in core.METACONTAINER) \ + if (media and file_ext.lower() in core.MEDIACONTAINER) \ + or (audio and file_ext.lower() in core.AUDIOCONTAINER) \ + or (meta and file_ext.lower() in core.METACONTAINER) \ or (archives and is_archive_file(mediafile)) \ - or (other and (fileExt.lower() in otherext or 'all' in otherext)): + or (other and (file_ext.lower() in otherext or 'all' in otherext)): return True else: return False @@ -1036,15 +1043,15 @@ def listMediaFiles(path, minSize=0, delete_ignored=0, media=True, audio=True, me files = [] if not os.path.isdir(path): if os.path.isfile(path): # Single file downloads. 
- curFile = os.path.split(path)[1] - if isMediaFile(curFile, media, audio, meta, archives, other, otherext): + cur_file = os.path.split(path)[1] + if isMediaFile(cur_file, media, audio, meta, archives, other, otherext): # Optionally ignore sample files if is_sample(path) or not is_minSize(path, minSize): if delete_ignored == 1: try: os.unlink(path) logger.debug('Ignored file {0} has been removed ...'.format - (curFile)) + (cur_file)) except: pass else: @@ -1052,26 +1059,26 @@ def listMediaFiles(path, minSize=0, delete_ignored=0, media=True, audio=True, me return files - for curFile in os.listdir(text_type(path)): - fullCurFile = os.path.join(path, curFile) + for cur_file in os.listdir(text_type(path)): + full_cur_file = os.path.join(path, cur_file) # if it's a folder do it recursively - if os.path.isdir(fullCurFile) and not curFile.startswith('.'): - files += listMediaFiles(fullCurFile, minSize, delete_ignored, media, audio, meta, archives, other, otherext) + if os.path.isdir(full_cur_file) and not cur_file.startswith('.'): + files += listMediaFiles(full_cur_file, minSize, delete_ignored, media, audio, meta, archives, other, otherext) - elif isMediaFile(curFile, media, audio, meta, archives, other, otherext): + elif isMediaFile(cur_file, media, audio, meta, archives, other, otherext): # Optionally ignore sample files - if is_sample(fullCurFile) or not is_minSize(fullCurFile, minSize): + if is_sample(full_cur_file) or not is_minSize(full_cur_file, minSize): if delete_ignored == 1: try: - os.unlink(fullCurFile) + os.unlink(full_cur_file) logger.debug('Ignored file {0} has been removed ...'.format - (curFile)) + (cur_file)) except: pass continue - files.append(fullCurFile) + files.append(full_cur_file) return sorted(files, key=len) @@ -1158,29 +1165,29 @@ def extractFiles(src, dst=None, keep_archive=None): extracted_archive = [] for inputFile in listMediaFiles(src, media=False, audio=False, meta=False, archives=True): - dirPath = os.path.dirname(inputFile) - fullFileName = os.path.basename(inputFile) - archiveName = os.path.splitext(fullFileName)[0] - archiveName = re.sub(r"part[0-9]+", "", archiveName) + dir_path = os.path.dirname(inputFile) + full_file_name = os.path.basename(inputFile) + archive_name = os.path.splitext(full_file_name)[0] + archive_name = re.sub(r"part[0-9]+", "", archive_name) - if dirPath in extracted_folder and archiveName in extracted_archive: + if dir_path in extracted_folder and archive_name in extracted_archive: continue # no need to extract this, but keep going to look for other archives and sub directories. 
try: - if extractor.extract(inputFile, dst or dirPath): - extracted_folder.append(dirPath) - extracted_archive.append(archiveName) + if extractor.extract(inputFile, dst or dir_path): + extracted_folder.append(dir_path) + extracted_archive.append(archive_name) except Exception: - logger.error("Extraction failed for: {0}".format(fullFileName)) + logger.error("Extraction failed for: {0}".format(full_file_name)) for folder in extracted_folder: for inputFile in listMediaFiles(folder, media=False, audio=False, meta=False, archives=True): - fullFileName = os.path.basename(inputFile) - archiveName = os.path.splitext(fullFileName)[0] - archiveName = re.sub(r"part[0-9]+", "", archiveName) - if archiveName not in extracted_archive or keep_archive: + full_file_name = os.path.basename(inputFile) + archive_name = os.path.splitext(full_file_name)[0] + archive_name = re.sub(r"part[0-9]+", "", archive_name) + if archive_name not in extracted_archive or keep_archive: continue # don't remove if we haven't extracted this archive, or if we want to preserve them. - logger.info("Removing extracted archive {0} from folder {1} ...".format(fullFileName, folder)) + logger.info("Removing extracted archive {0} from folder {1} ...".format(full_file_name, folder)) try: if not os.access(inputFile, os.W_OK): os.chmod(inputFile, stat.S_IWUSR) @@ -1252,7 +1259,7 @@ def plex_update(category): def backupVersionedFile(old_file, version): - numTries = 0 + num_tries = 0 new_file = '{old}.v{version}'.format(old=old_file, version=version) @@ -1269,11 +1276,11 @@ def backupVersionedFile(old_file, version): except Exception as error: logger.log(u"Error while trying to back up {old} to {new} : {msg}".format (old=old_file, new=new_file, msg=error), logger.WARNING) - numTries += 1 + num_tries += 1 time.sleep(1) logger.log(u"Trying again.", logger.DEBUG) - if numTries >= 10: + if num_tries >= 10: logger.log(u"Unable to back up {old} to {new} please do it manually.".format(old=old_file, new=new_file), logger.ERROR) return False @@ -1283,19 +1290,19 @@ def backupVersionedFile(old_file, version): def update_downloadInfoStatus(inputName, status): logger.db("Updating status of our download {0} in the DB to {1}".format(inputName, status)) - myDB = nzbToMediaDB.DBConnection() - myDB.action("UPDATE downloads SET status=?, last_update=? WHERE input_name=?", + my_db = nzbToMediaDB.DBConnection() + my_db.action("UPDATE downloads SET status=?, last_update=? WHERE input_name=?", [status, datetime.date.today().toordinal(), text_type(inputName)]) def get_downloadInfo(inputName, status): logger.db("Getting download info for {0} from the DB".format(inputName)) - myDB = nzbToMediaDB.DBConnection() - sqlResults = myDB.select("SELECT * FROM downloads WHERE input_name=? AND status=?", + my_db = nzbToMediaDB.DBConnection() + sql_results = my_db.select("SELECT * FROM downloads WHERE input_name=? 
AND status=?", [text_type(inputName), status]) - return sqlResults + return sql_results class RunningProcess(object): diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index f350fa1e..cb812642 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -17,10 +17,10 @@ from core.nzbToMediaUtil import makeDir def isVideoGood(videofile, status): - fileNameExt = os.path.basename(videofile) - fileName, fileExt = os.path.splitext(fileNameExt) + file_name_ext = os.path.basename(videofile) + file_name, file_ext = os.path.splitext(file_name_ext) disable = False - if fileExt not in core.MEDIACONTAINER or not core.FFPROBE or not core.CHECK_MEDIA or fileExt in ['.iso'] or (status > 0 and core.NOEXTRACTFAILED): + if file_ext not in core.MEDIACONTAINER or not core.FFPROBE or not core.CHECK_MEDIA or file_ext in ['.iso'] or (status > 0 and core.NOEXTRACTFAILED): disable = True else: test_details, res = getVideoDetails(core.TEST_FILE) @@ -28,9 +28,9 @@ def isVideoGood(videofile, status): disable = True logger.info("DISABLED: ffprobe failed to analyse test file. Stopping corruption check.", 'TRANSCODER') if test_details.get("streams"): - vidStreams = [item for item in test_details["streams"] if "codec_type" in item and item["codec_type"] == "video"] - audStreams = [item for item in test_details["streams"] if "codec_type" in item and item["codec_type"] == "audio"] - if not (len(vidStreams) > 0 and len(audStreams) > 0): + vid_streams = [item for item in test_details["streams"] if "codec_type" in item and item["codec_type"] == "video"] + aud_streams = [item for item in test_details["streams"] if "codec_type" in item and item["codec_type"] == "audio"] + if not (len(vid_streams) > 0 and len(aud_streams) > 0): disable = True logger.info("DISABLED: ffprobe failed to analyse streams from test file. Stopping corruption check.", 'TRANSCODER') @@ -40,25 +40,25 @@ def isVideoGood(videofile, status): else: return True - logger.info('Checking [{0}] for corruption, please stand by ...'.format(fileNameExt), 'TRANSCODER') + logger.info('Checking [{0}] for corruption, please stand by ...'.format(file_name_ext), 'TRANSCODER') video_details, result = getVideoDetails(videofile) if result != 0: - logger.error("FAILED: [{0}] is corrupted!".format(fileNameExt), 'TRANSCODER') + logger.error("FAILED: [{0}] is corrupted!".format(file_name_ext), 'TRANSCODER') return False if video_details.get("error"): - logger.info("FAILED: [{0}] returned error [{1}].".format(fileNameExt, video_details.get("error")), 'TRANSCODER') + logger.info("FAILED: [{0}] returned error [{1}].".format(file_name_ext, video_details.get("error")), 'TRANSCODER') return False if video_details.get("streams"): - videoStreams = [item for item in video_details["streams"] if item["codec_type"] == "video"] - audioStreams = [item for item in video_details["streams"] if item["codec_type"] == "audio"] - if len(videoStreams) > 0 and len(audioStreams) > 0: - logger.info("SUCCESS: [{0}] has no corruption.".format(fileNameExt), 'TRANSCODER') + video_streams = [item for item in video_details["streams"] if item["codec_type"] == "video"] + audio_streams = [item for item in video_details["streams"] if item["codec_type"] == "audio"] + if len(video_streams) > 0 and len(audio_streams) > 0: + logger.info("SUCCESS: [{0}] has no corruption.".format(file_name_ext), 'TRANSCODER') return True else: logger.info("FAILED: [{0}] has {1} video streams and {2} audio streams. 
" "Assume corruption.".format - (fileNameExt, len(videoStreams), len(audioStreams)), 'TRANSCODER') + (file_name_ext, len(video_streams), len(audio_streams)), 'TRANSCODER') return False @@ -118,7 +118,7 @@ def getVideoDetails(videofile, img=None, bitbucket=None): def buildCommands(file, newDir, movieName, bitbucket): if isinstance(file, string_types): - inputFile = file + input_file = file if 'concat:' in file: file = file.split('|')[0].replace('concat:', '') video_details, result = getVideoDetails(file) @@ -137,10 +137,10 @@ def buildCommands(file, newDir, movieName, bitbucket): img, data = next(iteritems(file)) name = data['name'] video_details, result = getVideoDetails(data['files'][0], img, bitbucket) - inputFile = '-' + input_file = '-' file = '-' - newfilePath = os.path.normpath(os.path.join(newDir, name) + core.VEXTENSION) + newfile_path = os.path.normpath(os.path.join(newDir, name) + core.VEXTENSION) map_cmd = [] video_cmd = [] @@ -152,9 +152,9 @@ def buildCommands(file, newDir, movieName, bitbucket): if not video_details or not video_details.get( "streams"): # we couldn't read streams with ffprobe. Set defaults to try transcoding. - videoStreams = [] - audioStreams = [] - subStreams = [] + video_streams = [] + audio_streams = [] + sub_streams = [] map_cmd.extend(['-map', '0']) if core.VCODEC: @@ -201,15 +201,15 @@ def buildCommands(file, newDir, movieName, bitbucket): other_cmd.extend(['-movflags', '+faststart']) else: - videoStreams = [item for item in video_details["streams"] if item["codec_type"] == "video"] - audioStreams = [item for item in video_details["streams"] if item["codec_type"] == "audio"] - subStreams = [item for item in video_details["streams"] if item["codec_type"] == "subtitle"] + video_streams = [item for item in video_details["streams"] if item["codec_type"] == "video"] + audio_streams = [item for item in video_details["streams"] if item["codec_type"] == "audio"] + sub_streams = [item for item in video_details["streams"] if item["codec_type"] == "subtitle"] if core.VEXTENSION not in ['.mkv', '.mpegts']: - subStreams = [item for item in video_details["streams"] if + sub_streams = [item for item in video_details["streams"] if item["codec_type"] == "subtitle" and item["codec_name"] != "hdmv_pgs_subtitle" and item[ "codec_name"] != "pgssub"] - for video in videoStreams: + for video in video_streams: codec = video["codec_name"] fr = video.get("avg_frame_rate", 0) width = video.get("width", 0) @@ -257,24 +257,24 @@ def buildCommands(file, newDir, movieName, bitbucket): used_audio = 0 a_mapped = [] commentary = [] - if audioStreams: - for i, val in reversed(list(enumerate(audioStreams))): + if audio_streams: + for i, val in reversed(list(enumerate(audio_streams))): try: if "Commentary" in val.get("tags").get("title"): # Split out commentry tracks. commentary.append(val) - del audioStreams[i] + del audio_streams[i] except: continue try: - audio1 = [item for item in audioStreams if item["tags"]["language"] == core.ALANGUAGE] + audio1 = [item for item in audio_streams if item["tags"]["language"] == core.ALANGUAGE] except: # no language tags. Assume only 1 language. 
- audio1 = audioStreams + audio1 = audio_streams try: audio2 = [item for item in audio1 if item["codec_name"] in core.ACODEC_ALLOW] except: audio2 = [] try: - audio3 = [item for item in audioStreams if item["tags"]["language"] != core.ALANGUAGE] + audio3 = [item for item in audio_streams if item["tags"]["language"] != core.ALANGUAGE] except: audio3 = [] try: @@ -384,8 +384,8 @@ def buildCommands(file, newDir, movieName, bitbucket): audio_cmd.extend(audio_cmd2) if core.AINCLUDE and core.ACODEC3: - audioStreams.extend(commentary) # add commentary tracks back here. - for audio in audioStreams: + audio_streams.extend(commentary) # add commentary tracks back here. + for audio in audio_streams: if audio["index"] in a_mapped: continue used_audio += 1 @@ -422,7 +422,7 @@ n = 0 for lan in core.SLANGUAGES: try: - subs1 = [item for item in subStreams if item["tags"]["language"] == lan] + subs1 = [item for item in sub_streams if item["tags"]["language"] == lan] except: subs1 = [] if core.BURN and not subs1 and not burnt and os.path.isfile(file): @@ -431,13 +431,13 @@ video_cmd.extend(['-vf', 'subtitles={subs}'.format(subs=subfile)]) burnt = 1 for sub in subs1: - if core.BURN and not burnt and os.path.isfile(inputFile): + if core.BURN and not burnt and os.path.isfile(input_file): subloc = 0 - for index in range(len(subStreams)): - if subStreams[index]["index"] == sub["index"]: + for index in range(len(sub_streams)): + if sub_streams[index]["index"] == sub["index"]: subloc = index break - video_cmd.extend(['-vf', 'subtitles={sub}:si={loc}'.format(sub=inputFile, loc=subloc)]) + video_cmd.extend(['-vf', 'subtitles={sub}:si={loc}'.format(sub=input_file, loc=subloc)]) burnt = 1 if not core.ALLOWSUBS: break @@ -447,7 +447,7 @@ s_mapped.extend([sub["index"]]) if core.SINCLUDE: - for sub in subStreams: + for sub in sub_streams: if not core.ALLOWSUBS: break if sub["index"] in s_mapped: @@ -467,7 +467,7 @@ if core.GENERALOPTS: command.extend(core.GENERALOPTS) - command.extend(['-i', inputFile]) + command.extend(['-i', input_file]) if core.SEMBED and os.path.isfile(file): for subfile in get_subs(file): @@ -509,7 +509,7 @@ command.extend(sub_cmd) command.extend(meta_cmd) command.extend(other_cmd) - command.append(newfilePath) + command.append(newfile_path) if platform.system() != 'Windows': command = core.NICENESS + command return command @@ -517,13 +517,13 @@ def get_subs(file): filepaths = [] - subExt = ['.srt', '.sub', '.idx'] + sub_ext = ['.srt', '.sub', '.idx'] name = os.path.splitext(os.path.split(file)[1])[0] dir = os.path.split(file)[0] for dirname, dirs, filenames in os.walk(dir): for filename in filenames: filepaths.extend([os.path.join(dirname, filename)]) - subfiles = [item for item in filepaths if os.path.splitext(item)[1] in subExt and name in item] + subfiles = [item for item in filepaths if os.path.splitext(item)[1] in sub_ext and name in item] return subfiles @@ -539,30 +539,30 @@ def extract_subs(file, newfilePath, bitbucket): name = os.path.splitext(os.path.split(newfilePath)[1])[0] try: - subStreams = [item for item in video_details["streams"] if + sub_streams = [item for item in video_details["streams"] if item["codec_type"] == "subtitle" and item["tags"]["language"] in
core.SLANGUAGES and item[ "codec_name"] != "hdmv_pgs_subtitle" and item["codec_name"] != "pgssub"] except: - subStreams = [item for item in video_details["streams"] if + sub_streams = [item for item in video_details["streams"] if item["codec_type"] == "subtitle" and item["codec_name"] != "hdmv_pgs_subtitle" and item[ "codec_name"] != "pgssub"] - num = len(subStreams) + num = len(sub_streams) for n in range(num): - sub = subStreams[n] + sub = sub_streams[n] idx = sub["index"] lan = sub.get("tags", {}).get("language", "unk") if num == 1: - outputFile = os.path.join(subdir, "{0}.srt".format(name)) - if os.path.isfile(outputFile): - outputFile = os.path.join(subdir, "{0}.{1}.srt".format(name, n)) + output_file = os.path.join(subdir, "{0}.srt".format(name)) + if os.path.isfile(output_file): + output_file = os.path.join(subdir, "{0}.{1}.srt".format(name, n)) else: - outputFile = os.path.join(subdir, "{0}.{1}.srt".format(name, lan)) - if os.path.isfile(outputFile): - outputFile = os.path.join(subdir, "{0}.{1}.{2}.srt".format(name, lan, n)) + output_file = os.path.join(subdir, "{0}.{1}.srt".format(name, lan)) + if os.path.isfile(output_file): + output_file = os.path.join(subdir, "{0}.{1}.{2}.srt".format(name, lan, n)) command = [core.FFMPEG, '-loglevel', 'warning', '-i', file, '-vn', '-an', - '-codec:{index}'.format(index=idx), 'srt', outputFile] + '-codec:{index}'.format(index=idx), 'srt', output_file] if platform.system() != 'Windows': command = core.NICENESS + command @@ -578,7 +578,7 @@ def extract_subs(file, newfilePath, bitbucket): if result == 0: try: - shutil.copymode(file, outputFile) + shutil.copymode(file, output_file) except: pass logger.info("Extracting {0} subtitle from {1} has succeeded".format(lan, file)) @@ -587,76 +587,76 @@ def extract_subs(file, newfilePath, bitbucket): def processList(List, newDir, bitbucket): - remList = [] - newList = [] + rem_list = [] + new_list = [] combine = [] - vtsPath = None + vts_path = None success = True for item in List: ext = os.path.splitext(item)[1].lower() if ext in ['.iso', '.bin', '.img'] and ext not in core.IGNOREEXTENSIONS: logger.debug("Attempting to rip disk image: {0}".format(item), "TRANSCODER") - newList.extend(ripISO(item, newDir, bitbucket)) - remList.append(item) + new_list.extend(ripISO(item, newDir, bitbucket)) + rem_list.append(item) elif re.match(".+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", item) and '.vob' not in core.IGNOREEXTENSIONS: logger.debug("Found VIDEO_TS image file: {0}".format(item), "TRANSCODER") - if not vtsPath: + if not vts_path: try: - vtsPath = re.match("(.+VIDEO_TS)", item).groups()[0] + vts_path = re.match("(.+VIDEO_TS)", item).groups()[0] except: - vtsPath = os.path.split(item)[0] - remList.append(item) + vts_path = os.path.split(item)[0] + rem_list.append(item) elif re.match(".+VIDEO_TS.", item) or re.match(".+VTS_[0-9][0-9]_[0-9].", item): - remList.append(item) + rem_list.append(item) elif core.CONCAT and re.match(".+[cC][dD][0-9].", item): - remList.append(item) + rem_list.append(item) combine.append(item) else: continue - if vtsPath: - newList.extend(combineVTS(vtsPath)) + if vts_path: + new_list.extend(combineVTS(vts_path)) if combine: - newList.extend(combineCD(combine)) - for file in newList: + new_list.extend(combineCD(combine)) + for file in new_list: if isinstance(file, string_types) and 'concat:' not in file and not os.path.isfile(file): success = False break - if success and newList: - List.extend(newList) - for item in remList: + if success and new_list: + List.extend(new_list) + for item in 
rem_list: List.remove(item) - logger.debug("Successfully extracted .vob file {0} from disk image".format(newList[0]), "TRANSCODER") - elif newList and not success: - newList = [] - remList = [] + logger.debug("Successfully extracted .vob file {0} from disk image".format(new_list[0]), "TRANSCODER") + elif new_list and not success: + new_list = [] + rem_list = [] logger.error("Failed extracting .vob files from disk image. Stopping transcoding.", "TRANSCODER") - return List, remList, newList, success + return List, rem_list, new_list, success def ripISO(item, newDir, bitbucket): - newFiles = [] + new_files = [] failure_dir = 'failure' # Mount the ISO in your OS and call combineVTS. if not core.SEVENZIP: logger.error("No 7zip installed. Can't extract image file {0}".format(item), "TRANSCODER") - newFiles = [failure_dir] - return newFiles + new_files = [failure_dir] + return new_files cmd = [core.SEVENZIP, 'l', item] try: logger.debug("Attempting to extract .vob from image file {0}".format(item), "TRANSCODER") print_cmd(cmd) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket) out, err = proc.communicate() - fileList = [re.match(".+(VIDEO_TS[\\\/]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb])", line).groups()[0] for line in + file_list = [re.match(".+(VIDEO_TS[\\\/]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb])", line).groups()[0] for line in out.splitlines() if re.match(".+VIDEO_TS[\\\/]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", line)] combined = [] for n in range(99): concat = [] m = 1 while True: - vtsName = 'VIDEO_TS{0}VTS_{1:02d}_{2:d}.VOB'.format(os.sep, n + 1, m) - if vtsName in fileList: - concat.append(vtsName) + vts_name = 'VIDEO_TS{0}VTS_{1:02d}_{2:d}.VOB'.format(os.sep, n + 1, m) + if vts_name in file_list: + concat.append(vts_name) m += 1 else: break @@ -668,29 +668,29 @@ def ripISO(item, newDir, bitbucket): name = '{name}.cd{x}'.format( name=os.path.splitext(os.path.split(item)[1])[0], x=n + 1 ) - newFiles.append({item: {'name': name, 'files': concat}}) + new_files.append({item: {'name': name, 'files': concat}}) if core.CONCAT: name = os.path.splitext(os.path.split(item)[1])[0] - newFiles.append({item: {'name': name, 'files': combined}}) - if not newFiles: + new_files.append({item: {'name': name, 'files': combined}}) + if not new_files: logger.error("No VIDEO_TS folder found in image file {0}".format(item), "TRANSCODER") - newFiles = [failure_dir] + new_files = [failure_dir] except: logger.error("Failed to extract from image file {0}".format(item), "TRANSCODER") - newFiles = [failure_dir] - return newFiles + new_files = [failure_dir] + return new_files def combineVTS(vtsPath): - newFiles = [] + new_files = [] combined = '' for n in range(99): concat = '' m = 1 while True: - vtsName = 'VTS_{0:02d}_{1:d}.VOB'.format(n + 1, m) - if os.path.isfile(os.path.join(vtsPath, vtsName)): - concat += '{file}|'.format(file=os.path.join(vtsPath, vtsName)) + vts_name = 'VTS_{0:02d}_{1:d}.VOB'.format(n + 1, m) + if os.path.isfile(os.path.join(vtsPath, vts_name)): + concat += '{file}|'.format(file=os.path.join(vtsPath, vts_name)) m += 1 else: break @@ -699,14 +699,14 @@ def combineVTS(vtsPath): if core.CONCAT: combined += '{files}|'.format(files=concat) continue - newFiles.append('concat:{0}'.format(concat[:-1])) + new_files.append('concat:{0}'.format(concat[:-1])) if core.CONCAT: - newFiles.append('concat:{0}'.format(combined[:-1])) - return newFiles + new_files.append('concat:{0}'.format(combined[:-1])) + return new_files def combineCD(combine): - newFiles = [] + new_files = [] for item in 
set([re.match("(.+)[cC][dD][0-9].", item).groups()[0] for item in combine]): concat = '' for n in range(99): @@ -717,8 +717,8 @@ def combineCD(combine): else: break if concat: - newFiles.append('concat:{0}'.format(concat[:-1])) - return newFiles + new_files.append('concat:{0}'.format(concat[:-1])) + return new_files def print_cmd(command): @@ -734,43 +734,43 @@ def Transcode_directory(dirName): logger.info("Checking for files to be transcoded") final_result = 0 # initialize as successful if core.OUTPUTVIDEOPATH: - newDir = core.OUTPUTVIDEOPATH - makeDir(newDir) + new_dir = core.OUTPUTVIDEOPATH + makeDir(new_dir) name = os.path.splitext(os.path.split(dirName)[1])[0] - newDir = os.path.join(newDir, name) - makeDir(newDir) + new_dir = os.path.join(new_dir, name) + makeDir(new_dir) else: - newDir = dirName + new_dir = dirName if platform.system() == 'Windows': bitbucket = open('NUL') else: bitbucket = open('/dev/null') - movieName = os.path.splitext(os.path.split(dirName)[1])[0] - List = core.listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False) - List, remList, newList, success = processList(List, newDir, bitbucket) + movie_name = os.path.splitext(os.path.split(dirName)[1])[0] + file_list = core.listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False) + file_list, rem_list, new_list, success = processList(file_list, new_dir, bitbucket) if not success: bitbucket.close() return 1, dirName - for file in List: + for file in file_list: if isinstance(file, string_types) and os.path.splitext(file)[1] in core.IGNOREEXTENSIONS: continue - command = buildCommands(file, newDir, movieName, bitbucket) - newfilePath = command[-1] + command = buildCommands(file, new_dir, movie_name, bitbucket) + newfile_path = command[-1] # transcoding files may remove the original file, so make sure to extract subtitles first if core.SEXTRACT and isinstance(file, string_types): - extract_subs(file, newfilePath, bitbucket) + extract_subs(file, newfile_path, bitbucket) try: # Try to remove the file that we're transcoding to just in case. (ffmpeg will return an error if it already exists for some reason) - os.remove(newfilePath) + os.remove(newfile_path) except OSError as e: if e.errno != errno.ENOENT: # Ignore the error if it's just telling us that the file doesn't exist logger.debug("Error when removing transcoding target: {0}".format(e)) except Exception as e: logger.debug("Error when removing transcoding target: {0}".format(e)) - logger.info("Transcoding video: {0}".format(newfilePath)) + logger.info("Transcoding video: {0}".format(newfile_path)) print_cmd(command) result = 1 # set result to failed in case call fails. 
try: @@ -787,42 +787,42 @@ proc.communicate() result = proc.returncode except: - logger.error("Transcoding of video {0} has failed".format(newfilePath)) + logger.error("Transcoding of video {0} has failed".format(newfile_path)) if core.SUBSDIR and result == 0 and isinstance(file, string_types): for sub in get_subs(file): name = os.path.splitext(os.path.split(file)[1])[0] subname = os.path.split(sub)[1] - newname = os.path.splitext(os.path.split(newfilePath)[1])[0] + newname = os.path.splitext(os.path.split(newfile_path)[1])[0] newpath = os.path.join(core.SUBSDIR, subname.replace(name, newname)) if not os.path.isfile(newpath): os.rename(sub, newpath) if result == 0: try: - shutil.copymode(file, newfilePath) + shutil.copymode(file, newfile_path) except: pass - logger.info("Transcoding of video to {0} succeeded".format(newfilePath)) - if os.path.isfile(newfilePath) and (file in newList or not core.DUPLICATE): + logger.info("Transcoding of video to {0} succeeded".format(newfile_path)) + if os.path.isfile(newfile_path) and (file in new_list or not core.DUPLICATE): try: os.unlink(file) except: pass else: - logger.error("Transcoding of video to {0} failed with result {1}".format(newfilePath, result)) + logger.error("Transcoding of video to {0} failed with result {1}".format(newfile_path, result)) # this will be 0 (successful) if all are successful, else will return a positive integer for failure. final_result = final_result + result if final_result == 0 and not core.DUPLICATE: - for file in remList: + for file in rem_list: try: os.unlink(file) except: pass - if not os.listdir(text_type(newDir)): # this is an empty directory and we didn't transcode into it. - os.rmdir(newDir) - newDir = dirName + if not os.listdir(text_type(new_dir)): # this is an empty directory and we didn't transcode into it. + os.rmdir(new_dir) + new_dir = dirName if not core.PROCESSOUTPUT and core.DUPLICATE: # We postprocess the original files to CP/SB - newDir = dirName + new_dir = dirName bitbucket.close() - return final_result, newDir + return final_result, new_dir diff --git a/nzbToMedia.py b/nzbToMedia.py index a24aaad0..4079f87b 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -645,50 +645,53 @@ except NameError: # post-processing def process(inputDirectory, inputName=None, status=0, clientAgent='manual', download_id=None, inputCategory=None, failureLink=None): - if core.SAFE_MODE and inputDirectory == core.NZB_DEFAULTDIR: + input_directory = inputDirectory + input_name = inputName + input_category = inputCategory + if core.SAFE_MODE and input_directory == core.NZB_DEFAULTDIR: logger.error( 'The input directory:[{0}] is the Default Download Directory.
Please configure category directories to prevent processing of other media.'.format( - inputDirectory)) + input_directory)) return [-1, ""] if not download_id and clientAgent == 'sabnzbd': - download_id = get_nzoid(inputName) + download_id = get_nzoid(input_name) if clientAgent != 'manual' and not core.DOWNLOADINFO: - logger.debug('Adding NZB download info for directory {0} to database'.format(inputDirectory)) + logger.debug('Adding NZB download info for directory {0} to database'.format(input_directory)) - myDB = nzbToMediaDB.DBConnection() + my_db = nzbToMediaDB.DBConnection() - inputDirectory1 = inputDirectory - inputName1 = inputName + input_directory1 = input_directory + input_name1 = input_name try: - encoded, inputDirectory1 = CharReplace(inputDirectory) - encoded, inputName1 = CharReplace(inputName) + encoded, input_directory1 = CharReplace(input_directory) + encoded, input_name1 = CharReplace(input_name) except: pass - controlValueDict = {"input_directory": text_type(inputDirectory1)} - newValueDict = {"input_name": text_type(inputName1), + control_value_dict = {"input_directory": text_type(input_directory1)} + new_value_dict = {"input_name": text_type(input_name1), "input_hash": text_type(download_id), "input_id": text_type(download_id), "client_agent": text_type(clientAgent), "status": 0, "last_update": datetime.date.today().toordinal() } - myDB.upsert("downloads", newValueDict, controlValueDict) + my_db.upsert("downloads", new_value_dict, control_value_dict) # auto-detect section - if inputCategory is None: - inputCategory = 'UNCAT' - usercat = inputCategory - section = core.CFG.findsection(inputCategory).isenabled() + if input_category is None: + input_category = 'UNCAT' + usercat = input_category + section = core.CFG.findsection(input_category).isenabled() if section is None: section = core.CFG.findsection("ALL").isenabled() if section is None: logger.error( 'Category:[{0}] is not defined or is not enabled. Please rename it or ensure it is enabled for the appropriate section in your autoProcessMedia.cfg and try again.'.format( - inputCategory)) + input_category)) return [-1, ""] else: usercat = "ALL" @@ -696,65 +699,65 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down if len(section) > 1: logger.error( 'Category:[{0}] is not unique, {1} are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.'.format( - inputCategory, section.keys())) + input_category, section.keys())) return [-1, ""] if section: - sectionName = section.keys()[0] - logger.info('Auto-detected SECTION:{0}'.format(sectionName)) + section_name = section.keys()[0] + logger.info('Auto-detected SECTION:{0}'.format(section_name)) else: logger.error("Unable to locate a section with subsection:{0} enabled in your autoProcessMedia.cfg, exiting!".format( - inputCategory)) + input_category)) return [-1, ""] - cfg = dict(core.CFG[sectionName][usercat]) + cfg = dict(core.CFG[section_name][usercat]) extract = int(cfg.get("extract", 0)) try: if int(cfg.get("remote_path")) and not core.REMOTEPATHS: logger.error('Remote Path is enabled for {0}:{1} but no Network mount points are defined. 
Please check your autoProcessMedia.cfg, exiting!'.format( - sectionName, inputCategory)) + section_name, input_category)) return [-1, ""] except: logger.error('Remote Path {0} is not valid for {1}:{2} Please set this to either 0 to disable or 1 to enable!'.format( - core.get("remote_path"), sectionName, inputCategory)) + core.get("remote_path"), section_name, input_category)) - inputName, inputDirectory = convert_to_ascii(inputName, inputDirectory) + input_name, input_directory = convert_to_ascii(input_name, input_directory) if extract == 1: - logger.debug('Checking for archives to extract in directory: {0}'.format(inputDirectory)) - extractFiles(inputDirectory) + logger.debug('Checking for archives to extract in directory: {0}'.format(input_directory)) + extractFiles(input_directory) - logger.info("Calling {0}:{1} to post-process:{2}".format(sectionName, inputCategory, inputName)) + logger.info("Calling {0}:{1} to post-process:{2}".format(section_name, input_category, input_name)) - if sectionName in ["CouchPotato", "Radarr"]: - result = autoProcessMovie().process(sectionName, inputDirectory, inputName, status, clientAgent, download_id, - inputCategory, failureLink) - elif sectionName in ["SickBeard", "NzbDrone", "Sonarr"]: - result = autoProcessTV().processEpisode(sectionName, inputDirectory, inputName, status, clientAgent, - download_id, inputCategory, failureLink) - elif sectionName in ["HeadPhones", "Lidarr"]: - result = autoProcessMusic().process(sectionName, inputDirectory, inputName, status, clientAgent, inputCategory) - elif sectionName == "Mylar": - result = autoProcessComics().processEpisode(sectionName, inputDirectory, inputName, status, clientAgent, - inputCategory) - elif sectionName == "Gamez": - result = autoProcessGames().process(sectionName, inputDirectory, inputName, status, clientAgent, inputCategory) - elif sectionName == 'UserScript': - result = external_script(inputDirectory, inputName, inputCategory, section[usercat]) + if section_name in ["CouchPotato", "Radarr"]: + result = autoProcessMovie().process(section_name, input_directory, input_name, status, clientAgent, download_id, + input_category, failureLink) + elif section_name in ["SickBeard", "NzbDrone", "Sonarr"]: + result = autoProcessTV().processEpisode(section_name, input_directory, input_name, status, clientAgent, + download_id, input_category, failureLink) + elif section_name in ["HeadPhones", "Lidarr"]: + result = autoProcessMusic().process(section_name, input_directory, input_name, status, clientAgent, input_category) + elif section_name == "Mylar": + result = autoProcessComics().processEpisode(section_name, input_directory, input_name, status, clientAgent, + input_category) + elif section_name == "Gamez": + result = autoProcessGames().process(section_name, input_directory, input_name, status, clientAgent, input_category) + elif section_name == 'UserScript': + result = external_script(input_directory, input_name, input_category, section[usercat]) else: result = [-1, ""] - plex_update(inputCategory) + plex_update(input_category) if result[0] == 0: if clientAgent != 'manual': # update download status in our DB - update_downloadInfoStatus(inputName, 1) - if sectionName not in ['UserScript', 'NzbDrone', 'Sonarr', 'Radarr', 'Lidarr']: + update_downloadInfoStatus(input_name, 1) + if section_name not in ['UserScript', 'NzbDrone', 'Sonarr', 'Radarr', 'Lidarr']: # cleanup our processing folders of any misc unwanted files and empty directories - cleanDir(inputDirectory, sectionName, inputCategory) + 
cleanDir(input_directory, section_name, input_category) return result @@ -816,7 +819,7 @@ def main(args, section=None): # Check for download_id to pass to CouchPotato download_id = "" - failureLink = None + failure_link = None if 'NZBPR_COUCHPOTATO' in os.environ: download_id = os.environ['NZBPR_COUCHPOTATO'] elif 'NZBPR_DRONE' in os.environ: @@ -828,13 +831,13 @@ def main(args, section=None): elif 'NZBPR_LIDARR' in os.environ: download_id = os.environ['NZBPR_LIDARR'] if 'NZBPR__DNZB_FAILURE' in os.environ: - failureLink = os.environ['NZBPR__DNZB_FAILURE'] + failure_link = os.environ['NZBPR__DNZB_FAILURE'] # All checks done, now launching the script. - clientAgent = 'nzbget' - result = process(os.environ['NZBPP_DIRECTORY'], inputName=os.environ['NZBPP_NZBNAME'], status=status, - clientAgent=clientAgent, download_id=download_id, inputCategory=os.environ['NZBPP_CATEGORY'], - failureLink=failureLink) + client_agent = 'nzbget' + result = process(os.environ['NZBPP_DIRECTORY'], input_name=os.environ['NZBPP_NZBNAME'], status=status, + clientAgent=client_agent, download_id=download_id, input_category=os.environ['NZBPP_CATEGORY'], + failureLink=failure_link) # SABnzbd Pre 0.7.17 elif len(args) == core.SABNZB_NO_OF_ARGUMENTS: # SABnzbd argv: @@ -845,9 +848,9 @@ def main(args, section=None): # 5 User-defined category # 6 Group that the NZB was posted in e.g. alt.binaries.x # 7 Status of post processing. 0 = OK, 1=failed verification, 2=failed unpack, 3=1+2 - clientAgent = 'sabnzbd' + client_agent = 'sabnzbd' logger.info("Script triggered from SABnzbd") - result = process(args[1], inputName=args[2], status=args[7], inputCategory=args[5], clientAgent=clientAgent, + result = process(args[1], input_name=args[2], status=args[7], input_category=args[5], clientAgent=client_agent, download_id='') # SABnzbd 0.7.17+ elif len(args) >= core.SABNZB_0717_NO_OF_ARGUMENTS: @@ -860,14 +863,14 @@ def main(args, section=None): # 6 Group that the NZB was posted in e.g. alt.binaries.x # 7 Status of post processing. 
0 = OK, 1=failed verification, 2=failed unpack, 3=1+2 # 8 Failure URL - clientAgent = 'sabnzbd' + client_agent = 'sabnzbd' logger.info("Script triggered from SABnzbd 0.7.17+") - result = process(args[1], inputName=args[2], status=args[7], inputCategory=args[5], clientAgent=clientAgent, + result = process(args[1], input_name=args[2], status=args[7], input_category=args[5], clientAgent=client_agent, download_id='', failureLink=''.join(args[8:])) # Generic program elif len(args) > 5 and args[5] == 'generic': logger.info("Script triggered from generic program") - result = process(args[1], inputName=args[2], inputCategory=args[3], download_id=args[4]) + result = process(args[1], input_name=args[2], input_category=args[3], download_id=args[4]) else: # Perform Manual Post-Processing logger.warning("Invalid number of arguments received from client, Switching to manual run mode ...") @@ -876,39 +879,39 @@ def main(args, section=None): for subsection in subsections: if not core.CFG[section][subsection].isenabled(): continue - for dirName in getDirs(section, subsection, link='move'): - logger.info("Starting manual run for {0}:{1} - Folder: {2}".format(section, subsection, dirName)) - logger.info("Checking database for download info for {0} ...".format(os.path.basename(dirName))) + for dir_name in getDirs(section, subsection, link='move'): + logger.info("Starting manual run for {0}:{1} - Folder: {2}".format(section, subsection, dir_name)) + logger.info("Checking database for download info for {0} ...".format(os.path.basename(dir_name))) - core.DOWNLOADINFO = get_downloadInfo(os.path.basename(dirName), 0) + core.DOWNLOADINFO = get_downloadInfo(os.path.basename(dir_name), 0) if core.DOWNLOADINFO: logger.info("Found download info for {0}, " "setting variables now ...".format - (os.path.basename(dirName))) - clientAgent = text_type(core.DOWNLOADINFO[0].get('client_agent', 'manual')) + (os.path.basename(dir_name))) + client_agent = text_type(core.DOWNLOADINFO[0].get('client_agent', 'manual')) download_id = text_type(core.DOWNLOADINFO[0].get('input_id', '')) else: logger.info('Unable to locate download info for {0}, ' 'continuing to try and process this release ...'.format - (os.path.basename(dirName))) - clientAgent = 'manual' + (os.path.basename(dir_name))) + client_agent = 'manual' download_id = '' - if clientAgent and clientAgent.lower() not in core.NZB_CLIENTS: + if client_agent and client_agent.lower() not in core.NZB_CLIENTS: continue try: - dirName = dirName.encode(core.SYS_ENCODING) + dir_name = dir_name.encode(core.SYS_ENCODING) except UnicodeError: pass - inputName = os.path.basename(dirName) + input_name = os.path.basename(dir_name) try: - inputName = inputName.encode(core.SYS_ENCODING) + input_name = input_name.encode(core.SYS_ENCODING) except UnicodeError: pass - results = process(dirName, inputName, 0, clientAgent=clientAgent, - download_id=download_id or None, inputCategory=subsection) + results = process(dir_name, input_name, 0, clientAgent=client_agent, + download_id=download_id or None, input_category=subsection) if results[0] != 0: logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format (section, subsection)) From d8cbf422dddd388c946918f3aa09d06c3dc43293 Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 16 Dec 2018 21:37:44 -0500 Subject: [PATCH 2/4] PEP8 Function name should be lowercase --- TorrentToMedia.py | 50 +++++----- core/__init__.py | 16 ++-- core/autoProcess/autoProcessComics.py | 6 +- core/autoProcess/autoProcessMovie.py | 26 
++--- core/autoProcess/autoProcessMusic.py | 22 ++--- core/autoProcess/autoProcessTV.py | 38 ++++---- core/databases/mainDB.py | 14 +-- core/extractor/extractor.py | 2 +- core/gh_api.py | 6 +- core/logger.py | 2 +- core/nzbToMediaAutoFork.py | 2 +- core/nzbToMediaDB.py | 38 ++++---- core/nzbToMediaSceneExceptions.py | 4 +- core/nzbToMediaUserScript.py | 8 +- core/nzbToMediaUtil.py | 132 +++++++++++++------------- core/transcoder/transcoder.py | 46 ++++----- nzbToMedia.py | 24 ++--- tests/general.py | 6 +- 18 files changed, 221 insertions(+), 221 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index a32c2983..e021ee1d 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -7,11 +7,11 @@ import sys import core from core import logger, nzbToMediaDB from core.nzbToMediaUserScript import external_script -from core.nzbToMediaUtil import CharReplace, convert_to_ascii, plex_update, replace_links +from core.nzbToMediaUtil import char_replace, convert_to_ascii, plex_update, replace_links from libs.six import text_type -def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent): +def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent): input_directory = inputDirectory input_name = inputName input_category = inputCategory @@ -29,8 +29,8 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, input_name1 = input_name try: - encoded, input_directory1 = CharReplace(input_directory) - encoded, input_name1 = CharReplace(input_name) + encoded, input_directory1 = char_replace(input_directory) + encoded, input_name1 = char_replace(input_name) except: pass @@ -109,12 +109,12 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, # This way Processing is isolated. 
if not os.path.isdir(os.path.join(input_directory, input_name)): basename = os.path.basename(input_directory) - basename = core.sanitizeName(input_name) \ - if input_name == basename else os.path.splitext(core.sanitizeName(input_name))[0] + basename = core.sanitize_name(input_name) \ + if input_name == basename else os.path.splitext(core.sanitize_name(input_name))[0] output_destination = os.path.join(core.OUTPUTDIRECTORY, input_category, basename) elif unique_path: output_destination = os.path.normpath( - core.os.path.join(core.OUTPUTDIRECTORY, input_category, core.sanitizeName(input_name).replace(" ","."))) + core.os.path.join(core.OUTPUTDIRECTORY, input_category, core.sanitize_name(input_name).replace(" ", "."))) else: output_destination = os.path.normpath( core.os.path.join(core.OUTPUTDIRECTORY, input_category)) @@ -143,9 +143,9 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, now = datetime.datetime.now() if extract == 1: - input_files = core.listMediaFiles(input_directory, archives=False, other=True, otherext=extensions) + input_files = core.list_media_files(input_directory, archives=False, other=True, otherext=extensions) else: - input_files = core.listMediaFiles(input_directory, other=True, otherext=extensions) + input_files = core.list_media_files(input_directory, other=True, otherext=extensions) if len(input_files) == 0 and os.path.isfile(input_directory): input_files = [input_directory] logger.debug("Found 1 file to process: {0}".format(input_directory)) @@ -170,8 +170,8 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, if root == 1: if not found_file: logger.debug("Looking for {0} in: {1}".format(input_name, inputFile)) - if any([core.sanitizeName(input_name) in core.sanitizeName(inputFile), - core.sanitizeName(file_name) in core.sanitizeName(input_name)]): + if any([core.sanitize_name(input_name) in core.sanitize_name(inputFile), + core.sanitize_name(file_name) in core.sanitize_name(input_name)]): found_file = True logger.debug("Found file {0} that matches Torrent Name {1}".format (full_file_name, input_name)) @@ -194,7 +194,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, if torrent_no_link == 0: try: core.copy_link(inputFile, target_file, core.USELINK) - core.rmReadOnly(target_file) + core.remove_read_only(target_file) except: logger.error("Failed to link: {0} to {1}".format(inputFile, target_file)) @@ -202,7 +202,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, if extract == 1: logger.debug('Checking for archives to extract in directory: {0}'.format(input_directory)) - core.extractFiles(input_directory, output_destination, keep_archive) + core.extract_files(input_directory, output_destination, keep_archive) if input_category not in core.NOFLATTEN: # don't flatten hp in case multi cd albums, and we need to copy this back later. 
@@ -211,7 +211,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, # Now check if video files exist in destination: if section_name in ["SickBeard", "NzbDrone", "Sonarr", "CouchPotato", "Radarr"]: num_videos = len( - core.listMediaFiles(output_destination, media=True, audio=False, meta=False, archives=False)) + core.list_media_files(output_destination, media=True, audio=False, meta=False, archives=False)) if num_videos > 0: logger.info("Found {0} media files in {1}".format(num_videos, output_destination)) status = 0 @@ -241,14 +241,14 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, elif section_name in ['SickBeard', 'NzbDrone', 'Sonarr']: if input_hash: input_hash = input_hash.upper() - result = core.autoProcessTV().processEpisode(section_name, output_destination, input_name, - status, clientAgent, input_hash, input_category) + result = core.autoProcessTV().process_episode(section_name, output_destination, input_name, + status, clientAgent, input_hash, input_category) elif section_name in ['HeadPhones', 'Lidarr']: result = core.autoProcessMusic().process(section_name, output_destination, input_name, status, clientAgent, input_category) elif section_name == 'Mylar': - result = core.autoProcessComics().processEpisode(section_name, output_destination, input_name, - status, clientAgent, input_category) + result = core.autoProcessComics().process_episode(section_name, output_destination, input_name, + status, clientAgent, input_category) elif section_name == 'Gamez': result = core.autoProcessGames().process(section_name, output_destination, input_name, status, clientAgent, input_category) @@ -267,7 +267,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, else: if clientAgent != 'manual': # update download status in our DB - core.update_downloadInfoStatus(input_name, 1) + core.update_download_info_status(input_name, 1) # remove torrent if core.USELINK == 'move-sym' and not core.DELETE_ORIGINAL == 1: @@ -281,7 +281,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, if not section_name == 'UserScript': # for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN # cleanup our processing folders of any misc unwanted files and empty directories - core.cleanDir(output_destination, section_name, input_category) + core.clean_dir(output_destination, section_name, input_category) return result @@ -310,7 +310,7 @@ def main(args): return -1 if input_directory and input_name and input_hash and input_id: - result = processTorrent(input_directory, input_name, input_category, input_hash, input_id, client_agent) + result = process_torrent(input_directory, input_name, input_category, input_hash, input_id, client_agent) else: # Perform Manual Post-Processing logger.warning("Invalid number of arguments received from client, Switching to manual run mode ...") @@ -319,13 +319,13 @@ def main(args): for subsection in subsections: if not core.CFG[section][subsection].isenabled(): continue - for dir_name in core.getDirs(section, subsection, link='hard'): + for dir_name in core.get_dirs(section, subsection, link='hard'): logger.info("Starting manual run for {0}:{1} - Folder:{2}".format (section, subsection, dir_name)) logger.info("Checking database for download info for {0} ...".format (os.path.basename(dir_name))) - core.DOWNLOADINFO = core.get_downloadInfo(os.path.basename(dir_name), 0) + core.DOWNLOADINFO = 
core.get_download_info(os.path.basename(dir_name), 0) if core.DOWNLOADINFO: client_agent = text_type(core.DOWNLOADINFO[0].get('client_agent', 'manual')) input_hash = text_type(core.DOWNLOADINFO[0].get('input_hash', '')) @@ -353,8 +353,8 @@ def main(args): except UnicodeError: pass - results = processTorrent(dir_name, input_name, subsection, input_hash or None, input_id or None, - client_agent) + results = process_torrent(dir_name, input_name, subsection, input_hash or None, input_id or None, + client_agent) if results[0] != 0: logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format (section, subsection)) diff --git a/core/__init__.py b/core/__init__.py index 212158ca..ca9f2d8d 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -45,10 +45,10 @@ from core.autoProcess.autoProcessTV import autoProcessTV from core.databases import mainDB from core.nzbToMediaConfig import config from core.nzbToMediaUtil import ( - RunningProcess, WakeUp, category_search, cleanDir, cleanDir, copy_link, - create_torrent_class, extractFiles, flatten, getDirs, get_downloadInfo, - listMediaFiles, makeDir, parse_args, pause_torrent, remove_torrent, - resume_torrent, rmDir, rmReadOnly, sanitizeName, update_downloadInfoStatus, + RunningProcess, wake_up, category_search, clean_dir, clean_dir, copy_link, + create_torrent_class, extract_files, flatten, get_dirs, get_download_info, + list_media_files, make_dir, parse_args, pause_torrent, remove_torrent, + resume_torrent, remove_dir, remove_read_only, sanitize_name, update_download_info_status, ) from core.transcoder import transcoder @@ -255,7 +255,7 @@ def initialize(section=None): LOG_FILE = os.environ['NTM_LOGFILE'] LOG_DIR = os.path.split(LOG_FILE)[0] - if not makeDir(LOG_DIR): + if not make_dir(LOG_DIR): print("No log folder, logging to screen only") MYAPP = RunningProcess() @@ -291,7 +291,7 @@ def initialize(section=None): sys.exit(1) # init logging - logger.ntm_log_instance.initLogging() + logger.ntm_log_instance.init_logging() # run migrate to convert old cfg to new style cfg plus fix any cfg missing values/options. 
if not config.migrate(): @@ -320,7 +320,7 @@ def initialize(section=None): logger.info("{0}: {1}".format(item, os.environ[item]), "ENVIRONMENT") # initialize the main SB database - nzbToMediaDB.upgradeDatabase(nzbToMediaDB.DBConnection(), mainDB.InitialSchema) + nzbToMediaDB.upgrade_database(nzbToMediaDB.DBConnection(), mainDB.InitialSchema) # Set Version and GIT variables NZBTOMEDIA_VERSION = '11.06' @@ -357,7 +357,7 @@ def initialize(section=None): system=platform.system(), release=platform.release())) if int(CFG["WakeOnLan"]["wake"]) == 1: - WakeUp() + wake_up() NZB_CLIENTAGENT = CFG["Nzb"]["clientAgent"] # sabnzbd SABNZBDHOST = CFG["Nzb"]["sabnzbd_host"] diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index 6d4e87f7..c2d00bd1 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -6,13 +6,13 @@ import requests import core from core import logger -from core.nzbToMediaUtil import convert_to_ascii, remoteDir, server_responding +from core.nzbToMediaUtil import convert_to_ascii, remote_dir, server_responding requests.packages.urllib3.disable_warnings() class autoProcessComics(object): - def processEpisode(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): + def process_episode(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): dir_name = dirName input_name = inputName @@ -42,7 +42,7 @@ class autoProcessComics(object): params = { 'cmd': 'forceProcess', 'apikey': apikey, - 'nzb_folder': remoteDir(dir_name) if remote_path else dir_name, + 'nzb_folder': remote_dir(dir_name) if remote_path else dir_name, } if input_name is not None: diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index 44daf1e7..4a30e1cf 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -9,7 +9,7 @@ import requests import core from core import logger from core.nzbToMediaSceneExceptions import process_all_exceptions -from core.nzbToMediaUtil import convert_to_ascii, find_download, find_imdbid, import_subs, listMediaFiles, remoteDir, reportNzb, rmDir, server_responding +from core.nzbToMediaUtil import convert_to_ascii, find_download, find_imdbid, import_subs, list_media_files, remote_dir, report_nzb, remove_dir, server_responding from core.transcoder import transcoder requests.packages.urllib3.disable_warnings() @@ -129,7 +129,7 @@ class autoProcessMovie(object): logger.error("{0} did not return expected json data.".format(section), section) return None - def CDH(self, url2, headers, section="MAIN"): + def completed_download_handling(self, url2, headers, section="MAIN"): try: r = requests.get(url2, params={}, headers=headers, stream=True, verify=False, timeout=(30, 60)) except requests.ConnectionError: @@ -223,17 +223,17 @@ class autoProcessMovie(object): process_all_exceptions(input_name, dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) - if not listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False) and listMediaFiles(dir_name, media=False, audio=False, meta=False, archives=True) and extract: + if not list_media_files(dir_name, media=True, audio=False, meta=False, archives=False) and list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract: logger.debug('Checking for archives to extract in directory: {0}'.format(dir_name)) - core.extractFiles(dir_name) + core.extract_files(dir_name) input_name, 
dir_name = convert_to_ascii(input_name, dir_name) good_files = 0 num_files = 0 # Check video files for corruption - for video in listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False): + for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False): num_files += 1 - if transcoder.isVideoGood(video, status): + if transcoder.is_video_good(video, status): import_subs(video) good_files += 1 if num_files and good_files == num_files: @@ -258,7 +258,7 @@ class autoProcessMovie(object): if status == 0: if core.TRANSCODE == 1: - result, new_dir_name = transcoder.Transcode_directory(dir_name) + result, new_dir_name = transcoder.transcode_directory(dir_name) if result == 0: logger.debug("Transcoding succeeded for files in {0}".format(dir_name), section) dir_name = new_dir_name @@ -271,7 +271,7 @@ class autoProcessMovie(object): else: logger.error("Transcoding failed for files in {0}".format(dir_name), section) return [1, "{0}: Failed to post-process - Transcoding failed".format(section)] - for video in listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False): + for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False): if not release and ".cp(tt" not in video and imdbid: video_name, video_ext = os.path.splitext(video) video2 = "{0}.cp({1}){2}".format(video_name, imdbid, video_ext) @@ -288,7 +288,7 @@ class autoProcessMovie(object): params['downloader'] = downloader or clientAgent params['download_id'] = download_id - params['media_folder'] = remoteDir(dir_name) if remote_path else dir_name + params['media_folder'] = remote_dir(dir_name) if remote_path else dir_name if section == "CouchPotato": if method == "manage": @@ -344,7 +344,7 @@ class autoProcessMovie(object): core.FAILED = True logger.postprocess("FAILED DOWNLOAD DETECTED FOR {0}".format(input_name), section) if failureLink: - reportNzb(failureLink, clientAgent) + report_nzb(failureLink, clientAgent) if section == "Radarr": logger.postprocess("FAILED: The download failed. Sending failed download to {0} for CDH processing".format(section), section) @@ -352,7 +352,7 @@ class autoProcessMovie(object): if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name: logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section) - rmDir(dir_name) + remove_dir(dir_name) if not release_id and not media_id: logger.error("Could not find a downloaded movie in the database matching {0}, exiting!".format(input_name), @@ -451,7 +451,7 @@ class autoProcessMovie(object): dir_name), section) return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] - elif not listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=True): + elif not list_media_files(dir_name, media=True, audio=False, meta=False, archives=True): logger.postprocess("SUCCESS: Input Directory [{0}] has no remaining media files. This has been fully processed.".format( dir_name), section) return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] @@ -460,7 +460,7 @@ class autoProcessMovie(object): time.sleep(10 * wait_for) # The status hasn't changed. we have waited wait_for minutes which is more than enough. uTorrent can resume seeding now. 
- if section == "Radarr" and self.CDH(url2, headers, section=section): + if section == "Radarr" and self.completed_download_handling(url2, headers, section=section): logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.".format(section), section) return [status, "{0}: Complete DownLoad Handling is enabled. Passing back to {1}".format(section, section)] logger.warning( diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index f931c42e..343bbe92 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -9,7 +9,7 @@ import requests import core from core import logger from core.nzbToMediaSceneExceptions import process_all_exceptions -from core.nzbToMediaUtil import convert_to_ascii, listMediaFiles, remoteDir, rmDir, server_responding +from core.nzbToMediaUtil import convert_to_ascii, list_media_files, remote_dir, remove_dir, server_responding requests.packages.urllib3.disable_warnings() @@ -58,7 +58,7 @@ class autoProcessMusic(object): if os.path.basename(dirName) == album['FolderName']: return album["Status"].lower() - def forceProcess(self, params, url, apikey, inputName, dirName, section, wait_for): + def force_process(self, params, url, apikey, inputName, dirName, section, wait_for): release_status = self.get_status(url, apikey, dirName) if not release_status: logger.error("Could not find a status for {0}, is it in the wanted list ?".format(inputName), section) @@ -140,9 +140,9 @@ class autoProcessMusic(object): process_all_exceptions(input_name, dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) - if not listMediaFiles(dir_name, media=False, audio=True, meta=False, archives=False) and listMediaFiles(dir_name, media=False, audio=False, meta=False, archives=True) and extract: + if not list_media_files(dir_name, media=False, audio=True, meta=False, archives=False) and list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract: logger.debug('Checking for archives to extract in directory: {0}'.format(dir_name)) - core.extractFiles(dir_name) + core.extract_files(dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) #if listMediaFiles(dir_name, media=False, audio=True, meta=False, archives=False) and status: @@ -154,20 +154,20 @@ class autoProcessMusic(object): params = { 'apikey': apikey, 'cmd': "forceProcess", - 'dir': remoteDir(dir_name) if remote_path else dir_name + 'dir': remote_dir(dir_name) if remote_path else dir_name } - res = self.forceProcess(params, url, apikey, input_name, dir_name, section, wait_for) + res = self.force_process(params, url, apikey, input_name, dir_name, section, wait_for) if res[0] in [0, 1]: return res params = { 'apikey': apikey, 'cmd': "forceProcess", - 'dir': os.path.split(remoteDir(dir_name))[0] if remote_path else os.path.split(dir_name)[0] + 'dir': os.path.split(remote_dir(dir_name))[0] if remote_path else os.path.split(dir_name)[0] } - res = self.forceProcess(params, url, apikey, input_name, dir_name, section, wait_for) + res = self.force_process(params, url, apikey, input_name, dir_name, section, wait_for) if res[0] in [0, 1]: return res @@ -179,8 +179,8 @@ class autoProcessMusic(object): url = "{0}{1}:{2}{3}/api/v1/command".format(protocol, host, port, web_root) headers = {"X-Api-Key": apikey} if remote_path: - logger.debug("remote_path: {0}".format(remoteDir(dir_name)), section) - data = {"name": "Rename", "path": remoteDir(dir_name)} + 
logger.debug("remote_path: {0}".format(remote_dir(dir_name)), section) + data = {"name": "Rename", "path": remote_dir(dir_name)} else: logger.debug("path: {0}".format(dir_name), section) data = {"name": "Rename", "path": dir_name} @@ -238,5 +238,5 @@ class autoProcessMusic(object): logger.warning("FAILED DOWNLOAD DETECTED", section) if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name: logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section) - rmDir(dir_name) + remove_dir(dir_name) return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)] # Return as failed to flag this in the downloader. \ No newline at end of file diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 3175a852..0a2b7761 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -10,9 +10,9 @@ import requests import core from core import logger -from core.nzbToMediaAutoFork import autoFork +from core.nzbToMediaAutoFork import auto_fork from core.nzbToMediaSceneExceptions import process_all_exceptions -from core.nzbToMediaUtil import convert_to_ascii, flatten, import_subs, listMediaFiles, remoteDir, reportNzb, rmDir, server_responding +from core.nzbToMediaUtil import convert_to_ascii, flatten, import_subs, list_media_files, remote_dir, report_nzb, remove_dir, server_responding from core.transcoder import transcoder requests.packages.urllib3.disable_warnings() @@ -36,7 +36,7 @@ class autoProcessTV(object): logger.error("{0} did not return expected json data.".format(section), section) return None - def CDH(self, url2, headers, section="MAIN"): + def completed_download_handling(self, url2, headers, section="MAIN"): try: r = requests.get(url2, params={}, headers=headers, stream=True, verify=False, timeout=(30, 60)) except requests.ConnectionError: @@ -52,7 +52,7 @@ class autoProcessTV(object): # ValueError catches simplejson's JSONDecodeError and json's ValueError return False - def processEpisode(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None): + def process_episode(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None): cfg = dict(core.CFG[section][inputCategory]) @@ -67,7 +67,7 @@ class autoProcessTV(object): if server_responding("{0}{1}:{2}{3}".format(protocol, host, port, web_root)): # auto-detect correct fork - fork, fork_params = autoFork(section, inputCategory) + fork, fork_params = auto_fork(section, inputCategory) elif not username and not apikey: logger.info('No SickBeard username or Sonarr apikey entered. Performing transcoder functions only') fork, fork_params = "None", {} @@ -119,21 +119,21 @@ class autoProcessTV(object): input_name, dir_name = convert_to_ascii(input_name, dir_name) # Now check if tv files exist in destination. 
- if not listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False): - if listMediaFiles(dir_name, media=False, audio=False, meta=False, archives=True) and extract: + if not list_media_files(dir_name, media=True, audio=False, meta=False, archives=False): + if list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract: logger.debug('Checking for archives to extract in directory: {0}'.format(dir_name)) - core.extractFiles(dir_name) + core.extract_files(dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) - if listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False): # Check that a video exists. if not, assume failed. + if list_media_files(dir_name, media=True, audio=False, meta=False, archives=False): # Check that a video exists. if not, assume failed. flatten(dir_name) # Check video files for corruption good_files = 0 num_files = 0 - for video in listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False): + for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False): num_files += 1 - if transcoder.isVideoGood(video, status): + if transcoder.is_video_good(video, status): good_files += 1 import_subs(video) if num_files > 0: @@ -170,7 +170,7 @@ class autoProcessTV(object): print('[NZB] MARK=BAD') if status == 0 and core.TRANSCODE == 1: # only transcode successful downloads - result, new_dir_name = transcoder.Transcode_directory(dir_name) + result, new_dir_name = transcoder.transcode_directory(dir_name) if result == 0: logger.debug("SUCCESS: Transcoding succeeded for files in {0}".format(dir_name), section) dir_name = new_dir_name @@ -209,7 +209,7 @@ class autoProcessTV(object): if param in ["dir_name", "dir", "proc_dir", "process_directory", "path"]: fork_params[param] = dir_name if remote_path: - fork_params[param] = remoteDir(dir_name) + fork_params[param] = remote_dir(dir_name) if param == "process_method": if process_method: @@ -249,7 +249,7 @@ class autoProcessTV(object): else: core.FAILED = True if failureLink: - reportNzb(failureLink, clientAgent) + report_nzb(failureLink, clientAgent) if 'failed' in fork_params: logger.postprocess("FAILED: The download failed. Sending 'failed' process request to {0} branch".format(fork), section) elif section == "NzbDrone": @@ -259,7 +259,7 @@ class autoProcessTV(object): logger.postprocess("FAILED: The download failed. {0} branch does not handle failed downloads. Nothing to process".format(fork), section) if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name: logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section) - rmDir(dir_name) + remove_dir(dir_name) return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)] # Return as failed to flag this in the downloader. 
url = None @@ -274,8 +274,8 @@ class autoProcessTV(object): headers = {"X-Api-Key": apikey} # params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'} if remote_path: - logger.debug("remote_path: {0}".format(remoteDir(dir_name)), section) - data = {"name": "DownloadedEpisodesScan", "path": remoteDir(dir_name), "downloadClientId": download_id, "importMode": import_mode} + logger.debug("remote_path: {0}".format(remote_dir(dir_name)), section) + data = {"name": "DownloadedEpisodesScan", "path": remote_dir(dir_name), "downloadClientId": download_id, "importMode": import_mode} else: logger.debug("path: {0}".format(dir_name), section) data = {"name": "DownloadedEpisodesScan", "path": dir_name, "downloadClientId": download_id, "importMode": import_mode} @@ -340,7 +340,7 @@ class autoProcessTV(object): if status != 0 and delete_failed and not os.path.dirname(dir_name) == dir_name: logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section) - rmDir(dir_name) + remove_dir(dir_name) if success: return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] @@ -365,7 +365,7 @@ class autoProcessTV(object): elif command_status and command_status in ['failed']: logger.debug("The Scan command has failed. Renaming was not successful.", section) # return [1, "%s: Failed to post-process %s" % (section, input_name) ] - if self.CDH(url2, headers, section=section): + if self.completed_download_handling(url2, headers, section=section): logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.".format(section), section) return [status, "{0}: Complete DownLoad Handling is enabled. Passing back to {1}".format(section, section)] else: diff --git a/core/databases/mainDB.py b/core/databases/mainDB.py index d79033db..d6c1e1c3 100644 --- a/core/databases/mainDB.py +++ b/core/databases/mainDB.py @@ -1,15 +1,15 @@ # coding=utf-8 from core import logger, nzbToMediaDB -from core.nzbToMediaUtil import backupVersionedFile +from core.nzbToMediaUtil import backup_versioned_file MIN_DB_VERSION = 1 # oldest db version we support migrating from MAX_DB_VERSION = 2 -def backupDatabase(version): +def backup_database(version): logger.info("Backing up database before upgrade") - if not backupVersionedFile(nzbToMediaDB.dbFilename(), version): + if not backup_versioned_file(nzbToMediaDB.db_filename(), version): logger.log_error_and_exit("Database backup failed, abort upgrading database") else: logger.info("Proceeding with upgrade") @@ -23,13 +23,13 @@ def backupDatabase(version): class InitialSchema(nzbToMediaDB.SchemaUpgrade): def test(self): no_update = False - if self.hasTable("db_version"): - cur_db_version = self.checkDBVersion() + if self.has_table("db_version"): + cur_db_version = self.check_db_version() no_update = not cur_db_version < MAX_DB_VERSION return no_update def execute(self): - if not self.hasTable("downloads") and not self.hasTable("db_version"): + if not self.has_table("downloads") and not self.has_table("db_version"): queries = [ "CREATE TABLE db_version (db_version INTEGER);", "CREATE TABLE downloads (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));", @@ -39,7 +39,7 @@ class InitialSchema(nzbToMediaDB.SchemaUpgrade): self.connection.action(query) else: - cur_db_version = self.checkDBVersion() + cur_db_version = self.check_db_version() if 
cur_db_version < MIN_DB_VERSION: logger.log_error_and_exit(u"Your database version ({current}) is too old to migrate " diff --git a/core/extractor/extractor.py b/core/extractor/extractor.py index 4f3b4454..14a3a6ba 100644 --- a/core/extractor/extractor.py +++ b/core/extractor/extractor.py @@ -88,7 +88,7 @@ def extract(filePath, outputDestination): return False # Create outputDestination folder - core.makeDir(outputDestination) + core.make_dir(outputDestination) if core.PASSWORDSFILE and os.path.isfile(os.path.normpath(core.PASSWORDSFILE)): passwords = [line.strip() for line in open(os.path.normpath(core.PASSWORDSFILE))] diff --git a/core/gh_api.py b/core/gh_api.py index 97433d9b..6e44f9f3 100644 --- a/core/gh_api.py +++ b/core/gh_api.py @@ -14,7 +14,7 @@ class GitHub(object): self.github_repo = github_repo self.branch = branch - def _access_API(self, path, params=None): + def _access_api(self, path, params=None): """ Access the API at the path given and with the optional params given. """ @@ -32,7 +32,7 @@ class GitHub(object): Returns a deserialized json object containing the commit info. See http://developer.github.com/v3/repos/commits/ """ - return self._access_API( + return self._access_api( ['repos', self.github_repo_user, self.github_repo, 'commits'], params={'per_page': 100, 'sha': self.branch}, ) @@ -49,7 +49,7 @@ class GitHub(object): Returns a deserialized json object containing the compare info. See http://developer.github.com/v3/repos/commits/ """ - return self._access_API( + return self._access_api( ['repos', self.github_repo_user, self.github_repo, 'compare', '{base}...{head}'.format(base=base, head=head)], params={'per_page': per_page}, diff --git a/core/logger.py b/core/logger.py index 1b5d0e9f..7720af33 100644 --- a/core/logger.py +++ b/core/logger.py @@ -58,7 +58,7 @@ class NTMRotatingLogHandler(object): handler.flush() handler.close() - def initLogging(self, consoleLogging=True): + def init_logging(self, consoleLogging=True): if consoleLogging: self.console_logging = consoleLogging diff --git a/core/nzbToMediaAutoFork.py b/core/nzbToMediaAutoFork.py index 8df8c313..0c824176 100644 --- a/core/nzbToMediaAutoFork.py +++ b/core/nzbToMediaAutoFork.py @@ -7,7 +7,7 @@ import core from core import logger -def autoFork(section, inputCategory): +def auto_fork(section, inputCategory): # auto-detect correct section # config settings diff --git a/core/nzbToMediaDB.py b/core/nzbToMediaDB.py index 2b555203..f5c49410 100644 --- a/core/nzbToMediaDB.py +++ b/core/nzbToMediaDB.py @@ -12,7 +12,7 @@ import core from core import logger -def dbFilename(filename="nzbtomedia.db", suffix=None): +def db_filename(filename="nzbtomedia.db", suffix=None): """ @param filename: The sqlite database filename to use. If not specified, will be made to be nzbtomedia.db @@ -29,13 +29,13 @@ class DBConnection(object): def __init__(self, filename="nzbtomedia.db", suffix=None, row_type=None): self.filename = filename - self.connection = sqlite3.connect(dbFilename(filename), 20) + self.connection = sqlite3.connect(db_filename(filename), 20) if row_type == "dict": self.connection.row_factory = self._dict_factory else: self.connection.row_factory = sqlite3.Row - def checkDBVersion(self): + def check_db_version(self): result = None try: result = self.select("SELECT db_version FROM db_version") @@ -196,7 +196,7 @@ class DBConnection(object): list(valueDict.values()) ) - def tableInfo(self, tableName): + def table_info(self, tableName): # FIXME ? 
binding is not supported here, but I cannot find a way to escape a string manually cursor = self.connection.execute("PRAGMA table_info({0})".format(tableName)) columns = {} @@ -212,7 +212,7 @@ class DBConnection(object): return d -def sanityCheckDatabase(connection, sanity_check): +def sanity_check_database(connection, sanity_check): sanity_check(connection).check() @@ -228,22 +228,22 @@ class DBSanityCheck(object): # = Upgrade API = # =============== -def upgradeDatabase(connection, schema): +def upgrade_database(connection, schema): logger.log(u"Checking database structure...", logger.MESSAGE) - _processUpgrade(connection, schema) + _process_upgrade(connection, schema) -def prettyName(class_name): +def pretty_name(class_name): return ' '.join([x.group() for x in re.finditer("([A-Z])([a-z0-9]+)", class_name)]) -def _processUpgrade(connection, upgradeClass): +def _process_upgrade(connection, upgradeClass): instance = upgradeClass(connection) logger.log(u"Checking {name} database upgrade".format - (name=prettyName(upgradeClass.__name__)), logger.DEBUG) + (name=pretty_name(upgradeClass.__name__)), logger.DEBUG) if not instance.test(): logger.log(u"Database upgrade required: {name}".format - (name=prettyName(upgradeClass.__name__)), logger.MESSAGE) + (name=pretty_name(upgradeClass.__name__)), logger.MESSAGE) try: instance.execute() except sqlite3.DatabaseError as error: @@ -257,7 +257,7 @@ def _processUpgrade(connection, upgradeClass): (name=upgradeClass.__name__), logger.DEBUG) for upgradeSubClass in upgradeClass.__subclasses__(): - _processUpgrade(connection, upgradeSubClass) + _process_upgrade(connection, upgradeSubClass) # Base migration class. All future DB changes should be subclassed from this class @@ -265,24 +265,24 @@ class SchemaUpgrade(object): def __init__(self, connection): self.connection = connection - def hasTable(self, tableName): + def has_table(self, tableName): return len(self.connection.action("SELECT 1 FROM sqlite_master WHERE name = ?;", (tableName,)).fetchall()) > 0 - def hasColumn(self, tableName, column): - return column in self.connection.tableInfo(tableName) + def has_column(self, tableName, column): + return column in self.connection.table_info(tableName) - def addColumn(self, table, column, type="NUMERIC", default=0): + def add_column(self, table, column, type="NUMERIC", default=0): self.connection.action("ALTER TABLE {0} ADD {1} {2}".format(table, column, type)) self.connection.action("UPDATE {0} SET {1} = ?".format(table, column), (default,)) - def checkDBVersion(self): + def check_db_version(self): result = self.connection.select("SELECT db_version FROM db_version") if result: return int(result[-1]["db_version"]) else: return 0 - def incDBVersion(self): - new_version = self.checkDBVersion() + 1 + def inc_db_version(self): + new_version = self.check_db_version() + 1 self.connection.action("UPDATE db_version SET db_version = ?", [new_version]) return new_version diff --git a/core/nzbToMediaSceneExceptions.py b/core/nzbToMediaSceneExceptions.py index 45cb6fce..5cb283a9 100644 --- a/core/nzbToMediaSceneExceptions.py +++ b/core/nzbToMediaSceneExceptions.py @@ -8,7 +8,7 @@ import subprocess import core from core import logger -from core.nzbToMediaUtil import listMediaFiles +from core.nzbToMediaUtil import list_media_files reverse_list = [r"\.\d{2}e\d{2}s\.", r"\.[pi]0801\.", r"\.p027\.", r"\.[pi]675\.", r"\.[pi]084\.", r"\.p063\.", r"\b[45]62[xh]\.", r"\.yarulb\.", r"\.vtd[hp]\.", @@ -32,7 +32,7 @@ char_replace = [[r"(\w)1\.(\w)", r"\1i\2"] def 
process_all_exceptions(name, dirname): par2(dirname) rename_script(dirname) - for filename in listMediaFiles(dirname): + for filename in list_media_files(dirname): newfilename = None parent_dir = os.path.dirname(filename) head, file_extension = os.path.splitext(os.path.basename(filename)) diff --git a/core/nzbToMediaUserScript.py b/core/nzbToMediaUserScript.py index da353892..32d2f014 100644 --- a/core/nzbToMediaUserScript.py +++ b/core/nzbToMediaUserScript.py @@ -5,7 +5,7 @@ from subprocess import Popen import core from core import logger -from core.nzbToMediaUtil import import_subs, listMediaFiles, rmDir +from core.nzbToMediaUtil import import_subs, list_media_files, remove_dir from core.transcoder import transcoder @@ -40,8 +40,8 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): core.USER_SCRIPT_RUNONCE = int(settings.get("user_script_runOnce", 1)) if core.CHECK_MEDIA: - for video in listMediaFiles(outputDestination, media=True, audio=False, meta=False, archives=False): - if transcoder.isVideoGood(video, 0): + for video in list_media_files(outputDestination, media=True, audio=False, meta=False, archives=False): + if transcoder.is_video_good(video, 0): import_subs(video) else: logger.info("Corrupt video file found {0}. Deleting.".format(video), "USERSCRIPT") @@ -111,7 +111,7 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): if core.USER_SCRIPT_CLEAN == int(1) and num_files_new == 0 and final_result == 0: logger.info("All files have been processed. Cleaning outputDirectory {0}".format(outputDestination)) - rmDir(outputDestination) + remove_dir(outputDestination) elif core.USER_SCRIPT_CLEAN == int(1) and num_files_new != 0: logger.info("{0} files were processed, but {1} still remain. outputDirectory will not be cleaned.".format( num_files, num_files_new)) diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index 7d8bdcda..d24a89c6 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -32,11 +32,11 @@ requests.packages.urllib3.disable_warnings() # Monkey Patch shutil.copyfileobj() to adjust the buffer length to 512KB rather than 4KB shutil.copyfileobjOrig = shutil.copyfileobj -def copyfileobjFast(fsrc, fdst, length=512*1024): +def copyfileobj_fast(fsrc, fdst, length=512 * 1024): shutil.copyfileobjOrig(fsrc, fdst, length=length) -shutil.copyfileobj = copyfileobjFast +shutil.copyfileobj = copyfileobj_fast -def reportNzb(failure_link, clientAgent): +def report_nzb(failure_link, clientAgent): # Contact indexer site logger.info("Sending failure notification to indexer site") if clientAgent == 'nzbget': @@ -52,15 +52,15 @@ def reportNzb(failure_link, clientAgent): return -def sanitizeName(name): +def sanitize_name(name): """ - >>> sanitizeName('a/b/c') + >>> sanitize_name('a/b/c') 'a-b-c' - >>> sanitizeName('abc') + >>> sanitize_name('abc') 'abc' - >>> sanitizeName('a"b') + >>> sanitize_name('a"b') 'ab' - >>> sanitizeName('.a.b..') + >>> sanitize_name('.a.b..') 'a.b' """ @@ -78,7 +78,7 @@ def sanitizeName(name): return name -def makeDir(path): +def make_dir(path): if not os.path.isdir(path): try: os.makedirs(path) @@ -87,7 +87,7 @@ def makeDir(path): return True -def remoteDir(path): +def remote_dir(path): if not core.REMOTEPATHS: return path for local, remote in core.REMOTEPATHS: @@ -151,10 +151,10 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories): input_directory = os.path.join(input_directory, input_name) logger.info("SEARCH: Setting input_directory to 
{0}".format(input_directory)) tordir = True - elif input_name and os.path.isdir(os.path.join(input_directory, sanitizeName(input_name))): + elif input_name and os.path.isdir(os.path.join(input_directory, sanitize_name(input_name))): logger.info("SEARCH: Found torrent directory {0} in input directory directory {1}".format( - sanitizeName(input_name), input_directory)) - input_directory = os.path.join(input_directory, sanitizeName(input_name)) + sanitize_name(input_name), input_directory)) + input_directory = os.path.join(input_directory, sanitize_name(input_name)) logger.info("SEARCH: Setting input_directory to {0}".format(input_directory)) tordir = True elif input_name and os.path.isfile(os.path.join(input_directory, input_name)): @@ -162,10 +162,10 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories): input_directory = os.path.join(input_directory, input_name) logger.info("SEARCH: Setting input_directory to {0}".format(input_directory)) tordir = True - elif input_name and os.path.isfile(os.path.join(input_directory, sanitizeName(input_name))): + elif input_name and os.path.isfile(os.path.join(input_directory, sanitize_name(input_name))): logger.info("SEARCH: Found torrent file {0} in input directory directory {1}".format( - sanitizeName(input_name), input_directory)) - input_directory = os.path.join(input_directory, sanitizeName(input_name)) + sanitize_name(input_name), input_directory)) + input_directory = os.path.join(input_directory, sanitize_name(input_name)) logger.info("SEARCH: Setting input_directory to {0}".format(input_directory)) tordir = True @@ -187,7 +187,7 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories): pass if input_name and not tordir: - if input_name in pathlist or sanitizeName(input_name) in pathlist: + if input_name in pathlist or sanitize_name(input_name) in pathlist: logger.info("SEARCH: Found torrent directory {0} in the directory structure".format(input_name)) tordir = True else: @@ -202,23 +202,23 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories): return input_directory, input_name, input_category, root -def getDirSize(inputPath): +def get_dir_size(inputPath): from functools import partial prepend = partial(os.path.join, inputPath) return sum([ - (os.path.getsize(f) if os.path.isfile(f) else getDirSize(f)) + (os.path.getsize(f) if os.path.isfile(f) else get_dir_size(f)) for f in map(prepend, os.listdir(text_type(inputPath))) ]) -def is_minSize(inputName, minSize): +def is_min_size(inputName, minSize): file_name, file_ext = os.path.splitext(os.path.basename(inputName)) # audio files we need to check directory size not file size input_size = os.path.getsize(inputName) if file_ext in core.AUDIOCONTAINER: try: - input_size = getDirSize(os.path.dirname(inputName)) + input_size = get_dir_size(os.path.dirname(inputName)) except: logger.error("Failed to get file size for {0}".format(inputName), 'MINSIZE') return True @@ -249,7 +249,7 @@ def copy_link(src, targetLink, useLink): logger.info("SOURCE AND TARGET folders are the same, skipping ...", 'COPYLINK') return True - makeDir(os.path.dirname(targetLink)) + make_dir(os.path.dirname(targetLink)) try: if useLink == 'dir': logger.info("Directory linking SOURCE FOLDER -> TARGET FOLDER", 'COPYLINK') @@ -311,7 +311,7 @@ def replace_links(link): def flatten(outputDestination): logger.info("FLATTEN: Flattening directory: {0}".format(outputDestination)) - for outputFile in listMediaFiles(outputDestination): + for outputFile in 
list_media_files(outputDestination): dir_path = os.path.dirname(outputFile) file_name = os.path.basename(outputFile) @@ -325,10 +325,10 @@ def flatten(outputDestination): except: logger.error("Could not flatten {0}".format(outputFile), 'FLATTEN') - removeEmptyFolders(outputDestination) # Cleanup empty directories + remove_empty_folders(outputDestination) # Cleanup empty directories -def removeEmptyFolders(path, removeRoot=True): +def remove_empty_folders(path, removeRoot=True): """Function to remove empty folders""" if not os.path.isdir(path): return @@ -340,7 +340,7 @@ def removeEmptyFolders(path, removeRoot=True): for f in files: fullpath = os.path.join(path, f) if os.path.isdir(fullpath): - removeEmptyFolders(fullpath) + remove_empty_folders(fullpath) # if folder empty, delete it files = os.listdir(text_type(path)) @@ -349,7 +349,7 @@ def removeEmptyFolders(path, removeRoot=True): os.rmdir(path) -def rmReadOnly(filename): +def remove_read_only(filename): if os.path.isfile(filename): # check first the read-only attribute file_attribute = os.stat(filename)[0] @@ -364,7 +364,7 @@ def rmReadOnly(filename): # Wake function -def WakeOnLan(ethernet_address): +def wake_on_lan(ethernet_address): addr_byte = ethernet_address.split(':') hw_addr = struct.pack(b'BBBBBB', int(addr_byte[0], 16), int(addr_byte[1], 16), @@ -386,7 +386,7 @@ def WakeOnLan(ethernet_address): # Test Connection function -def TestCon(host, port): +def test_connection(host, port): try: socket.create_connection((host, port)) return "Up" @@ -394,26 +394,26 @@ def TestCon(host, port): return "Down" -def WakeUp(): +def wake_up(): host = core.CFG["WakeOnLan"]["host"] port = int(core.CFG["WakeOnLan"]["port"]) mac = core.CFG["WakeOnLan"]["mac"] i = 1 - while TestCon(host, port) == "Down" and i < 4: + while test_connection(host, port) == "Down" and i < 4: logger.info(("Sending WakeOnLan Magic Packet for mac: {0}".format(mac))) - WakeOnLan(mac) + wake_on_lan(mac) time.sleep(20) i = i + 1 - if TestCon(host, port) == "Down": # final check. + if test_connection(host, port) == "Down": # final check. logger.warning("System with mac: {0} has not woken after 3 attempts. " "Continuing with the rest of the script.".format(mac)) else: logger.info("System with mac: {0} has been woken. Continuing with the rest of the script.".format(mac)) -def CharReplace(Name): +def char_replace(Name): name = Name # Special character hex range: # CP850: 0x80-0xA5 (fortunately not used in ISO-8859-15) @@ -464,13 +464,13 @@ def convert_to_ascii(inputName, dirName): if ascii_convert == 0 or os.name == 'nt': # just return if we don't want to convert or on windows os and "\" is replaced!. 
return input_name, dir_name - encoded, input_name = CharReplace(input_name) + encoded, input_name = char_replace(input_name) dir, base = os.path.split(dir_name) if not base: # ended with "/" dir, base = os.path.split(dir) - encoded, base2 = CharReplace(base) + encoded, base2 = char_replace(base) if encoded: dir_name = os.path.join(dir, base2) logger.info("Renaming directory to: {0}.".format(base2), 'ENCODER') @@ -480,14 +480,14 @@ def convert_to_ascii(inputName, dirName): for dirname, dirnames, filenames in os.walk(dir_name, topdown=False): for subdirname in dirnames: - encoded, subdirname2 = CharReplace(subdirname) + encoded, subdirname2 = char_replace(subdirname) if encoded: logger.info("Renaming directory to: {0}.".format(subdirname2), 'ENCODER') os.rename(os.path.join(dirname, subdirname), os.path.join(dirname, subdirname2)) for dirname, dirnames, filenames in os.walk(dir_name): for filename in filenames: - encoded, filename2 = CharReplace(filename) + encoded, filename2 = char_replace(filename) if encoded: logger.info("Renaming file to: {0}.".format(filename2), 'ENCODER') os.rename(os.path.join(dirname, filename), os.path.join(dirname, filename2)) @@ -646,10 +646,10 @@ def parse_args(clientAgent, args): return None, None, None, None, None -def getDirs(section, subsection, link='hard'): +def get_dirs(section, subsection, link='hard'): to_return = [] - def processDir(path): + def process_dir(path): folders = [] logger.info("Searching {0} for mediafiles to post-process ...".format(path)) @@ -674,7 +674,7 @@ def getDirs(section, subsection, link='hard'): album = f.album # create new path - new_path = os.path.join(path, "{0} - {1}".format(sanitizeName(artist), sanitizeName(album))) + new_path = os.path.join(path, "{0} - {1}".format(sanitize_name(artist), sanitize_name(album))) elif file_ext in core.MEDIACONTAINER: f = guessit.guessit(mediafile) @@ -684,13 +684,13 @@ def getDirs(section, subsection, link='hard'): if not title: title = os.path.splitext(os.path.basename(mediafile))[0] - new_path = os.path.join(path, sanitizeName(title)) + new_path = os.path.join(path, sanitize_name(title)) except Exception as e: logger.error("Exception parsing name for media file: {0}: {1}".format(os.path.split(mediafile)[1], e)) if not new_path: title = os.path.splitext(os.path.basename(mediafile))[0] - new_path = os.path.join(path, sanitizeName(title)) + new_path = os.path.join(path, sanitize_name(title)) try: new_path = new_path.encode(core.SYS_ENCODING) @@ -704,9 +704,9 @@ def getDirs(section, subsection, link='hard'): # create new path if it does not exist if not os.path.exists(new_path): - makeDir(new_path) + make_dir(new_path) - newfile = os.path.join(new_path, sanitizeName(os.path.split(mediafile)[1])) + newfile = os.path.join(new_path, sanitize_name(os.path.split(mediafile)[1])) try: newfile = newfile.encode(core.SYS_ENCODING) except: @@ -731,9 +731,9 @@ def getDirs(section, subsection, link='hard'): try: watch_dir = os.path.join(core.CFG[section][subsection]["watch_dir"], subsection) if os.path.exists(watch_dir): - to_return.extend(processDir(watch_dir)) + to_return.extend(process_dir(watch_dir)) elif os.path.exists(core.CFG[section][subsection]["watch_dir"]): - to_return.extend(processDir(core.CFG[section][subsection]["watch_dir"])) + to_return.extend(process_dir(core.CFG[section][subsection]["watch_dir"])) except Exception as e: logger.error("Failed to add directories from {0} for post-processing: {1}".format (core.CFG[section][subsection]["watch_dir"], e)) @@ -742,7 +742,7 @@ def getDirs(section, 
subsection, link='hard'): try: output_directory = os.path.join(core.OUTPUTDIRECTORY, subsection) if os.path.exists(output_directory): - to_return.extend(processDir(output_directory)) + to_return.extend(process_dir(output_directory)) except Exception as e: logger.error("Failed to add directories from {0} for post-processing: {1}".format(core.OUTPUTDIRECTORY, e)) @@ -771,7 +771,7 @@ def onerror(func, path, exc_info): raise Exception -def rmDir(dirName): +def remove_dir(dirName): logger.info("Deleting {0}".format(dirName)) try: shutil.rmtree(text_type(dirName), onerror=onerror) @@ -779,19 +779,19 @@ def rmDir(dirName): logger.error("Unable to delete folder {0}".format(dirName)) -def cleanDir(path, section, subsection): +def clean_dir(path, section, subsection): cfg = dict(core.CFG[section][subsection]) if not os.path.exists(path): logger.info('Directory {0} has been processed and removed ...'.format(path), 'CLEANDIR') return if core.FORCE_CLEAN and not core.FAILED: logger.info('Doing Forceful Clean of {0}'.format(path), 'CLEANDIR') - rmDir(path) + remove_dir(path) return min_size = int(cfg.get('minSize', 0)) delete_ignored = int(cfg.get('delete_ignored', 0)) try: - num_files = len(listMediaFiles(path, minSize=min_size, delete_ignored=delete_ignored)) + num_files = len(list_media_files(path, minSize=min_size, delete_ignored=delete_ignored)) except: num_files = 'unknown' if num_files > 0: @@ -994,7 +994,7 @@ def get_nzoid(inputName): return nzoid -def cleanFileName(filename): +def clean_file_name(filename): """Cleans up nzb name by removing any . and _ characters, along with any trailing hyphens. @@ -1020,7 +1020,7 @@ def is_archive_file(filename): return False -def isMediaFile(mediafile, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]): +def is_media_file(mediafile, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]): file_name, file_ext = os.path.splitext(mediafile) try: @@ -1039,14 +1039,14 @@ def isMediaFile(mediafile, media=True, audio=True, meta=True, archives=True, oth return False -def listMediaFiles(path, minSize=0, delete_ignored=0, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]): +def list_media_files(path, minSize=0, delete_ignored=0, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]): files = [] if not os.path.isdir(path): if os.path.isfile(path): # Single file downloads. 
cur_file = os.path.split(path)[1] - if isMediaFile(cur_file, media, audio, meta, archives, other, otherext): + if is_media_file(cur_file, media, audio, meta, archives, other, otherext): # Optionally ignore sample files - if is_sample(path) or not is_minSize(path, minSize): + if is_sample(path) or not is_min_size(path, minSize): if delete_ignored == 1: try: os.unlink(path) @@ -1064,11 +1064,11 @@ def listMediaFiles(path, minSize=0, delete_ignored=0, media=True, audio=True, me # if it's a folder do it recursively if os.path.isdir(full_cur_file) and not cur_file.startswith('.'): - files += listMediaFiles(full_cur_file, minSize, delete_ignored, media, audio, meta, archives, other, otherext) + files += list_media_files(full_cur_file, minSize, delete_ignored, media, audio, meta, archives, other, otherext) - elif isMediaFile(cur_file, media, audio, meta, archives, other, otherext): + elif is_media_file(cur_file, media, audio, meta, archives, other, otherext): # Optionally ignore sample files - if is_sample(full_cur_file) or not is_minSize(full_cur_file, minSize): + if is_sample(full_cur_file) or not is_min_size(full_cur_file, minSize): if delete_ignored == 1: try: os.unlink(full_cur_file) @@ -1160,11 +1160,11 @@ def find_imdbid(dirName, inputName, omdbApiKey): return imdbid -def extractFiles(src, dst=None, keep_archive=None): +def extract_files(src, dst=None, keep_archive=None): extracted_folder = [] extracted_archive = [] - for inputFile in listMediaFiles(src, media=False, audio=False, meta=False, archives=True): + for inputFile in list_media_files(src, media=False, audio=False, meta=False, archives=True): dir_path = os.path.dirname(inputFile) full_file_name = os.path.basename(inputFile) archive_name = os.path.splitext(full_file_name)[0] @@ -1181,7 +1181,7 @@ def extractFiles(src, dst=None, keep_archive=None): logger.error("Extraction failed for: {0}".format(full_file_name)) for folder in extracted_folder: - for inputFile in listMediaFiles(folder, media=False, audio=False, meta=False, archives=True): + for inputFile in list_media_files(folder, media=False, audio=False, meta=False, archives=True): full_file_name = os.path.basename(inputFile) archive_name = os.path.splitext(full_file_name)[0] archive_name = re.sub(r"part[0-9]+", "", archive_name) @@ -1258,7 +1258,7 @@ def plex_update(category): logger.debug("Could not identify section for plex update", 'PLEX') -def backupVersionedFile(old_file, version): +def backup_versioned_file(old_file, version): num_tries = 0 new_file = '{old}.v{version}'.format(old=old_file, version=version) @@ -1287,7 +1287,7 @@ def backupVersionedFile(old_file, version): return True -def update_downloadInfoStatus(inputName, status): +def update_download_info_status(inputName, status): logger.db("Updating status of our download {0} in the DB to {1}".format(inputName, status)) my_db = nzbToMediaDB.DBConnection() @@ -1295,7 +1295,7 @@ def update_downloadInfoStatus(inputName, status): [status, datetime.date.today().toordinal(), text_type(inputName)]) -def get_downloadInfo(inputName, status): +def get_download_info(inputName, status): logger.db("Getting download info for {0} from the DB".format(inputName)) my_db = nzbToMediaDB.DBConnection() diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py index cb812642..deb74e4b 100644 --- a/core/transcoder/transcoder.py +++ b/core/transcoder/transcoder.py @@ -13,17 +13,17 @@ from six import iteritems, text_type, string_types import core from core import logger -from core.nzbToMediaUtil import makeDir +from 
core.nzbToMediaUtil import make_dir -def isVideoGood(videofile, status): +def is_video_good(videofile, status): file_name_ext = os.path.basename(videofile) file_name, file_ext = os.path.splitext(file_name_ext) disable = False if file_ext not in core.MEDIACONTAINER or not core.FFPROBE or not core.CHECK_MEDIA or file_ext in ['.iso'] or (status > 0 and core.NOEXTRACTFAILED): disable = True else: - test_details, res = getVideoDetails(core.TEST_FILE) + test_details, res = get_video_details(core.TEST_FILE) if res != 0 or test_details.get("error"): disable = True logger.info("DISABLED: ffprobe failed to analyse test file. Stopping corruption check.", 'TRANSCODER') @@ -41,7 +41,7 @@ def isVideoGood(videofile, status): return True logger.info('Checking [{0}] for corruption, please stand by ...'.format(file_name_ext), 'TRANSCODER') - video_details, result = getVideoDetails(videofile) + video_details, result = get_video_details(videofile) if result != 0: logger.error("FAILED: [{0}] is corrupted!".format(file_name_ext), 'TRANSCODER') @@ -72,7 +72,7 @@ def zip_out(file, img, bitbucket): return procin -def getVideoDetails(videofile, img=None, bitbucket=None): +def get_video_details(videofile, img=None, bitbucket=None): video_details = {} result = 1 file = videofile @@ -116,12 +116,12 @@ def getVideoDetails(videofile, img=None, bitbucket=None): return video_details, result -def buildCommands(file, newDir, movieName, bitbucket): +def build_commands(file, newDir, movieName, bitbucket): if isinstance(file, string_types): input_file = file if 'concat:' in file: file = file.split('|')[0].replace('concat:', '') - video_details, result = getVideoDetails(file) + video_details, result = get_video_details(file) dir, name = os.path.split(file) name, ext = os.path.splitext(name) check = re.match("VTS_([0-9][0-9])_[0-9]+", name) @@ -136,7 +136,7 @@ def buildCommands(file, newDir, movieName, bitbucket): else: img, data = next(iteritems(file)) name = data['name'] - video_details, result = getVideoDetails(data['files'][0], img, bitbucket) + video_details, result = get_video_details(data['files'][0], img, bitbucket) input_file = '-' file = '-' @@ -471,7 +471,7 @@ def buildCommands(file, newDir, movieName, bitbucket): if core.SEMBED and os.path.isfile(file): for subfile in get_subs(file): - sub_details, result = getVideoDetails(subfile) + sub_details, result = get_video_details(subfile) if not sub_details or not sub_details.get("streams"): continue if core.SCODEC == "mov_text": @@ -528,7 +528,7 @@ def get_subs(file): def extract_subs(file, newfilePath, bitbucket): - video_details, result = getVideoDetails(file) + video_details, result = get_video_details(file) if not video_details: return @@ -586,7 +586,7 @@ def extract_subs(file, newfilePath, bitbucket): logger.error("Extracting subtitles has failed") -def processList(List, newDir, bitbucket): +def process_list(List, newDir, bitbucket): rem_list = [] new_list = [] combine = [] @@ -596,7 +596,7 @@ def processList(List, newDir, bitbucket): ext = os.path.splitext(item)[1].lower() if ext in ['.iso', '.bin', '.img'] and ext not in core.IGNOREEXTENSIONS: logger.debug("Attempting to rip disk image: {0}".format(item), "TRANSCODER") - new_list.extend(ripISO(item, newDir, bitbucket)) + new_list.extend(rip_iso(item, newDir, bitbucket)) rem_list.append(item) elif re.match(".+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", item) and '.vob' not in core.IGNOREEXTENSIONS: logger.debug("Found VIDEO_TS image file: {0}".format(item), "TRANSCODER") @@ -614,9 +614,9 @@ def processList(List, newDir, 
bitbucket): else: continue if vts_path: - new_list.extend(combineVTS(vts_path)) + new_list.extend(combine_vts(vts_path)) if combine: - new_list.extend(combineCD(combine)) + new_list.extend(combine_cd(combine)) for file in new_list: if isinstance(file, string_types) and 'concat:' not in file and not os.path.isfile(file): success = False @@ -633,7 +633,7 @@ def processList(List, newDir, bitbucket): return List, rem_list, new_list, success -def ripISO(item, newDir, bitbucket): +def rip_iso(item, newDir, bitbucket): new_files = [] failure_dir = 'failure' # Mount the ISO in your OS and call combineVTS. @@ -681,7 +681,7 @@ def ripISO(item, newDir, bitbucket): return new_files -def combineVTS(vtsPath): +def combine_vts(vtsPath): new_files = [] combined = '' for n in range(99): @@ -705,7 +705,7 @@ def combineVTS(vtsPath): return new_files -def combineCD(combine): +def combine_cd(combine): new_files = [] for item in set([re.match("(.+)[cC][dD][0-9].", item).groups()[0] for item in combine]): concat = '' @@ -728,17 +728,17 @@ def print_cmd(command): logger.debug("calling command:{0}".format(cmd)) -def Transcode_directory(dirName): +def transcode_directory(dirName): if not core.FFMPEG: return 1, dirName logger.info("Checking for files to be transcoded") final_result = 0 # initialize as successful if core.OUTPUTVIDEOPATH: new_dir = core.OUTPUTVIDEOPATH - makeDir(new_dir) + make_dir(new_dir) name = os.path.splitext(os.path.split(dirName)[1])[0] new_dir = os.path.join(new_dir, name) - makeDir(new_dir) + make_dir(new_dir) else: new_dir = dirName if platform.system() == 'Windows': @@ -746,8 +746,8 @@ def Transcode_directory(dirName): else: bitbucket = open('/dev/null') movie_name = os.path.splitext(os.path.split(dirName)[1])[0] - file_list = core.listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False) - file_list, rem_list, new_list, success = processList(file_list, new_dir, bitbucket) + file_list = core.list_media_files(dirName, media=True, audio=False, meta=False, archives=False) + file_list, rem_list, new_list, success = process_list(file_list, new_dir, bitbucket) if not success: bitbucket.close() return 1, dirName @@ -755,7 +755,7 @@ def Transcode_directory(dirName): for file in file_list: if isinstance(file, string_types) and os.path.splitext(file)[1] in core.IGNOREEXTENSIONS: continue - command = buildCommands(file, new_dir, movie_name, bitbucket) + command = build_commands(file, new_dir, movie_name, bitbucket) newfile_path = command[-1] # transcoding files may remove the original file, so make sure to extract subtitles first diff --git a/nzbToMedia.py b/nzbToMedia.py index 4079f87b..4735f018 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -635,7 +635,7 @@ from core.autoProcess.autoProcessMovie import autoProcessMovie from core.autoProcess.autoProcessMusic import autoProcessMusic from core.autoProcess.autoProcessTV import autoProcessTV from core.nzbToMediaUserScript import external_script -from core.nzbToMediaUtil import CharReplace, cleanDir, convert_to_ascii, extractFiles, getDirs, get_downloadInfo, get_nzoid, plex_update, update_downloadInfoStatus +from core.nzbToMediaUtil import char_replace, clean_dir, convert_to_ascii, extract_files, get_dirs, get_download_info, get_nzoid, plex_update, update_download_info_status try: text_type = unicode @@ -666,8 +666,8 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down input_name1 = input_name try: - encoded, input_directory1 = CharReplace(input_directory) - encoded, input_name1 = 
CharReplace(input_name) + encoded, input_directory1 = char_replace(input_directory) + encoded, input_name1 = char_replace(input_name) except: pass @@ -727,7 +727,7 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down if extract == 1: logger.debug('Checking for archives to extract in directory: {0}'.format(input_directory)) - extractFiles(input_directory) + extract_files(input_directory) logger.info("Calling {0}:{1} to post-process:{2}".format(section_name, input_category, input_name)) @@ -735,13 +735,13 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down result = autoProcessMovie().process(section_name, input_directory, input_name, status, clientAgent, download_id, input_category, failureLink) elif section_name in ["SickBeard", "NzbDrone", "Sonarr"]: - result = autoProcessTV().processEpisode(section_name, input_directory, input_name, status, clientAgent, - download_id, input_category, failureLink) + result = autoProcessTV().process_episode(section_name, input_directory, input_name, status, clientAgent, + download_id, input_category, failureLink) elif section_name in ["HeadPhones", "Lidarr"]: result = autoProcessMusic().process(section_name, input_directory, input_name, status, clientAgent, input_category) elif section_name == "Mylar": - result = autoProcessComics().processEpisode(section_name, input_directory, input_name, status, clientAgent, - input_category) + result = autoProcessComics().process_episode(section_name, input_directory, input_name, status, clientAgent, + input_category) elif section_name == "Gamez": result = autoProcessGames().process(section_name, input_directory, input_name, status, clientAgent, input_category) elif section_name == 'UserScript': @@ -754,10 +754,10 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down if result[0] == 0: if clientAgent != 'manual': # update download status in our DB - update_downloadInfoStatus(input_name, 1) + update_download_info_status(input_name, 1) if section_name not in ['UserScript', 'NzbDrone', 'Sonarr', 'Radarr', 'Lidarr']: # cleanup our processing folders of any misc unwanted files and empty directories - cleanDir(input_directory, section_name, input_category) + clean_dir(input_directory, section_name, input_category) return result @@ -879,11 +879,11 @@ def main(args, section=None): for subsection in subsections: if not core.CFG[section][subsection].isenabled(): continue - for dir_name in getDirs(section, subsection, link='move'): + for dir_name in get_dirs(section, subsection, link='move'): logger.info("Starting manual run for {0}:{1} - Folder: {2}".format(section, subsection, dir_name)) logger.info("Checking database for download info for {0} ...".format(os.path.basename(dir_name))) - core.DOWNLOADINFO = get_downloadInfo(os.path.basename(dir_name), 0) + core.DOWNLOADINFO = get_download_info(os.path.basename(dir_name), 0) if core.DOWNLOADINFO: logger.info("Found download info for {0}, " "setting variables now ...".format diff --git a/tests/general.py b/tests/general.py index 89f403f8..e0410c14 100755 --- a/tests/general.py +++ b/tests/general.py @@ -5,7 +5,7 @@ import guessit import requests import core -from core.nzbToMediaAutoFork import autoFork +from core.nzbToMediaAutoFork import auto_fork from core.nzbToMediaUtil import server_responding from core.transcoder import transcoder @@ -15,7 +15,7 @@ core.initialize() #label = core.TORRENT_CLASS.core.get_torrent_status("f33a9c4b15cbd9170722d700069af86746817ade", ["label"]).get()['label'] 
#print label -if transcoder.isVideoGood(core.TEST_FILE, 0): +if transcoder.is_video_good(core.TEST_FILE, 0): print("FFPROBE Works") else: print("FFPROBE FAILED") @@ -25,7 +25,7 @@ print(test) section = core.CFG.findsection('tv').isenabled() print(section) print(len(section)) -fork, fork_params = autoFork('SickBeard', 'tv') +fork, fork_params = auto_fork('SickBeard', 'tv') if server_responding("http://127.0.0.1:5050"): print("CouchPotato Running") From 7f2a4d26050fc1745ea6de6ef462515fc70c88c9 Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 16 Dec 2018 22:05:08 -0500 Subject: [PATCH 3/4] PEP8 Class name should be CamelCase --- TorrentToMedia.py | 20 ++++++++++---------- core/__init__.py | 10 +++++----- core/autoProcess/autoProcessComics.py | 2 +- core/autoProcess/autoProcessGames.py | 2 +- core/autoProcess/autoProcessMovie.py | 2 +- core/autoProcess/autoProcessMusic.py | 2 +- core/autoProcess/autoProcessTV.py | 2 +- nzbToMedia.py | 26 +++++++++++++------------- 8 files changed, 33 insertions(+), 33 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index e021ee1d..c06195c3 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -236,22 +236,22 @@ def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID result = external_script(output_destination, input_name, input_category, section) elif section_name in ['CouchPotato', 'Radarr']: - result = core.autoProcessMovie().process(section_name, output_destination, input_name, - status, clientAgent, input_hash, input_category) + result = core.Movie().process(section_name, output_destination, input_name, + status, clientAgent, input_hash, input_category) elif section_name in ['SickBeard', 'NzbDrone', 'Sonarr']: if input_hash: input_hash = input_hash.upper() - result = core.autoProcessTV().process_episode(section_name, output_destination, input_name, - status, clientAgent, input_hash, input_category) + result = core.TV().process_episode(section_name, output_destination, input_name, + status, clientAgent, input_hash, input_category) elif section_name in ['HeadPhones', 'Lidarr']: - result = core.autoProcessMusic().process(section_name, output_destination, input_name, - status, clientAgent, input_category) + result = core.Music().process(section_name, output_destination, input_name, + status, clientAgent, input_category) elif section_name == 'Mylar': - result = core.autoProcessComics().process_episode(section_name, output_destination, input_name, - status, clientAgent, input_category) + result = core.Comic().process_episode(section_name, output_destination, input_name, + status, clientAgent, input_category) elif section_name == 'Gamez': - result = core.autoProcessGames().process(section_name, output_destination, input_name, - status, clientAgent, input_category) + result = core.Game().process(section_name, output_destination, input_name, + status, clientAgent, input_category) plex_update(input_category) diff --git a/core/__init__.py b/core/__init__.py index ca9f2d8d..c5fd2c95 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -37,11 +37,11 @@ import six from six.moves import reload_module from core import logger, nzbToMediaDB, versionCheck -from core.autoProcess.autoProcessComics import autoProcessComics -from core.autoProcess.autoProcessGames import autoProcessGames -from core.autoProcess.autoProcessMovie import autoProcessMovie -from core.autoProcess.autoProcessMusic import autoProcessMusic -from core.autoProcess.autoProcessTV import autoProcessTV +from core.autoProcess.autoProcessComics import 
Comic +from core.autoProcess.autoProcessGames import Game +from core.autoProcess.autoProcessMovie import Movie +from core.autoProcess.autoProcessMusic import Music +from core.autoProcess.autoProcessTV import TV from core.databases import mainDB from core.nzbToMediaConfig import config from core.nzbToMediaUtil import ( diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index c2d00bd1..b6381758 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -11,7 +11,7 @@ from core.nzbToMediaUtil import convert_to_ascii, remote_dir, server_responding requests.packages.urllib3.disable_warnings() -class autoProcessComics(object): +class Comic(object): def process_episode(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): dir_name = dirName input_name = inputName diff --git a/core/autoProcess/autoProcessGames.py b/core/autoProcess/autoProcessGames.py index c45193c9..8f7b1190 100644 --- a/core/autoProcess/autoProcessGames.py +++ b/core/autoProcess/autoProcessGames.py @@ -12,7 +12,7 @@ from core.nzbToMediaUtil import convert_to_ascii, server_responding requests.packages.urllib3.disable_warnings() -class autoProcessGames(object): +class Game(object): def process(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): dir_name = dirName input_name = inputName diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index 4a30e1cf..9629a9d5 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -15,7 +15,7 @@ from core.transcoder import transcoder requests.packages.urllib3.disable_warnings() -class autoProcessMovie(object): +class Movie(object): def get_release(self, baseURL, imdbid=None, download_id=None, release_id=None): results = {} params = {} diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index 343bbe92..f5eaeb1a 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -14,7 +14,7 @@ from core.nzbToMediaUtil import convert_to_ascii, list_media_files, remote_dir, requests.packages.urllib3.disable_warnings() -class autoProcessMusic(object): +class Music(object): def command_complete(self, url, params, headers, section): try: r = requests.get(url, params=params, headers=headers, stream=True, verify=False, timeout=(30, 60)) diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 0a2b7761..8d85fddc 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -18,7 +18,7 @@ from core.transcoder import transcoder requests.packages.urllib3.disable_warnings() -class autoProcessTV(object): +class TV(object): def command_complete(self, url, params, headers, section): try: r = requests.get(url, params=params, headers=headers, stream=True, verify=False, timeout=(30, 60)) diff --git a/nzbToMedia.py b/nzbToMedia.py index 4735f018..8eae32cc 100755 --- a/nzbToMedia.py +++ b/nzbToMedia.py @@ -629,11 +629,11 @@ import sys import core from core import logger, nzbToMediaDB -from core.autoProcess.autoProcessComics import autoProcessComics -from core.autoProcess.autoProcessGames import autoProcessGames -from core.autoProcess.autoProcessMovie import autoProcessMovie -from core.autoProcess.autoProcessMusic import autoProcessMusic -from core.autoProcess.autoProcessTV import autoProcessTV +from core.autoProcess.autoProcessComics import Comic +from 
core.autoProcess.autoProcessGames import Game +from core.autoProcess.autoProcessMovie import Movie +from core.autoProcess.autoProcessMusic import Music +from core.autoProcess.autoProcessTV import TV from core.nzbToMediaUserScript import external_script from core.nzbToMediaUtil import char_replace, clean_dir, convert_to_ascii, extract_files, get_dirs, get_download_info, get_nzoid, plex_update, update_download_info_status @@ -732,18 +732,18 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down logger.info("Calling {0}:{1} to post-process:{2}".format(section_name, input_category, input_name)) if section_name in ["CouchPotato", "Radarr"]: - result = autoProcessMovie().process(section_name, input_directory, input_name, status, clientAgent, download_id, - input_category, failureLink) + result = Movie().process(section_name, input_directory, input_name, status, clientAgent, download_id, + input_category, failureLink) elif section_name in ["SickBeard", "NzbDrone", "Sonarr"]: - result = autoProcessTV().process_episode(section_name, input_directory, input_name, status, clientAgent, - download_id, input_category, failureLink) + result = TV().process_episode(section_name, input_directory, input_name, status, clientAgent, + download_id, input_category, failureLink) elif section_name in ["HeadPhones", "Lidarr"]: - result = autoProcessMusic().process(section_name, input_directory, input_name, status, clientAgent, input_category) + result = Music().process(section_name, input_directory, input_name, status, clientAgent, input_category) elif section_name == "Mylar": - result = autoProcessComics().process_episode(section_name, input_directory, input_name, status, clientAgent, - input_category) + result = Comic().process_episode(section_name, input_directory, input_name, status, clientAgent, + input_category) elif section_name == "Gamez": - result = autoProcessGames().process(section_name, input_directory, input_name, status, clientAgent, input_category) + result = Game().process(section_name, input_directory, input_name, status, clientAgent, input_category) elif section_name == 'UserScript': result = external_script(input_directory, input_name, input_category, section[usercat]) else: From 41fa636fc27779609cdaf965f8d8b4d30cb543ca Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 16 Dec 2018 23:33:31 -0500 Subject: [PATCH 4/4] PEP8 Argument should be lowercase --- TorrentToMedia.py | 34 ++-- core/autoProcess/autoProcessComics.py | 7 +- core/autoProcess/autoProcessGames.py | 6 +- core/autoProcess/autoProcessMovie.py | 30 ++- core/autoProcess/autoProcessMusic.py | 33 ++-- core/autoProcess/autoProcessTV.py | 20 +- core/extractor/extractor.py | 30 +-- core/logger.py | 52 ++--- core/nzbToMediaAutoFork.py | 22 +-- core/nzbToMediaDB.py | 54 ++--- core/nzbToMediaSceneExceptions.py | 3 +- core/nzbToMediaUserScript.py | 18 +- core/nzbToMediaUtil.py | 274 +++++++++++++------------- core/transcoder/transcoder.py | 54 ++--- nzbToMedia.py | 39 ++-- 15 files changed, 326 insertions(+), 350 deletions(-) diff --git a/TorrentToMedia.py b/TorrentToMedia.py index c06195c3..2d6ffbab 100755 --- a/TorrentToMedia.py +++ b/TorrentToMedia.py @@ -11,16 +11,12 @@ from core.nzbToMediaUtil import char_replace, convert_to_ascii, plex_update, rep from libs.six import text_type -def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent): - input_directory = inputDirectory - input_name = inputName - input_category = inputCategory - input_hash = inputHash +def 
process_torrent(input_directory, input_name, input_category, input_hash, input_id, client_agent): status = 1 # 1 = failed | 0 = success root = 0 found_file = 0 - if clientAgent != 'manual' and not core.DOWNLOADINFO: + if client_agent != 'manual' and not core.DOWNLOADINFO: logger.debug('Adding TORRENT download info for directory {0} to database'.format(input_directory)) my_db = nzbToMediaDB.DBConnection() @@ -37,8 +33,8 @@ def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID control_value_dict = {"input_directory": text_type(input_directory1)} new_value_dict = {"input_name": text_type(input_name1), "input_hash": text_type(input_hash), - "input_id": text_type(inputID), - "client_agent": text_type(clientAgent), + "input_id": text_type(input_id), + "client_agent": text_type(client_agent), "status": 0, "last_update": datetime.date.today().toordinal() } @@ -102,8 +98,8 @@ def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID extensions = section.get('user_script_mediaExtensions', "").lower().split(',') unique_path = int(section.get("unique_path", 1)) - if clientAgent != 'manual': - core.pause_torrent(clientAgent, input_hash, inputID, input_name) + if client_agent != 'manual': + core.pause_torrent(client_agent, input_hash, input_id, input_name) # In case input is not directory, make sure to create one. # This way Processing is isolated. @@ -237,21 +233,21 @@ def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID elif section_name in ['CouchPotato', 'Radarr']: result = core.Movie().process(section_name, output_destination, input_name, - status, clientAgent, input_hash, input_category) + status, client_agent, input_hash, input_category) elif section_name in ['SickBeard', 'NzbDrone', 'Sonarr']: if input_hash: input_hash = input_hash.upper() result = core.TV().process_episode(section_name, output_destination, input_name, - status, clientAgent, input_hash, input_category) + status, client_agent, input_hash, input_category) elif section_name in ['HeadPhones', 'Lidarr']: result = core.Music().process(section_name, output_destination, input_name, - status, clientAgent, input_category) + status, client_agent, input_category) elif section_name == 'Mylar': result = core.Comic().process_episode(section_name, output_destination, input_name, - status, clientAgent, input_category) + status, client_agent, input_category) elif section_name == 'Gamez': result = core.Game().process(section_name, output_destination, input_name, - status, clientAgent, input_category) + status, client_agent, input_category) plex_update(input_category) @@ -259,13 +255,13 @@ def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID if not core.TORRENT_RESUME_ON_FAILURE: logger.error("A problem was reported in the autoProcess* script. " "Torrent won't resume seeding (settings)") - elif clientAgent != 'manual': + elif client_agent != 'manual': logger.error("A problem was reported in the autoProcess* script. 
" "If torrent was paused we will resume seeding") - core.resume_torrent(clientAgent, input_hash, inputID, input_name) + core.resume_torrent(client_agent, input_hash, input_id, input_name) else: - if clientAgent != 'manual': + if client_agent != 'manual': # update download status in our DB core.update_download_info_status(input_name, 1) @@ -276,7 +272,7 @@ def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID for file in files: logger.debug('Checking symlink: {0}'.format(os.path.join(dirpath, file))) replace_links(os.path.join(dirpath, file)) - core.remove_torrent(clientAgent, input_hash, inputID, input_name) + core.remove_torrent(client_agent, input_hash, input_id, input_name) if not section_name == 'UserScript': # for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py index b6381758..47a2cb56 100644 --- a/core/autoProcess/autoProcessComics.py +++ b/core/autoProcess/autoProcessComics.py @@ -12,14 +12,11 @@ requests.packages.urllib3.disable_warnings() class Comic(object): - def process_episode(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): - dir_name = dirName - input_name = inputName - + def process_episode(self, section, dir_name, input_name=None, status=0, client_agent='manual', input_category=None): apc_version = "2.04" comicrn_version = "1.01" - cfg = dict(core.CFG[section][inputCategory]) + cfg = dict(core.CFG[section][input_category]) host = cfg["host"] port = cfg["port"] diff --git a/core/autoProcess/autoProcessGames.py b/core/autoProcess/autoProcessGames.py index 8f7b1190..258c0137 100644 --- a/core/autoProcess/autoProcessGames.py +++ b/core/autoProcess/autoProcessGames.py @@ -13,12 +13,10 @@ requests.packages.urllib3.disable_warnings() class Game(object): - def process(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): - dir_name = dirName - input_name = inputName + def process(self, section, dir_name, input_name=None, status=0, client_agent='manual', input_category=None): status = int(status) - cfg = dict(core.CFG[section][inputCategory]) + cfg = dict(core.CFG[section][input_category]) host = cfg["host"] port = cfg["port"] diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py index 9629a9d5..9a3ed3db 100644 --- a/core/autoProcess/autoProcessMovie.py +++ b/core/autoProcess/autoProcessMovie.py @@ -16,23 +16,23 @@ requests.packages.urllib3.disable_warnings() class Movie(object): - def get_release(self, baseURL, imdbid=None, download_id=None, release_id=None): + def get_release(self, base_url, imdb_id=None, download_id=None, release_id=None): results = {} params = {} # determine cmd and params to send to CouchPotato to get our results section = 'movies' cmd = "media.list" - if release_id or imdbid: + if release_id or imdb_id: section = 'media' cmd = "media.get" - params['id'] = release_id or imdbid + params['id'] = release_id or imdb_id - if not (release_id or imdbid or download_id): + if not (release_id or imdb_id or download_id): logger.debug("No information available to filter CP results") return results - url = "{0}{1}".format(baseURL, cmd) + url = "{0}{1}".format(base_url, cmd) logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params)) try: @@ -145,11 +145,9 @@ class Movie(object): # ValueError catches simplejson's JSONDecodeError and json's ValueError return False - def process(self, section, dirName, 
inputName=None, status=0, clientAgent="manual", download_id="", inputCategory=None, failureLink=None): - dir_name = dirName - input_name = inputName + def process(self, section, dir_name, input_name=None, status=0, client_agent="manual", download_id="", input_category=None, failure_link=None): - cfg = dict(core.CFG[section][inputCategory]) + cfg = dict(core.CFG[section][input_category]) host = cfg["host"] port = cfg["port"] @@ -244,10 +242,10 @@ class Movie(object): logger.info("Status shown as success from Downloader, but corrupt video files found. Setting as failed.", section) if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') - if failureLink: - failureLink += '&corrupt=true' + if failure_link: + failure_link += '&corrupt=true' status = 1 - elif clientAgent == "manual": + elif client_agent == "manual": logger.warning("No media files found in directory {0} to manually process.".format(dir_name), section) return [0, ""] # Success (as far as this script is concerned) else: @@ -275,7 +273,7 @@ class Movie(object): if not release and ".cp(tt" not in video and imdbid: video_name, video_ext = os.path.splitext(video) video2 = "{0}.cp({1}){2}".format(video_name, imdbid, video_ext) - if not (clientAgent in [core.TORRENT_CLIENTAGENT, 'manual'] and core.USELINK == 'move-sym'): + if not (client_agent in [core.TORRENT_CLIENTAGENT, 'manual'] and core.USELINK == 'move-sym'): logger.debug('Renaming: {0} to: {1}'.format(video, video2)) os.rename(video, video2) @@ -285,7 +283,7 @@ class Movie(object): params = {} if download_id and release_id: - params['downloader'] = downloader or clientAgent + params['downloader'] = downloader or client_agent params['download_id'] = download_id params['media_folder'] = remote_dir(dir_name) if remote_path else dir_name @@ -343,8 +341,8 @@ class Movie(object): else: core.FAILED = True logger.postprocess("FAILED DOWNLOAD DETECTED FOR {0}".format(input_name), section) - if failureLink: - report_nzb(failureLink, clientAgent) + if failure_link: + report_nzb(failure_link, client_agent) if section == "Radarr": logger.postprocess("FAILED: The download failed. 
Sending failed download to {0} for CDH processing".format(section), section) diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py index f5eaeb1a..f32ab0ae 100644 --- a/core/autoProcess/autoProcessMusic.py +++ b/core/autoProcess/autoProcessMusic.py @@ -32,8 +32,8 @@ class Music(object): logger.error("{0} did not return expected json data.".format(section), section) return None - def get_status(self, url, apikey, dirName): - logger.debug("Attempting to get current status for release:{0}".format(os.path.basename(dirName))) + def get_status(self, url, apikey, dir_name): + logger.debug("Attempting to get current status for release:{0}".format(os.path.basename(dir_name))) params = { 'apikey': apikey, @@ -55,13 +55,13 @@ class Music(object): return None for album in result: - if os.path.basename(dirName) == album['FolderName']: + if os.path.basename(dir_name) == album['FolderName']: return album["Status"].lower() - def force_process(self, params, url, apikey, inputName, dirName, section, wait_for): - release_status = self.get_status(url, apikey, dirName) + def force_process(self, params, url, apikey, input_name, dir_name, section, wait_for): + release_status = self.get_status(url, apikey, dir_name) if not release_status: - logger.error("Could not find a status for {0}, is it in the wanted list ?".format(inputName), section) + logger.error("Could not find a status for {0}, is it in the wanted list ?".format(input_name), section) logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params), section) @@ -77,32 +77,29 @@ class Music(object): logger.error("Server returned status {0}".format(r.status_code), section) return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)] elif r.text == "OK": - logger.postprocess("SUCCESS: Post-Processing started for {0} in folder {1} ...".format(inputName, dirName), section) + logger.postprocess("SUCCESS: Post-Processing started for {0} in folder {1} ...".format(input_name, dir_name), section) else: - logger.error("FAILED: Post-Processing has NOT started for {0} in folder {1}. exiting!".format(inputName, dirName), section) + logger.error("FAILED: Post-Processing has NOT started for {0} in folder {1}. exiting!".format(input_name, dir_name), section) return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)] # we will now wait for this album to be processed before returning to TorrentToMedia and unpausing. timeout = time.time() + 60 * wait_for while time.time() < timeout: - current_status = self.get_status(url, apikey, dirName) + current_status = self.get_status(url, apikey, dir_name) if current_status is not None and current_status != release_status: # Something has changed. CPS must have processed this movie. 
logger.postprocess("SUCCESS: This release is now marked as status [{0}]".format(current_status), section) - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] - if not os.path.isdir(dirName): - logger.postprocess("SUCCESS: The input directory {0} has been removed Processing must have finished.".format(dirName), section) - return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] + if not os.path.isdir(dir_name): + logger.postprocess("SUCCESS: The input directory {0} has been removed Processing must have finished.".format(dir_name), section) + return [0, "{0}: Successfully post-processed {1}".format(section, input_name)] time.sleep(10 * wait_for) # The status hasn't changed. return [2, "no change"] - def process(self, section, dirName, inputName=None, status=0, clientAgent="manual", inputCategory=None): - dir_name = dirName - input_name = inputName - + def process(self, section, dir_name, input_name=None, status=0, client_agent="manual", input_category=None): status = int(status) - cfg = dict(core.CFG[section][inputCategory]) + cfg = dict(core.CFG[section][input_category]) host = cfg["host"] port = cfg["port"] diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py index 8d85fddc..1636e55b 100644 --- a/core/autoProcess/autoProcessTV.py +++ b/core/autoProcess/autoProcessTV.py @@ -52,9 +52,9 @@ class TV(object): # ValueError catches simplejson's JSONDecodeError and json's ValueError return False - def process_episode(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None): + def process_episode(self, section, dir_name, input_name=None, failed=False, client_agent="manual", download_id=None, input_category=None, failure_link=None): - cfg = dict(core.CFG[section][inputCategory]) + cfg = dict(core.CFG[section][input_category]) host = cfg["host"] port = cfg["port"] @@ -67,7 +67,7 @@ class TV(object): if server_responding("{0}{1}:{2}{3}".format(protocol, host, port, web_root)): # auto-detect correct fork - fork, fork_params = auto_fork(section, inputCategory) + fork, fork_params = auto_fork(section, input_category) elif not username and not apikey: logger.info('No SickBeard username or Sonarr apikey entered. 
Performing transcoder functions only') fork, fork_params = "None", {} @@ -78,7 +78,7 @@ class TV(object): delete_failed = int(cfg.get("delete_failed", 0)) nzb_extraction_by = cfg.get("nzbExtractionBy", "Downloader") process_method = cfg.get("process_method") - if clientAgent == core.TORRENT_CLIENTAGENT and core.USELINK == "move-sym": + if client_agent == core.TORRENT_CLIENTAGENT and core.USELINK == "move-sym": process_method = "symlink" remote_path = int(cfg.get("remote_path", 0)) wait_for = int(cfg.get("wait_for", 2)) @@ -113,7 +113,7 @@ class TV(object): if e.errno != errno.EEXIST: raise - if 'process_method' not in fork_params or (clientAgent in ['nzbget', 'sabnzbd'] and nzb_extraction_by != "Destination"): + if 'process_method' not in fork_params or (client_agent in ['nzbget', 'sabnzbd'] and nzb_extraction_by != "Destination"): if input_name: process_all_exceptions(input_name, dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name) @@ -147,9 +147,9 @@ class TV(object): failed = 1 if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': print('[NZB] MARK=BAD') - if failureLink: - failureLink += '&corrupt=true' - elif clientAgent == "manual": + if failure_link: + failure_link += '&corrupt=true' + elif client_agent == "manual": logger.warning("No media files found in directory {0} to manually process.".format(dir_name), section) return [0, ""] # Success (as far as this script is concerned) elif nzb_extraction_by == "Destination": @@ -248,8 +248,8 @@ class TV(object): logger.postprocess("SUCCESS: The download succeeded, sending a post-process request", section) else: core.FAILED = True - if failureLink: - report_nzb(failureLink, clientAgent) + if failure_link: + report_nzb(failure_link, client_agent) if 'failed' in fork_params: logger.postprocess("FAILED: The download failed. 
Sending 'failed' process request to {0} branch".format(fork), section) elif section == "NzbDrone": diff --git a/core/extractor/extractor.py b/core/extractor/extractor.py index 14a3a6ba..4796bb3e 100644 --- a/core/extractor/extractor.py +++ b/core/extractor/extractor.py @@ -11,7 +11,7 @@ from time import sleep import core -def extract(filePath, outputDestination): +def extract(file_path, output_destination): success = 0 # Using Windows if platform.system() == 'Windows': @@ -69,7 +69,7 @@ def extract(filePath, outputDestination): if not extract_commands: core.logger.warning("EXTRACTOR: No archive extracting programs found, plugin will be disabled") - ext = os.path.splitext(filePath) + ext = os.path.splitext(file_path) cmd = [] if ext[1] in (".gz", ".bz2", ".lzma"): # Check if this is a tar @@ -88,7 +88,7 @@ def extract(filePath, outputDestination): return False # Create outputDestination folder - core.make_dir(outputDestination) + core.make_dir(output_destination) if core.PASSWORDSFILE and os.path.isfile(os.path.normpath(core.PASSWORDSFILE)): passwords = [line.strip() for line in open(os.path.normpath(core.PASSWORDSFILE))] @@ -96,25 +96,25 @@ def extract(filePath, outputDestination): passwords = [] core.logger.info("Extracting {file} to {destination}".format - (file=filePath, destination=outputDestination)) + (file=file_path, destination=output_destination)) core.logger.debug("Extracting {cmd} {file} {destination}".format - (cmd=cmd, file=filePath, destination=outputDestination)) + (cmd=cmd, file=file_path, destination=output_destination)) orig_files = [] orig_dirs = [] - for dir, subdirs, files in os.walk(outputDestination): + for dir, subdirs, files in os.walk(output_destination): for subdir in subdirs: orig_dirs.append(os.path.join(dir, subdir)) for file in files: orig_files.append(os.path.join(dir, file)) pwd = os.getcwd() # Get our Present Working Directory - os.chdir(outputDestination) # Not all unpack commands accept full paths, so just extract into this directory + os.chdir(output_destination) # Not all unpack commands accept full paths, so just extract into this directory devnull = open(os.devnull, 'w') try: # now works same for nt and *nix info = None - cmd.append(filePath) # add filePath to final cmd arg. + cmd.append(file_path) # add filePath to final cmd arg. if platform.system() == 'Windows': info = subprocess.STARTUPINFO() info.dwFlags |= subprocess.STARTF_USESHOWWINDOW @@ -126,7 +126,7 @@ def extract(filePath, outputDestination): res = p.wait() if res == 0: # Both Linux and Windows return 0 for successful. core.logger.info("EXTRACTOR: Extraction was successful for {file} to {destination}".format - (file=filePath, destination=outputDestination)) + (file=file_path, destination=output_destination)) success = 1 elif len(passwords) > 0: core.logger.info("EXTRACTOR: Attempting to extract with passwords") @@ -142,7 +142,7 @@ def extract(filePath, outputDestination): if (res >= 0 and platform == 'Windows') or res == 0: core.logger.info("EXTRACTOR: Extraction was successful " "for {file} to {destination} using password: {pwd}".format - (file=filePath, destination=outputDestination, pwd=password)) + (file=file_path, destination=output_destination, pwd=password)) success = 1 break else: @@ -150,7 +150,7 @@ def extract(filePath, outputDestination): except: core.logger.error("EXTRACTOR: Extraction failed for {file}. 
" "Could not call command {cmd}".format - (file=filePath, cmd=cmd)) + (file=file_path, cmd=cmd)) os.chdir(pwd) return False @@ -159,8 +159,8 @@ def extract(filePath, outputDestination): if success: # sleep to let files finish writing to disk sleep(3) - perms = stat.S_IMODE(os.lstat(os.path.split(filePath)[0]).st_mode) - for dir, subdirs, files in os.walk(outputDestination): + perms = stat.S_IMODE(os.lstat(os.path.split(file_path)[0]).st_mode) + for dir, subdirs, files in os.walk(output_destination): for subdir in subdirs: if not os.path.join(dir, subdir) in orig_files: try: @@ -170,12 +170,12 @@ def extract(filePath, outputDestination): for file in files: if not os.path.join(dir, file) in orig_files: try: - shutil.copymode(filePath, os.path.join(dir, file)) + shutil.copymode(file_path, os.path.join(dir, file)) except: pass return True else: core.logger.error("EXTRACTOR: Extraction failed for {file}. " "Result was {result}".format - (file=filePath, result=res)) + (file=file_path, result=res)) return False diff --git a/core/logger.py b/core/logger.py index 7720af33..1e3881ed 100644 --- a/core/logger.py +++ b/core/logger.py @@ -58,10 +58,10 @@ class NTMRotatingLogHandler(object): handler.flush() handler.close() - def init_logging(self, consoleLogging=True): + def init_logging(self, console_logging=True): - if consoleLogging: - self.console_logging = consoleLogging + if console_logging: + self.console_logging = console_logging old_handler = None @@ -180,7 +180,7 @@ class NTMRotatingLogHandler(object): pp_logger.addHandler(new_file_handler) db_logger.addHandler(new_file_handler) - def log(self, toLog, logLevel=MESSAGE, section='MAIN'): + def log(self, to_log, log_level=MESSAGE, section='MAIN'): with self.log_lock: @@ -193,7 +193,7 @@ class NTMRotatingLogHandler(object): self.writes_since_check += 1 try: - message = u"{0}: {1}".format(section.upper(), toLog) + message = u"{0}: {1}".format(section.upper(), to_log) except UnicodeError: message = u"{0}: Message contains non-utf-8 string".format(section.upper()) @@ -206,22 +206,22 @@ class NTMRotatingLogHandler(object): setattr(db_logger, 'db', lambda *args: db_logger.log(DB, *args)) try: - if logLevel == DEBUG: + if log_level == DEBUG: if core.LOG_DEBUG == 1: ntm_logger.debug(out_line) - elif logLevel == MESSAGE: + elif log_level == MESSAGE: ntm_logger.info(out_line) - elif logLevel == WARNING: + elif log_level == WARNING: ntm_logger.warning(out_line) - elif logLevel == ERROR: + elif log_level == ERROR: ntm_logger.error(out_line) - elif logLevel == POSTPROCESS: + elif log_level == POSTPROCESS: pp_logger.postprocess(out_line) - elif logLevel == DB: + elif log_level == DB: if core.LOG_DB == 1: db_logger.db(out_line) else: - ntm_logger.info(logLevel, out_line) + ntm_logger.info(log_level, out_line) except ValueError: pass @@ -249,32 +249,32 @@ class DispatchingFormatter(object): ntm_log_instance = NTMRotatingLogHandler(core.LOG_FILE, NUM_LOGS, LOG_SIZE) -def log(toLog, logLevel=MESSAGE, section='MAIN'): - ntm_log_instance.log(toLog, logLevel, section) +def log(to_log, log_level=MESSAGE, section='MAIN'): + ntm_log_instance.log(to_log, log_level, section) -def info(toLog, section='MAIN'): - log(toLog, MESSAGE, section) +def info(to_log, section='MAIN'): + log(to_log, MESSAGE, section) -def error(toLog, section='MAIN'): - log(toLog, ERROR, section) +def error(to_log, section='MAIN'): + log(to_log, ERROR, section) -def warning(toLog, section='MAIN'): - log(toLog, WARNING, section) +def warning(to_log, section='MAIN'): + log(to_log, WARNING, section) -def 
debug(toLog, section='MAIN'): - log(toLog, DEBUG, section) +def debug(to_log, section='MAIN'): + log(to_log, DEBUG, section) -def postprocess(toLog, section='POSTPROCESS'): - log(toLog, POSTPROCESS, section) +def postprocess(to_log, section='POSTPROCESS'): + log(to_log, POSTPROCESS, section) -def db(toLog, section='DB'): - log(toLog, DB, section) +def db(to_log, section='DB'): + log(to_log, DB, section) def log_error_and_exit(error_msg): diff --git a/core/nzbToMediaAutoFork.py b/core/nzbToMediaAutoFork.py index 0c824176..ca8df914 100644 --- a/core/nzbToMediaAutoFork.py +++ b/core/nzbToMediaAutoFork.py @@ -7,11 +7,11 @@ import core from core import logger -def auto_fork(section, inputCategory): +def auto_fork(section, input_category): # auto-detect correct section # config settings - cfg = dict(core.CFG[section][inputCategory]) + cfg = dict(core.CFG[section][input_category]) host = cfg.get("host") port = cfg.get("port") @@ -31,26 +31,26 @@ def auto_fork(section, inputCategory): detected = False if section == "NzbDrone": logger.info("Attempting to verify {category} fork".format - (category=inputCategory)) + (category=input_category)) url = "{protocol}{host}:{port}{root}/api/rootfolder".format( protocol=protocol, host=host, port=port, root=web_root) headers = {"X-Api-Key": apikey} try: r = requests.get(url, headers=headers, stream=True, verify=False) except requests.ConnectionError: - logger.warning("Could not connect to {0}:{1} to verify fork!".format(section, inputCategory)) + logger.warning("Could not connect to {0}:{1} to verify fork!".format(section, input_category)) if not r.ok: logger.warning("Connection to {section}:{category} failed! " "Check your configuration".format - (section=section, category=inputCategory)) + (section=section, category=input_category)) fork = ['default', {}] elif fork == "auto": params = core.ALL_FORKS rem_params = [] - logger.info("Attempting to auto-detect {category} fork".format(category=inputCategory)) + logger.info("Attempting to auto-detect {category} fork".format(category=input_category)) # define the order to test. Default must be first since the default fork doesn't reject parameters. # then in order of most unique parameters. 
@@ -75,7 +75,7 @@ def auto_fork(section, inputCategory): r = s.get(url, auth=(username, password), verify=False) except requests.ConnectionError: logger.info("Could not connect to {section}:{category} to perform auto-fork detection!".format - (section=section, category=inputCategory)) + (section=section, category=input_category)) r = [] if r and r.ok: if apikey: @@ -99,16 +99,16 @@ def auto_fork(section, inputCategory): break if detected: logger.info("{section}:{category} fork auto-detection successful ...".format - (section=section, category=inputCategory)) + (section=section, category=input_category)) elif rem_params: logger.info("{section}:{category} fork auto-detection found custom params {params}".format - (section=section, category=inputCategory, params=params)) + (section=section, category=input_category, params=params)) fork = ['custom', params] else: logger.info("{section}:{category} fork auto-detection failed".format - (section=section, category=inputCategory)) + (section=section, category=input_category)) fork = core.FORKS.items()[core.FORKS.keys().index(core.FORK_DEFAULT)] logger.info("{section}:{category} fork set to {fork}".format - (section=section, category=inputCategory, fork=fork[0])) + (section=section, category=input_category, fork=fork[0])) return fork[0], fork[1] diff --git a/core/nzbToMediaDB.py b/core/nzbToMediaDB.py index f5c49410..939af5b0 100644 --- a/core/nzbToMediaDB.py +++ b/core/nzbToMediaDB.py @@ -85,7 +85,7 @@ class DBConnection(object): return sql_result - def mass_action(self, querylist, logTransaction=False): + def mass_action(self, querylist, log_transaction=False): if querylist is None: return @@ -96,11 +96,11 @@ class DBConnection(object): try: for qu in querylist: if len(qu) == 1: - if logTransaction: + if log_transaction: logger.log(qu[0], logger.DEBUG) sql_result.append(self.connection.execute(qu[0])) elif len(qu) > 1: - if logTransaction: + if log_transaction: logger.log(u"{query} with args {args}".format(query=qu[0], args=qu[1]), logger.DEBUG) sql_result.append(self.connection.execute(qu[0], qu[1])) self.connection.commit() @@ -167,20 +167,20 @@ class DBConnection(object): return sql_results - def upsert(self, tableName, valueDict, keyDict): + def upsert(self, table_name, value_dict, key_dict): changes_before = self.connection.total_changes - gen_params = lambda myDict: ["{key} = ?".format(key=k) for k in myDict.keys()] + gen_params = lambda my_dict: ["{key} = ?".format(key=k) for k in my_dict.keys()] - items = list(valueDict.values()) + list(keyDict.values()) + items = list(value_dict.values()) + list(key_dict.values()) self.action( "UPDATE {table} " "SET {params} " "WHERE {conditions}".format( - table=tableName, - params=", ".join(gen_params(valueDict)), - conditions=" AND ".join(gen_params(keyDict)) + table=table_name, + params=", ".join(gen_params(value_dict)), + conditions=" AND ".join(gen_params(key_dict)) ), items ) @@ -189,16 +189,16 @@ class DBConnection(object): self.action( "INSERT OR IGNORE INTO {table} ({columns}) " "VALUES ({values})".format( - table=tableName, - columns=", ".join(map(text_type, valueDict.keys())), - values=", ".join(["?"] * len(valueDict.values())) + table=table_name, + columns=", ".join(map(text_type, value_dict.keys())), + values=", ".join(["?"] * len(value_dict.values())) ), - list(valueDict.values()) + list(value_dict.values()) ) - def table_info(self, tableName): + def table_info(self, table_name): # FIXME ? 
binding is not supported here, but I cannot find a way to escape a string manually - cursor = self.connection.execute("PRAGMA table_info({0})".format(tableName)) + cursor = self.connection.execute("PRAGMA table_info({0})".format(table_name)) columns = {} for column in cursor: columns[column['name']] = {'type': column['type']} @@ -237,26 +237,26 @@ def pretty_name(class_name): return ' '.join([x.group() for x in re.finditer("([A-Z])([a-z0-9]+)", class_name)]) -def _process_upgrade(connection, upgradeClass): - instance = upgradeClass(connection) +def _process_upgrade(connection, upgrade_class): + instance = upgrade_class(connection) logger.log(u"Checking {name} database upgrade".format - (name=pretty_name(upgradeClass.__name__)), logger.DEBUG) + (name=pretty_name(upgrade_class.__name__)), logger.DEBUG) if not instance.test(): logger.log(u"Database upgrade required: {name}".format - (name=pretty_name(upgradeClass.__name__)), logger.MESSAGE) + (name=pretty_name(upgrade_class.__name__)), logger.MESSAGE) try: instance.execute() except sqlite3.DatabaseError as error: print(u"Error in {name}: {msg}".format - (name=upgradeClass.__name__, msg=error)) + (name=upgrade_class.__name__, msg=error)) raise logger.log(u"{name} upgrade completed".format - (name=upgradeClass.__name__), logger.DEBUG) + (name=upgrade_class.__name__), logger.DEBUG) else: logger.log(u"{name} upgrade not required".format - (name=upgradeClass.__name__), logger.DEBUG) + (name=upgrade_class.__name__), logger.DEBUG) - for upgradeSubClass in upgradeClass.__subclasses__(): + for upgradeSubClass in upgrade_class.__subclasses__(): _process_upgrade(connection, upgradeSubClass) @@ -265,11 +265,11 @@ class SchemaUpgrade(object): def __init__(self, connection): self.connection = connection - def has_table(self, tableName): - return len(self.connection.action("SELECT 1 FROM sqlite_master WHERE name = ?;", (tableName,)).fetchall()) > 0 + def has_table(self, table_name): + return len(self.connection.action("SELECT 1 FROM sqlite_master WHERE name = ?;", (table_name,)).fetchall()) > 0 - def has_column(self, tableName, column): - return column in self.connection.table_info(tableName) + def has_column(self, table_name, column): + return column in self.connection.table_info(table_name) def add_column(self, table, column, type="NUMERIC", default=0): self.connection.action("ALTER TABLE {0} ADD {1} {2}".format(table, column, type)) diff --git a/core/nzbToMediaSceneExceptions.py b/core/nzbToMediaSceneExceptions.py index 5cb283a9..6a93d514 100644 --- a/core/nzbToMediaSceneExceptions.py +++ b/core/nzbToMediaSceneExceptions.py @@ -65,8 +65,7 @@ def strip_groups(filename): return newfile_path -def rename_file(filename, newfilePath): - newfile_path = newfilePath +def rename_file(filename, newfile_path): if os.path.isfile(newfile_path): newfile_path = os.path.splitext(newfile_path)[0] + ".NTM" + os.path.splitext(newfile_path)[1] logger.debug("Replacing file name {old} with download name {new}".format diff --git a/core/nzbToMediaUserScript.py b/core/nzbToMediaUserScript.py index 32d2f014..c4885444 100644 --- a/core/nzbToMediaUserScript.py +++ b/core/nzbToMediaUserScript.py @@ -9,7 +9,7 @@ from core.nzbToMediaUtil import import_subs, list_media_files, remove_dir from core.transcoder import transcoder -def external_script(outputDestination, torrentName, torrentLabel, settings): +def external_script(output_destination, torrent_name, torrent_label, settings): final_result = 0 # start at 0. 
num_files = 0 try: @@ -40,14 +40,14 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): core.USER_SCRIPT_RUNONCE = int(settings.get("user_script_runOnce", 1)) if core.CHECK_MEDIA: - for video in list_media_files(outputDestination, media=True, audio=False, meta=False, archives=False): + for video in list_media_files(output_destination, media=True, audio=False, meta=False, archives=False): if transcoder.is_video_good(video, 0): import_subs(video) else: logger.info("Corrupt video file found {0}. Deleting.".format(video), "USERSCRIPT") os.unlink(video) - for dirpath, dirnames, filenames in os.walk(outputDestination): + for dirpath, dirnames, filenames in os.walk(output_destination): for file in filenames: file_path = core.os.path.join(dirpath, file) @@ -66,14 +66,14 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): command.append('{0}'.format(file_path)) continue elif param == "TN": - command.append('{0}'.format(torrentName)) + command.append('{0}'.format(torrent_name)) continue elif param == "TL": - command.append('{0}'.format(torrentLabel)) + command.append('{0}'.format(torrent_label)) continue elif param == "DN": if core.USER_SCRIPT_RUNONCE == 1: - command.append('{0}'.format(outputDestination)) + command.append('{0}'.format(output_destination)) else: command.append('{0}'.format(dirpath)) continue @@ -102,7 +102,7 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): final_result += result num_files_new = 0 - for dirpath, dirnames, filenames in os.walk(outputDestination): + for dirpath, dirnames, filenames in os.walk(output_destination): for file in filenames: file_name, file_extension = os.path.splitext(file) @@ -110,8 +110,8 @@ def external_script(outputDestination, torrentName, torrentLabel, settings): num_files_new += 1 if core.USER_SCRIPT_CLEAN == int(1) and num_files_new == 0 and final_result == 0: - logger.info("All files have been processed. Cleaning outputDirectory {0}".format(outputDestination)) - remove_dir(outputDestination) + logger.info("All files have been processed. Cleaning outputDirectory {0}".format(output_destination)) + remove_dir(output_destination) elif core.USER_SCRIPT_CLEAN == int(1) and num_files_new != 0: logger.info("{0} files were processed, but {1} still remain. 
outputDirectory will not be cleaned.".format( num_files, num_files_new)) diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py index d24a89c6..21985623 100644 --- a/core/nzbToMediaUtil.py +++ b/core/nzbToMediaUtil.py @@ -11,6 +11,7 @@ import socket import stat import struct import time +from functools import partial import beets import guessit @@ -36,12 +37,12 @@ def copyfileobj_fast(fsrc, fdst, length=512 * 1024): shutil.copyfileobjOrig(fsrc, fdst, length=length) shutil.copyfileobj = copyfileobj_fast -def report_nzb(failure_link, clientAgent): +def report_nzb(failure_link, client_agent): # Contact indexer site logger.info("Sending failure notification to indexer site") - if clientAgent == 'nzbget': + if client_agent == 'nzbget': headers = {'User-Agent': 'NZBGet / nzbToMedia.py'} - elif clientAgent == 'sabnzbd': + elif client_agent == 'sabnzbd': headers = {'User-Agent': 'SABnzbd / nzbToMedia.py'} else: return @@ -105,10 +106,7 @@ def remote_dir(path): return path -def category_search(inputDirectory, inputName, inputCategory, root, categories): - input_directory = inputDirectory - input_category = inputCategory - input_name = inputName +def category_search(input_directory, input_name, input_category, root, categories): tordir = False try: @@ -202,85 +200,84 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories): return input_directory, input_name, input_category, root -def get_dir_size(inputPath): - from functools import partial - prepend = partial(os.path.join, inputPath) +def get_dir_size(input_path): + prepend = partial(os.path.join, input_path) return sum([ (os.path.getsize(f) if os.path.isfile(f) else get_dir_size(f)) - for f in map(prepend, os.listdir(text_type(inputPath))) + for f in map(prepend, os.listdir(text_type(input_path))) ]) -def is_min_size(inputName, minSize): - file_name, file_ext = os.path.splitext(os.path.basename(inputName)) +def is_min_size(input_name, min_size): + file_name, file_ext = os.path.splitext(os.path.basename(input_name)) # audio files we need to check directory size not file size - input_size = os.path.getsize(inputName) + input_size = os.path.getsize(input_name) if file_ext in core.AUDIOCONTAINER: try: - input_size = get_dir_size(os.path.dirname(inputName)) + input_size = get_dir_size(os.path.dirname(input_name)) except: - logger.error("Failed to get file size for {0}".format(inputName), 'MINSIZE') + logger.error("Failed to get file size for {0}".format(input_name), 'MINSIZE') return True # Ignore files under a certain size - if input_size > minSize * 1048576: + if input_size > min_size * 1048576: return True -def is_sample(inputName): +def is_sample(input_name): # Ignore 'sample' in files - if re.search('(^|[\W_])sample\d*[\W_]', inputName.lower()): + if re.search('(^|[\W_])sample\d*[\W_]', input_name.lower()): return True -def copy_link(src, targetLink, useLink): - logger.info("MEDIAFILE: [{0}]".format(os.path.basename(targetLink)), 'COPYLINK') +def copy_link(src, target_link, use_link): + logger.info("MEDIAFILE: [{0}]".format(os.path.basename(target_link)), 'COPYLINK') logger.info("SOURCE FOLDER: [{0}]".format(os.path.dirname(src)), 'COPYLINK') - logger.info("TARGET FOLDER: [{0}]".format(os.path.dirname(targetLink)), 'COPYLINK') + logger.info("TARGET FOLDER: [{0}]".format(os.path.dirname(target_link)), 'COPYLINK') - if src != targetLink and os.path.exists(targetLink): + if src != target_link and os.path.exists(target_link): logger.info("MEDIAFILE already exists in the TARGET folder, skipping ...", 'COPYLINK') return 
True - elif src == targetLink and os.path.isfile(targetLink) and os.path.isfile(src): + elif src == target_link and os.path.isfile(target_link) and os.path.isfile(src): logger.info("SOURCE AND TARGET files are the same, skipping ...", 'COPYLINK') return True - elif src == os.path.dirname(targetLink): + elif src == os.path.dirname(target_link): logger.info("SOURCE AND TARGET folders are the same, skipping ...", 'COPYLINK') return True - make_dir(os.path.dirname(targetLink)) + make_dir(os.path.dirname(target_link)) try: - if useLink == 'dir': + if use_link == 'dir': logger.info("Directory linking SOURCE FOLDER -> TARGET FOLDER", 'COPYLINK') - linktastic.dirlink(src, targetLink) + linktastic.dirlink(src, target_link) return True - if useLink == 'junction': + if use_link == 'junction': logger.info("Directory junction linking SOURCE FOLDER -> TARGET FOLDER", 'COPYLINK') - linktastic.dirlink(src, targetLink) + linktastic.dirlink(src, target_link) return True - elif useLink == "hard": + elif use_link == "hard": logger.info("Hard linking SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK') - linktastic.link(src, targetLink) + linktastic.link(src, target_link) return True - elif useLink == "sym": + elif use_link == "sym": logger.info("Sym linking SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK') - linktastic.symlink(src, targetLink) + linktastic.symlink(src, target_link) return True - elif useLink == "move-sym": + elif use_link == "move-sym": logger.info("Sym linking SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK') - shutil.move(src, targetLink) - linktastic.symlink(targetLink, src) + shutil.move(src, target_link) + linktastic.symlink(target_link, src) return True - elif useLink == "move": + elif use_link == "move": logger.info("Moving SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK') - shutil.move(src, targetLink) + shutil.move(src, target_link) return True except Exception as e: logger.warning("Error: {0}, copying instead ... 
".format(e), 'COPYLINK') logger.info("Copying SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK') - shutil.copy(src, targetLink) + shutil.copy(src, target_link) return True @@ -309,26 +306,26 @@ def replace_links(link): linktastic.symlink(target, link) -def flatten(outputDestination): - logger.info("FLATTEN: Flattening directory: {0}".format(outputDestination)) - for outputFile in list_media_files(outputDestination): +def flatten(output_destination): + logger.info("FLATTEN: Flattening directory: {0}".format(output_destination)) + for outputFile in list_media_files(output_destination): dir_path = os.path.dirname(outputFile) file_name = os.path.basename(outputFile) - if dir_path == outputDestination: + if dir_path == output_destination: continue - target = os.path.join(outputDestination, file_name) + target = os.path.join(output_destination, file_name) try: shutil.move(outputFile, target) except: logger.error("Could not flatten {0}".format(outputFile), 'FLATTEN') - remove_empty_folders(outputDestination) # Cleanup empty directories + remove_empty_folders(output_destination) # Cleanup empty directories -def remove_empty_folders(path, removeRoot=True): +def remove_empty_folders(path, remove_root=True): """Function to remove empty folders""" if not os.path.isdir(path): return @@ -344,7 +341,7 @@ def remove_empty_folders(path, removeRoot=True): # if folder empty, delete it files = os.listdir(text_type(path)) - if len(files) == 0 and removeRoot: + if len(files) == 0 and remove_root: logger.debug("Removing empty folder:{}".format(path)) os.rmdir(path) @@ -413,8 +410,7 @@ def wake_up(): logger.info("System with mac: {0} has been woken. Continuing with the rest of the script.".format(mac)) -def char_replace(Name): - name = Name +def char_replace(name): # Special character hex range: # CP850: 0x80-0xA5 (fortunately not used in ISO-8859-15) # UTF-8: 1st hex code 0xC2-0xC3 followed by a 2nd hex code 0xA1-0xFF @@ -456,9 +452,7 @@ def char_replace(Name): return encoded, name -def convert_to_ascii(inputName, dirName): - input_name = inputName - dir_name = dirName +def convert_to_ascii(input_name, dir_name): ascii_convert = int(core.CFG["ASCII"]["convert"]) if ascii_convert == 0 or os.name == 'nt': # just return if we don't want to convert or on windows os and "\" is replaced!. 
@@ -629,7 +623,7 @@ def parse_qbittorrent(args): return input_directory, input_name, input_category, input_hash, input_id -def parse_args(clientAgent, args): +def parse_args(client_agent, args): clients = { 'other': parse_other, 'rtorrent': parse_rtorrent, @@ -641,7 +635,7 @@ def parse_args(clientAgent, args): } try: - return clients[clientAgent](args) + return clients[client_agent](args) except: return None, None, None, None, None @@ -771,12 +765,12 @@ def onerror(func, path, exc_info): raise Exception -def remove_dir(dirName): - logger.info("Deleting {0}".format(dirName)) +def remove_dir(dir_name): + logger.info("Deleting {0}".format(dir_name)) try: - shutil.rmtree(text_type(dirName), onerror=onerror) + shutil.rmtree(text_type(dir_name), onerror=onerror) except: - logger.error("Unable to delete folder {0}".format(dirName)) + logger.error("Unable to delete folder {0}".format(dir_name)) def clean_dir(path, section, subsection): @@ -791,7 +785,7 @@ def clean_dir(path, section, subsection): min_size = int(cfg.get('minSize', 0)) delete_ignored = int(cfg.get('delete_ignored', 0)) try: - num_files = len(list_media_files(path, minSize=min_size, delete_ignored=delete_ignored)) + num_files = len(list_media_files(path, min_size=min_size, delete_ignored=delete_ignored)) except: num_files = 'unknown' if num_files > 0: @@ -807,39 +801,39 @@ def clean_dir(path, section, subsection): logger.error("Unable to delete directory {0}".format(path)) -def create_torrent_class(clientAgent): +def create_torrent_class(client_agent): # Hardlink solution for Torrents tc = None - if clientAgent == 'utorrent': + if client_agent == 'utorrent': try: - logger.debug("Connecting to {0}: {1}".format(clientAgent, core.UTORRENTWEBUI)) + logger.debug("Connecting to {0}: {1}".format(client_agent, core.UTORRENTWEBUI)) tc = UTorrentClient(core.UTORRENTWEBUI, core.UTORRENTUSR, core.UTORRENTPWD) except: logger.error("Failed to connect to uTorrent") - if clientAgent == 'transmission': + if client_agent == 'transmission': try: logger.debug("Connecting to {0}: http://{1}:{2}".format( - clientAgent, core.TRANSMISSIONHOST, core.TRANSMISSIONPORT)) + client_agent, core.TRANSMISSIONHOST, core.TRANSMISSIONPORT)) tc = TransmissionClient(core.TRANSMISSIONHOST, core.TRANSMISSIONPORT, core.TRANSMISSIONUSR, core.TRANSMISSIONPWD) except: logger.error("Failed to connect to Transmission") - if clientAgent == 'deluge': + if client_agent == 'deluge': try: - logger.debug("Connecting to {0}: http://{1}:{2}".format(clientAgent, core.DELUGEHOST, core.DELUGEPORT)) + logger.debug("Connecting to {0}: http://{1}:{2}".format(client_agent, core.DELUGEHOST, core.DELUGEPORT)) tc = DelugeClient() tc.connect(host=core.DELUGEHOST, port=core.DELUGEPORT, username=core.DELUGEUSR, password=core.DELUGEPWD) except: logger.error("Failed to connect to Deluge") - if clientAgent == 'qbittorrent': + if client_agent == 'qbittorrent': try: - logger.debug("Connecting to {0}: http://{1}:{2}".format(clientAgent, core.QBITTORRENTHOST, core.QBITTORRENTPORT)) + logger.debug("Connecting to {0}: http://{1}:{2}".format(client_agent, core.QBITTORRENTHOST, core.QBITTORRENTPORT)) tc = qBittorrentClient("http://{0}:{1}/".format(core.QBITTORRENTHOST, core.QBITTORRENTPORT)) tc.login(core.QBITTORRENTUSR, core.QBITTORRENTPWD) except: @@ -848,81 +842,81 @@ def create_torrent_class(clientAgent): return tc -def pause_torrent(clientAgent, inputHash, inputID, inputName): - logger.debug("Stopping torrent {0} in {1} while processing".format(inputName, clientAgent)) +def pause_torrent(client_agent, 
input_hash, input_id, input_name): + logger.debug("Stopping torrent {0} in {1} while processing".format(input_name, client_agent)) try: - if clientAgent == 'utorrent' and core.TORRENT_CLASS != "": - core.TORRENT_CLASS.stop(inputHash) - if clientAgent == 'transmission' and core.TORRENT_CLASS != "": - core.TORRENT_CLASS.stop_torrent(inputID) - if clientAgent == 'deluge' and core.TORRENT_CLASS != "": - core.TORRENT_CLASS.core.pause_torrent([inputID]) - if clientAgent == 'qbittorrent' and core.TORRENT_CLASS != "": - core.TORRENT_CLASS.pause(inputHash) + if client_agent == 'utorrent' and core.TORRENT_CLASS != "": + core.TORRENT_CLASS.stop(input_hash) + if client_agent == 'transmission' and core.TORRENT_CLASS != "": + core.TORRENT_CLASS.stop_torrent(input_id) + if client_agent == 'deluge' and core.TORRENT_CLASS != "": + core.TORRENT_CLASS.core.pause_torrent([input_id]) + if client_agent == 'qbittorrent' and core.TORRENT_CLASS != "": + core.TORRENT_CLASS.pause(input_hash) time.sleep(5) except: - logger.warning("Failed to stop torrent {0} in {1}".format(inputName, clientAgent)) + logger.warning("Failed to stop torrent {0} in {1}".format(input_name, client_agent)) -def resume_torrent(clientAgent, inputHash, inputID, inputName): +def resume_torrent(client_agent, input_hash, input_id, input_name): if not core.TORRENT_RESUME == 1: return - logger.debug("Starting torrent {0} in {1}".format(inputName, clientAgent)) + logger.debug("Starting torrent {0} in {1}".format(input_name, client_agent)) try: - if clientAgent == 'utorrent' and core.TORRENT_CLASS != "": - core.TORRENT_CLASS.start(inputHash) - if clientAgent == 'transmission' and core.TORRENT_CLASS != "": - core.TORRENT_CLASS.start_torrent(inputID) - if clientAgent == 'deluge' and core.TORRENT_CLASS != "": - core.TORRENT_CLASS.core.resume_torrent([inputID]) - if clientAgent == 'qbittorrent' and core.TORRENT_CLASS != "": - core.TORRENT_CLASS.resume(inputHash) + if client_agent == 'utorrent' and core.TORRENT_CLASS != "": + core.TORRENT_CLASS.start(input_hash) + if client_agent == 'transmission' and core.TORRENT_CLASS != "": + core.TORRENT_CLASS.start_torrent(input_id) + if client_agent == 'deluge' and core.TORRENT_CLASS != "": + core.TORRENT_CLASS.core.resume_torrent([input_id]) + if client_agent == 'qbittorrent' and core.TORRENT_CLASS != "": + core.TORRENT_CLASS.resume(input_hash) time.sleep(5) except: - logger.warning("Failed to start torrent {0} in {1}".format(inputName, clientAgent)) + logger.warning("Failed to start torrent {0} in {1}".format(input_name, client_agent)) -def remove_torrent(clientAgent, inputHash, inputID, inputName): +def remove_torrent(client_agent, input_hash, input_id, input_name): if core.DELETE_ORIGINAL == 1 or core.USELINK == 'move': - logger.debug("Deleting torrent {0} from {1}".format(inputName, clientAgent)) + logger.debug("Deleting torrent {0} from {1}".format(input_name, client_agent)) try: - if clientAgent == 'utorrent' and core.TORRENT_CLASS != "": - core.TORRENT_CLASS.removedata(inputHash) - core.TORRENT_CLASS.remove(inputHash) - if clientAgent == 'transmission' and core.TORRENT_CLASS != "": - core.TORRENT_CLASS.remove_torrent(inputID, True) - if clientAgent == 'deluge' and core.TORRENT_CLASS != "": - core.TORRENT_CLASS.core.remove_torrent(inputID, True) - if clientAgent == 'qbittorrent' and core.TORRENT_CLASS != "": - core.TORRENT_CLASS.delete_permanently(inputHash) + if client_agent == 'utorrent' and core.TORRENT_CLASS != "": + core.TORRENT_CLASS.removedata(input_hash) + core.TORRENT_CLASS.remove(input_hash) + if 
client_agent == 'transmission' and core.TORRENT_CLASS != "": + core.TORRENT_CLASS.remove_torrent(input_id, True) + if client_agent == 'deluge' and core.TORRENT_CLASS != "": + core.TORRENT_CLASS.core.remove_torrent(input_id, True) + if client_agent == 'qbittorrent' and core.TORRENT_CLASS != "": + core.TORRENT_CLASS.delete_permanently(input_hash) time.sleep(5) except: - logger.warning("Failed to delete torrent {0} in {1}".format(inputName, clientAgent)) + logger.warning("Failed to delete torrent {0} in {1}".format(input_name, client_agent)) else: - resume_torrent(clientAgent, inputHash, inputID, inputName) + resume_torrent(client_agent, input_hash, input_id, input_name) -def find_download(clientAgent, download_id): - logger.debug("Searching for Download on {0} ...".format(clientAgent)) - if clientAgent == 'utorrent': +def find_download(client_agent, download_id): + logger.debug("Searching for Download on {0} ...".format(client_agent)) + if client_agent == 'utorrent': torrents = core.TORRENT_CLASS.list()[1]['torrents'] for torrent in torrents: if download_id in torrent: return True - if clientAgent == 'transmission': + if client_agent == 'transmission': torrents = core.TORRENT_CLASS.get_torrents() for torrent in torrents: hash = torrent.hashString if hash == download_id: return True - if clientAgent == 'deluge': + if client_agent == 'deluge': return False - if clientAgent == 'qbittorrent': + if client_agent == 'qbittorrent': torrents = core.TORRENT_CLASS.torrents() for torrent in torrents: if torrent['hash'] == download_id: return True - if clientAgent == 'sabnzbd': + if client_agent == 'sabnzbd': if "http" in core.SABNZBDHOST: base_url = "{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT) else: @@ -946,7 +940,7 @@ def find_download(clientAgent, download_id): return False -def get_nzoid(inputName): +def get_nzoid(input_name): nzoid = None slots = [] logger.debug("Searching for nzoid from SAbnzbd ...") @@ -967,7 +961,7 @@ def get_nzoid(inputName): return nzoid # failure try: result = r.json() - clean_name = os.path.splitext(os.path.split(inputName)[1])[0] + clean_name = os.path.splitext(os.path.split(input_name)[1])[0] slots.extend([(slot['nzo_id'], slot['filename']) for slot in result['queue']['slots']]) except: logger.warning("Data from SABnzbd queue could not be parsed") @@ -979,13 +973,13 @@ def get_nzoid(inputName): return nzoid # failure try: result = r.json() - clean_name = os.path.splitext(os.path.split(inputName)[1])[0] + clean_name = os.path.splitext(os.path.split(input_name)[1])[0] slots.extend([(slot['nzo_id'], slot['name']) for slot in result['history']['slots']]) except: logger.warning("Data from SABnzbd history could not be parsed") try: for nzo_id, name in slots: - if name in [inputName, clean_name]: + if name in [input_name, clean_name]: nzoid = nzo_id logger.debug("Found nzoid: {0}".format(nzoid)) break @@ -1039,14 +1033,14 @@ def is_media_file(mediafile, media=True, audio=True, meta=True, archives=True, o return False -def list_media_files(path, minSize=0, delete_ignored=0, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]): +def list_media_files(path, min_size=0, delete_ignored=0, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]): files = [] if not os.path.isdir(path): if os.path.isfile(path): # Single file downloads. 
             cur_file = os.path.split(path)[1]
             if is_media_file(cur_file, media, audio, meta, archives, other, otherext):
                 # Optionally ignore sample files
-                if is_sample(path) or not is_min_size(path, minSize):
+                if is_sample(path) or not is_min_size(path, min_size):
                     if delete_ignored == 1:
                         try:
                             os.unlink(path)
@@ -1064,11 +1058,11 @@ def list_media_files(path, minSize=0, delete_ignored=0, media=True, audio=True,

         # if it's a folder do it recursively
         if os.path.isdir(full_cur_file) and not cur_file.startswith('.'):
-            files += list_media_files(full_cur_file, minSize, delete_ignored, media, audio, meta, archives, other, otherext)
+            files += list_media_files(full_cur_file, min_size, delete_ignored, media, audio, meta, archives, other, otherext)

         elif is_media_file(cur_file, media, audio, meta, archives, other, otherext):
             # Optionally ignore sample files
-            if is_sample(full_cur_file) or not is_min_size(full_cur_file, minSize):
+            if is_sample(full_cur_file) or not is_min_size(full_cur_file, min_size):
                 if delete_ignored == 1:
                     try:
                         os.unlink(full_cur_file)
@@ -1083,20 +1077,20 @@ def list_media_files(path, minSize=0, delete_ignored=0, media=True, audio=True,
     return sorted(files, key=len)


-def find_imdbid(dirName, inputName, omdbApiKey):
+def find_imdbid(dir_name, input_name, omdb_api_key):
     imdbid = None

-    logger.info('Attemping imdbID lookup for {0}'.format(inputName))
+    logger.info('Attemping imdbID lookup for {0}'.format(input_name))

     # find imdbid in dirName
     logger.info('Searching folder and file names for imdbID ...')
-    m = re.search('(tt\d{7})', dirName + inputName)
+    m = re.search('(tt\d{7})', dir_name + input_name)
     if m:
         imdbid = m.group(1)
         logger.info("Found imdbID [{0}]".format(imdbid))
         return imdbid

-    if os.path.isdir(dirName):
-        for file in os.listdir(text_type(dirName)):
+    if os.path.isdir(dir_name):
+        for file in os.listdir(text_type(dir_name)):
             m = re.search('(tt\d{7})', file)
             if m:
                 imdbid = m.group(1)
@@ -1113,7 +1107,7 @@ def find_imdbid(dirName, inputName, omdbApiKey):
                 return imdbid
     logger.info('Searching IMDB for imdbID ...')
     try:
-        guess = guessit.guessit(inputName)
+        guess = guessit.guessit(input_name)
     except:
         guess = None
     if guess:
@@ -1129,15 +1123,15 @@ def find_imdbid(dirName, inputName, omdbApiKey):
     url = "http://www.omdbapi.com"

-    if not omdbApiKey:
+    if not omdb_api_key:
         logger.info("Unable to determine imdbID: No api key provided for ombdapi.com.")
         return

     logger.debug("Opening URL: {0}".format(url))

     try:
-        r = requests.get(url, params={'apikey': omdbApiKey, 'y': year, 't': title},
-                         verify=False, timeout=(60, 300))
+        r = requests.get(url, params={'apikey': omdb_api_key, 'y': year, 't': title},
+                         verify=False, timeout=(60, 300))
     except requests.ConnectionError:
         logger.error("Unable to open URL {0}".format(url))
         return
@@ -1156,7 +1150,7 @@ def find_imdbid(dirName, inputName, omdbApiKey):
             logger.info("Found imdbID [{0}]".format(imdbid))
             return imdbid

-    logger.warning('Unable to find a imdbID for {0}'.format(inputName))
+    logger.warning('Unable to find a imdbID for {0}'.format(input_name))
     return imdbid


@@ -1223,14 +1217,14 @@ def import_subs(filename):
         logger.error("Failed to download subtitles for {0} due to: {1}".format(filename, e), 'SUBTITLES')


-def server_responding(baseURL):
-    logger.debug("Attempting to connect to server at {0}".format(baseURL), 'SERVER')
+def server_responding(base_url):
+    logger.debug("Attempting to connect to server at {0}".format(base_url), 'SERVER')
     try:
-        requests.get(baseURL, timeout=(60, 120), verify=False)
-        logger.debug("Server responded at {0}".format(baseURL), 'SERVER')
+        requests.get(base_url, timeout=(60, 120), verify=False)
+        logger.debug("Server responded at {0}".format(base_url), 'SERVER')
         return True
     except (requests.ConnectionError, requests.exceptions.Timeout):
-        logger.error("Server failed to respond at {0}".format(baseURL), 'SERVER')
+        logger.error("Server failed to respond at {0}".format(base_url), 'SERVER')
         return False


@@ -1287,20 +1281,20 @@ def backup_versioned_file(old_file, version):
     return True


-def update_download_info_status(inputName, status):
-    logger.db("Updating status of our download {0} in the DB to {1}".format(inputName, status))
+def update_download_info_status(input_name, status):
+    logger.db("Updating status of our download {0} in the DB to {1}".format(input_name, status))

     my_db = nzbToMediaDB.DBConnection()
     my_db.action("UPDATE downloads SET status=?, last_update=? WHERE input_name=?",
-                [status, datetime.date.today().toordinal(), text_type(inputName)])
+                [status, datetime.date.today().toordinal(), text_type(input_name)])


-def get_download_info(inputName, status):
-    logger.db("Getting download info for {0} from the DB".format(inputName))
+def get_download_info(input_name, status):
+    logger.db("Getting download info for {0} from the DB".format(input_name))

     my_db = nzbToMediaDB.DBConnection()
     sql_results = my_db.select("SELECT * FROM downloads WHERE input_name=? AND status=?",
-                              [text_type(inputName), status])
+                              [text_type(input_name), status])

     return sql_results
diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py
index deb74e4b..49ca1ace 100644
--- a/core/transcoder/transcoder.py
+++ b/core/transcoder/transcoder.py
@@ -116,7 +116,7 @@ def get_video_details(videofile, img=None, bitbucket=None):
     return video_details, result


-def build_commands(file, newDir, movieName, bitbucket):
+def build_commands(file, new_dir, movie_name, bitbucket):
     if isinstance(file, string_types):
         input_file = file
         if 'concat:' in file:
@@ -126,12 +126,12 @@ def build_commands(file, newDir, movieName, bitbucket):
     name, ext = os.path.splitext(name)
     check = re.match("VTS_([0-9][0-9])_[0-9]+", name)
     if check and core.CONCAT:
-        name = movieName
+        name = movie_name
     elif check:
-        name = ('{0}.cd{1}'.format(movieName, check.groups()[0]))
+        name = ('{0}.cd{1}'.format(movie_name, check.groups()[0]))
     elif core.CONCAT and re.match("(.+)[cC][dD][0-9]", name):
         name = re.sub("([\ \.\-\_\=\:]+[cC][dD][0-9])", "", name)
-    if ext == core.VEXTENSION and newDir == dir:  # we need to change the name to prevent overwriting itself.
+    if ext == core.VEXTENSION and new_dir == dir:  # we need to change the name to prevent overwriting itself.
         core.VEXTENSION = '-transcoded{ext}'.format(ext=core.VEXTENSION)  # adds '-transcoded.ext'
     else:
         img, data = next(iteritems(file))
@@ -140,7 +140,7 @@ def build_commands(file, newDir, movieName, bitbucket):
             input_file = '-'
         file = '-'

-    newfile_path = os.path.normpath(os.path.join(newDir, name) + core.VEXTENSION)
+    newfile_path = os.path.normpath(os.path.join(new_dir, name) + core.VEXTENSION)

     map_cmd = []
     video_cmd = []
@@ -527,7 +527,7 @@ def get_subs(file):
     return subfiles


-def extract_subs(file, newfilePath, bitbucket):
+def extract_subs(file, newfile_path, bitbucket):
     video_details, result = get_video_details(file)
     if not video_details:
         return
@@ -535,8 +535,8 @@
     if core.SUBSDIR:
         subdir = core.SUBSDIR
     else:
-        subdir = os.path.split(newfilePath)[0]
-    name = os.path.splitext(os.path.split(newfilePath)[1])[0]
+        subdir = os.path.split(newfile_path)[0]
+    name = os.path.splitext(os.path.split(newfile_path)[1])[0]

     try:
         sub_streams = [item for item in video_details["streams"] if
@@ -586,17 +586,17 @@
         logger.error("Extracting subtitles has failed")


-def process_list(List, newDir, bitbucket):
+def process_list(it, new_dir, bitbucket):
     rem_list = []
     new_list = []
     combine = []
     vts_path = None
     success = True
-    for item in List:
+    for item in it:
         ext = os.path.splitext(item)[1].lower()
         if ext in ['.iso', '.bin', '.img'] and ext not in core.IGNOREEXTENSIONS:
             logger.debug("Attempting to rip disk image: {0}".format(item), "TRANSCODER")
-            new_list.extend(rip_iso(item, newDir, bitbucket))
+            new_list.extend(rip_iso(item, new_dir, bitbucket))
             rem_list.append(item)
         elif re.match(".+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", item) and '.vob' not in core.IGNOREEXTENSIONS:
             logger.debug("Found VIDEO_TS image file: {0}".format(item), "TRANSCODER")
@@ -622,18 +622,18 @@
             success = False
             break
     if success and new_list:
-        List.extend(new_list)
+        it.extend(new_list)
         for item in rem_list:
-            List.remove(item)
+            it.remove(item)
         logger.debug("Successfully extracted .vob file {0} from disk image".format(new_list[0]), "TRANSCODER")
     elif new_list and not success:
         new_list = []
         rem_list = []
         logger.error("Failed extracting .vob files from disk image. Stopping transcoding.", "TRANSCODER")
-    return List, rem_list, new_list, success
+    return it, rem_list, new_list, success


-def rip_iso(item, newDir, bitbucket):
+def rip_iso(item, new_dir, bitbucket):
     new_files = []
     failure_dir = 'failure'
     # Mount the ISO in your OS and call combineVTS.
@@ -681,7 +681,7 @@ def rip_iso(item, new_dir, bitbucket):
     return new_files


-def combine_vts(vtsPath):
+def combine_vts(vts_path):
     new_files = []
     combined = ''
     for n in range(99):
@@ -689,8 +689,8 @@
         m = 1
         while True:
             vts_name = 'VTS_{0:02d}_{1:d}.VOB'.format(n + 1, m)
-            if os.path.isfile(os.path.join(vtsPath, vts_name)):
-                concat += '{file}|'.format(file=os.path.join(vtsPath, vts_name))
+            if os.path.isfile(os.path.join(vts_path, vts_name)):
+                concat += '{file}|'.format(file=os.path.join(vts_path, vts_name))
                 m += 1
             else:
                 break
@@ -728,29 +728,29 @@ def print_cmd(command):
     logger.debug("calling command:{0}".format(cmd))


-def transcode_directory(dirName):
+def transcode_directory(dir_name):
     if not core.FFMPEG:
-        return 1, dirName
+        return 1, dir_name
     logger.info("Checking for files to be transcoded")
     final_result = 0  # initialize as successful
     if core.OUTPUTVIDEOPATH:
         new_dir = core.OUTPUTVIDEOPATH
         make_dir(new_dir)
-        name = os.path.splitext(os.path.split(dirName)[1])[0]
+        name = os.path.splitext(os.path.split(dir_name)[1])[0]
         new_dir = os.path.join(new_dir, name)
         make_dir(new_dir)
     else:
-        new_dir = dirName
+        new_dir = dir_name
     if platform.system() == 'Windows':
         bitbucket = open('NUL')
     else:
         bitbucket = open('/dev/null')
-    movie_name = os.path.splitext(os.path.split(dirName)[1])[0]
-    file_list = core.list_media_files(dirName, media=True, audio=False, meta=False, archives=False)
+    movie_name = os.path.splitext(os.path.split(dir_name)[1])[0]
+    file_list = core.list_media_files(dir_name, media=True, audio=False, meta=False, archives=False)
     file_list, rem_list, new_list, success = process_list(file_list, new_dir, bitbucket)
     if not success:
         bitbucket.close()
-        return 1, dirName
+        return 1, dir_name

     for file in file_list:
         if isinstance(file, string_types) and os.path.splitext(file)[1] in core.IGNOREEXTENSIONS:
@@ -821,8 +821,8 @@ def transcode_directory(dirName):
             pass
     if not os.listdir(text_type(new_dir)):  # this is an empty directory and we didn't transcode into it.
         os.rmdir(new_dir)
-        new_dir = dirName
+        new_dir = dir_name
     if not core.PROCESSOUTPUT and core.DUPLICATE:  # We postprocess the original files to CP/SB
-        new_dir = dirName
+        new_dir = dir_name
     bitbucket.close()
     return final_result, new_dir
diff --git a/nzbToMedia.py b/nzbToMedia.py
index 8eae32cc..0a855443 100755
--- a/nzbToMedia.py
+++ b/nzbToMedia.py
@@ -644,20 +644,17 @@ except NameError:


 # post-processing
-def process(inputDirectory, inputName=None, status=0, clientAgent='manual', download_id=None, inputCategory=None, failureLink=None):
-    input_directory = inputDirectory
-    input_name = inputName
-    input_category = inputCategory
+def process(input_directory, input_name=None, status=0, client_agent='manual', download_id=None, input_category=None, failure_link=None):
     if core.SAFE_MODE and input_directory == core.NZB_DEFAULTDIR:
         logger.error(
             'The input directory:[{0}] is the Default Download Directory. Please configure category directories to prevent processing of other media.'.format(
                 input_directory))
         return [-1, ""]

-    if not download_id and clientAgent == 'sabnzbd':
+    if not download_id and client_agent == 'sabnzbd':
         download_id = get_nzoid(input_name)

-    if clientAgent != 'manual' and not core.DOWNLOADINFO:
+    if client_agent != 'manual' and not core.DOWNLOADINFO:
         logger.debug('Adding NZB download info for directory {0} to database'.format(input_directory))

         my_db = nzbToMediaDB.DBConnection()
@@ -675,7 +672,7 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down
         new_value_dict = {"input_name": text_type(input_name1),
                           "input_hash": text_type(download_id),
                           "input_id": text_type(download_id),
-                          "client_agent": text_type(clientAgent),
+                          "client_agent": text_type(client_agent),
                           "status": 0,
                           "last_update": datetime.date.today().toordinal()
                           }
@@ -732,18 +729,18 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down
     logger.info("Calling {0}:{1} to post-process:{2}".format(section_name, input_category, input_name))

     if section_name in ["CouchPotato", "Radarr"]:
-        result = Movie().process(section_name, input_directory, input_name, status, clientAgent, download_id,
-                                 input_category, failureLink)
+        result = Movie().process(section_name, input_directory, input_name, status, client_agent, download_id,
+                                 input_category, failure_link)
     elif section_name in ["SickBeard", "NzbDrone", "Sonarr"]:
-        result = TV().process_episode(section_name, input_directory, input_name, status, clientAgent,
-                                      download_id, input_category, failureLink)
+        result = TV().process_episode(section_name, input_directory, input_name, status, client_agent,
+                                      download_id, input_category, failure_link)
     elif section_name in ["HeadPhones", "Lidarr"]:
-        result = Music().process(section_name, input_directory, input_name, status, clientAgent, input_category)
+        result = Music().process(section_name, input_directory, input_name, status, client_agent, input_category)
     elif section_name == "Mylar":
-        result = Comic().process_episode(section_name, input_directory, input_name, status, clientAgent,
+        result = Comic().process_episode(section_name, input_directory, input_name, status, client_agent,
                                          input_category)
     elif section_name == "Gamez":
-        result = Game().process(section_name, input_directory, input_name, status, clientAgent, input_category)
+        result = Game().process(section_name, input_directory, input_name, status, client_agent, input_category)
     elif section_name == 'UserScript':
         result = external_script(input_directory, input_name, input_category, section[usercat])
     else:
@@ -752,7 +749,7 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down
     plex_update(input_category)

     if result[0] == 0:
-        if clientAgent != 'manual':
+        if client_agent != 'manual':
             # update download status in our DB
             update_download_info_status(input_name, 1)
         if section_name not in ['UserScript', 'NzbDrone', 'Sonarr', 'Radarr', 'Lidarr']:
@@ -836,8 +833,8 @@ def main(args, section=None):
         # All checks done, now launching the script.
client_agent = 'nzbget' result = process(os.environ['NZBPP_DIRECTORY'], input_name=os.environ['NZBPP_NZBNAME'], status=status, - clientAgent=client_agent, download_id=download_id, input_category=os.environ['NZBPP_CATEGORY'], - failureLink=failure_link) + client_agent=client_agent, download_id=download_id, input_category=os.environ['NZBPP_CATEGORY'], + failure_link=failure_link) # SABnzbd Pre 0.7.17 elif len(args) == core.SABNZB_NO_OF_ARGUMENTS: # SABnzbd argv: @@ -850,7 +847,7 @@ def main(args, section=None): # 7 Status of post processing. 0 = OK, 1=failed verification, 2=failed unpack, 3=1+2 client_agent = 'sabnzbd' logger.info("Script triggered from SABnzbd") - result = process(args[1], input_name=args[2], status=args[7], input_category=args[5], clientAgent=client_agent, + result = process(args[1], input_name=args[2], status=args[7], input_category=args[5], client_agent=client_agent, download_id='') # SABnzbd 0.7.17+ elif len(args) >= core.SABNZB_0717_NO_OF_ARGUMENTS: @@ -865,8 +862,8 @@ def main(args, section=None): # 8 Failure URL client_agent = 'sabnzbd' logger.info("Script triggered from SABnzbd 0.7.17+") - result = process(args[1], input_name=args[2], status=args[7], input_category=args[5], clientAgent=client_agent, - download_id='', failureLink=''.join(args[8:])) + result = process(args[1], input_name=args[2], status=args[7], input_category=args[5], client_agent=client_agent, + download_id='', failure_link=''.join(args[8:])) # Generic program elif len(args) > 5 and args[5] == 'generic': logger.info("Script triggered from generic program") @@ -910,7 +907,7 @@ def main(args, section=None): except UnicodeError: pass - results = process(dir_name, input_name, 0, clientAgent=client_agent, + results = process(dir_name, input_name, 0, client_agent=client_agent, download_id=download_id or None, input_category=subsection) if results[0] != 0: logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format