From d8cbf422dddd388c946918f3aa09d06c3dc43293 Mon Sep 17 00:00:00 2001
From: Labrys of Knossos
Date: Sun, 16 Dec 2018 21:37:44 -0500
Subject: [PATCH] PEP8 Function name should be lowercase

---
 TorrentToMedia.py                     |  50 +++++-----
 core/__init__.py                      |  16 ++--
 core/autoProcess/autoProcessComics.py |   6 +-
 core/autoProcess/autoProcessMovie.py  |  26 ++---
 core/autoProcess/autoProcessMusic.py  |  22 ++---
 core/autoProcess/autoProcessTV.py     |  38 ++++----
 core/databases/mainDB.py              |  14 +--
 core/extractor/extractor.py           |   2 +-
 core/gh_api.py                        |   6 +-
 core/logger.py                        |   2 +-
 core/nzbToMediaAutoFork.py            |   2 +-
 core/nzbToMediaDB.py                  |  38 ++++----
 core/nzbToMediaSceneExceptions.py     |   4 +-
 core/nzbToMediaUserScript.py          |   8 +-
 core/nzbToMediaUtil.py                | 132 +++++++++++++-------------
 core/transcoder/transcoder.py         |  46 ++++-----
 nzbToMedia.py                         |  24 ++---
 tests/general.py                      |   6 +-
 18 files changed, 221 insertions(+), 221 deletions(-)

diff --git a/TorrentToMedia.py b/TorrentToMedia.py
index a32c2983..e021ee1d 100755
--- a/TorrentToMedia.py
+++ b/TorrentToMedia.py
@@ -7,11 +7,11 @@ import sys
 import core
 from core import logger, nzbToMediaDB
 from core.nzbToMediaUserScript import external_script
-from core.nzbToMediaUtil import CharReplace, convert_to_ascii, plex_update, replace_links
+from core.nzbToMediaUtil import char_replace, convert_to_ascii, plex_update, replace_links
 from libs.six import text_type


-def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent):
+def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent):
     input_directory = inputDirectory
     input_name = inputName
     input_category = inputCategory
@@ -29,8 +29,8 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
     input_name1 = input_name

     try:
-        encoded, input_directory1 = CharReplace(input_directory)
-        encoded, input_name1 = CharReplace(input_name)
+        encoded, input_directory1 = char_replace(input_directory)
+        encoded, input_name1 = char_replace(input_name)
     except:
         pass
@@ -109,12 +109,12 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
     # This way Processing is isolated.
     if not os.path.isdir(os.path.join(input_directory, input_name)):
         basename = os.path.basename(input_directory)
-        basename = core.sanitizeName(input_name) \
-            if input_name == basename else os.path.splitext(core.sanitizeName(input_name))[0]
+        basename = core.sanitize_name(input_name) \
+            if input_name == basename else os.path.splitext(core.sanitize_name(input_name))[0]
         output_destination = os.path.join(core.OUTPUTDIRECTORY, input_category, basename)
     elif unique_path:
         output_destination = os.path.normpath(
-            core.os.path.join(core.OUTPUTDIRECTORY, input_category, core.sanitizeName(input_name).replace(" ",".")))
+            core.os.path.join(core.OUTPUTDIRECTORY, input_category, core.sanitize_name(input_name).replace(" ", ".")))
     else:
         output_destination = os.path.normpath(
             core.os.path.join(core.OUTPUTDIRECTORY, input_category))
@@ -143,9 +143,9 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
     now = datetime.datetime.now()
     if extract == 1:
-        input_files = core.listMediaFiles(input_directory, archives=False, other=True, otherext=extensions)
+        input_files = core.list_media_files(input_directory, archives=False, other=True, otherext=extensions)
     else:
-        input_files = core.listMediaFiles(input_directory, other=True, otherext=extensions)
+        input_files = core.list_media_files(input_directory, other=True, otherext=extensions)
     if len(input_files) == 0 and os.path.isfile(input_directory):
         input_files = [input_directory]
         logger.debug("Found 1 file to process: {0}".format(input_directory))
@@ -170,8 +170,8 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
         if root == 1:
             if not found_file:
                 logger.debug("Looking for {0} in: {1}".format(input_name, inputFile))
-            if any([core.sanitizeName(input_name) in core.sanitizeName(inputFile),
-                    core.sanitizeName(file_name) in core.sanitizeName(input_name)]):
+            if any([core.sanitize_name(input_name) in core.sanitize_name(inputFile),
+                    core.sanitize_name(file_name) in core.sanitize_name(input_name)]):
                 found_file = True
                 logger.debug("Found file {0} that matches Torrent Name {1}".format
                              (full_file_name, input_name))
@@ -194,7 +194,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
             if torrent_no_link == 0:
                 try:
                     core.copy_link(inputFile, target_file, core.USELINK)
-                    core.rmReadOnly(target_file)
+                    core.remove_read_only(target_file)
                 except:
                     logger.error("Failed to link: {0} to {1}".format(inputFile, target_file))
@@ -202,7 +202,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,

     if extract == 1:
         logger.debug('Checking for archives to extract in directory: {0}'.format(input_directory))
-        core.extractFiles(input_directory, output_destination, keep_archive)
+        core.extract_files(input_directory, output_destination, keep_archive)

     if input_category not in core.NOFLATTEN:
         # don't flatten hp in case multi cd albums, and we need to copy this back later.
@@ -211,7 +211,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
     # Now check if video files exist in destination:
     if section_name in ["SickBeard", "NzbDrone", "Sonarr", "CouchPotato", "Radarr"]:
         num_videos = len(
-            core.listMediaFiles(output_destination, media=True, audio=False, meta=False, archives=False))
+            core.list_media_files(output_destination, media=True, audio=False, meta=False, archives=False))
         if num_videos > 0:
             logger.info("Found {0} media files in {1}".format(num_videos, output_destination))
             status = 0
@@ -241,14 +241,14 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
     elif section_name in ['SickBeard', 'NzbDrone', 'Sonarr']:
         if input_hash:
             input_hash = input_hash.upper()
-        result = core.autoProcessTV().processEpisode(section_name, output_destination, input_name,
-                                                     status, clientAgent, input_hash, input_category)
+        result = core.autoProcessTV().process_episode(section_name, output_destination, input_name,
+                                                      status, clientAgent, input_hash, input_category)
     elif section_name in ['HeadPhones', 'Lidarr']:
         result = core.autoProcessMusic().process(section_name, output_destination, input_name, status,
                                                  clientAgent, input_category)
     elif section_name == 'Mylar':
-        result = core.autoProcessComics().processEpisode(section_name, output_destination, input_name,
-                                                         status, clientAgent, input_category)
+        result = core.autoProcessComics().process_episode(section_name, output_destination, input_name,
+                                                          status, clientAgent, input_category)
     elif section_name == 'Gamez':
         result = core.autoProcessGames().process(section_name, output_destination, input_name, status,
                                                  clientAgent, input_category)
@@ -267,7 +267,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
     else:
         if clientAgent != 'manual':
             # update download status in our DB
-            core.update_downloadInfoStatus(input_name, 1)
+            core.update_download_info_status(input_name, 1)

             # remove torrent
             if core.USELINK == 'move-sym' and not core.DELETE_ORIGINAL == 1:
@@ -281,7 +281,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
         if not section_name == 'UserScript':
            # for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN
            # cleanup our processing folders of any misc unwanted files and empty directories
-            core.cleanDir(output_destination, section_name, input_category)
+            core.clean_dir(output_destination, section_name, input_category)

     return result
@@ -310,7 +310,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
         return -1

     if input_directory and input_name and input_hash and input_id:
-        result = processTorrent(input_directory, input_name, input_category, input_hash, input_id, client_agent)
+        result = process_torrent(input_directory, input_name, input_category, input_hash, input_id, client_agent)
     else:
         # Perform Manual Post-Processing
         logger.warning("Invalid number of arguments received from client, Switching to manual run mode ...")
@@ -319,13 +319,13 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
         for subsection in subsections:
             if not core.CFG[section][subsection].isenabled():
                 continue
-            for dir_name in core.getDirs(section, subsection, link='hard'):
+            for dir_name in core.get_dirs(section, subsection, link='hard'):
                 logger.info("Starting manual run for {0}:{1} - Folder:{2}".format
                             (section, subsection, dir_name))

                 logger.info("Checking database for download info for {0} ...".format
                             (os.path.basename(dir_name)))
-                core.DOWNLOADINFO = core.get_downloadInfo(os.path.basename(dir_name), 0)
+                core.DOWNLOADINFO = core.get_download_info(os.path.basename(dir_name), 0)
                 if core.DOWNLOADINFO:
                     client_agent = text_type(core.DOWNLOADINFO[0].get('client_agent', 'manual'))
                     input_hash = text_type(core.DOWNLOADINFO[0].get('input_hash', ''))
@@ -353,8 +353,8 @@ def main(args):
                 except UnicodeError:
                     pass

-                results = processTorrent(dir_name, input_name, subsection, input_hash or None, input_id or None,
-                                         client_agent)
+                results = process_torrent(dir_name, input_name, subsection, input_hash or None, input_id or None,
+                                          client_agent)
                 if results[0] != 0:
                     logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format
                                  (section, subsection))
diff --git a/core/__init__.py b/core/__init__.py
index 212158ca..ca9f2d8d 100644
--- a/core/__init__.py
+++ b/core/__init__.py
@@ -45,10 +45,10 @@ from core.autoProcess.autoProcessTV import autoProcessTV
 from core.databases import mainDB
 from core.nzbToMediaConfig import config
 from core.nzbToMediaUtil import (
-    RunningProcess, WakeUp, category_search, cleanDir, cleanDir, copy_link,
-    create_torrent_class, extractFiles, flatten, getDirs, get_downloadInfo,
-    listMediaFiles, makeDir, parse_args, pause_torrent, remove_torrent,
-    resume_torrent, rmDir, rmReadOnly, sanitizeName, update_downloadInfoStatus,
+    RunningProcess, wake_up, category_search, clean_dir, clean_dir, copy_link,
+    create_torrent_class, extract_files, flatten, get_dirs, get_download_info,
+    list_media_files, make_dir, parse_args, pause_torrent, remove_torrent,
+    resume_torrent, remove_dir, remove_read_only, sanitize_name, update_download_info_status,
 )
 from core.transcoder import transcoder
@@ -255,7 +255,7 @@ def initialize(section=None):
         LOG_FILE = os.environ['NTM_LOGFILE']
         LOG_DIR = os.path.split(LOG_FILE)[0]

-    if not makeDir(LOG_DIR):
+    if not make_dir(LOG_DIR):
         print("No log folder, logging to screen only")

     MYAPP = RunningProcess()
@@ -291,7 +291,7 @@ def initialize(section=None):
         sys.exit(1)

     # init logging
-    logger.ntm_log_instance.initLogging()
+    logger.ntm_log_instance.init_logging()

     # run migrate to convert old cfg to new style cfg plus fix any cfg missing values/options.
     if not config.migrate():
@@ -320,7 +320,7 @@ def initialize(section=None):
             logger.info("{0}: {1}".format(item, os.environ[item]), "ENVIRONMENT")

     # initialize the main SB database
-    nzbToMediaDB.upgradeDatabase(nzbToMediaDB.DBConnection(), mainDB.InitialSchema)
+    nzbToMediaDB.upgrade_database(nzbToMediaDB.DBConnection(), mainDB.InitialSchema)

     # Set Version and GIT variables
     NZBTOMEDIA_VERSION = '11.06'
@@ -357,7 +357,7 @@ def initialize(section=None):
                 system=platform.system(), release=platform.release()))

     if int(CFG["WakeOnLan"]["wake"]) == 1:
-        WakeUp()
+        wake_up()

     NZB_CLIENTAGENT = CFG["Nzb"]["clientAgent"]  # sabnzbd
     SABNZBDHOST = CFG["Nzb"]["sabnzbd_host"]
diff --git a/core/autoProcess/autoProcessComics.py b/core/autoProcess/autoProcessComics.py
index 6d4e87f7..c2d00bd1 100644
--- a/core/autoProcess/autoProcessComics.py
+++ b/core/autoProcess/autoProcessComics.py
@@ -6,13 +6,13 @@ import requests

 import core
 from core import logger
-from core.nzbToMediaUtil import convert_to_ascii, remoteDir, server_responding
+from core.nzbToMediaUtil import convert_to_ascii, remote_dir, server_responding

 requests.packages.urllib3.disable_warnings()


 class autoProcessComics(object):
-    def processEpisode(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None):
+    def process_episode(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None):
         dir_name = dirName
         input_name = inputName
@@ -42,7 +42,7 @@ class autoProcessComics(object):
         params = {
             'cmd': 'forceProcess',
             'apikey': apikey,
-            'nzb_folder': remoteDir(dir_name) if remote_path else dir_name,
+            'nzb_folder': remote_dir(dir_name) if remote_path else dir_name,
         }

         if input_name is not None:
diff --git a/core/autoProcess/autoProcessMovie.py b/core/autoProcess/autoProcessMovie.py
index 44daf1e7..4a30e1cf 100644
--- a/core/autoProcess/autoProcessMovie.py
+++ b/core/autoProcess/autoProcessMovie.py
@@ -9,7 +9,7 @@ import requests
 import core
 from core import logger
 from core.nzbToMediaSceneExceptions import process_all_exceptions
-from core.nzbToMediaUtil import convert_to_ascii, find_download, find_imdbid, import_subs, listMediaFiles, remoteDir, reportNzb, rmDir, server_responding
+from core.nzbToMediaUtil import convert_to_ascii, find_download, find_imdbid, import_subs, list_media_files, remote_dir, report_nzb, remove_dir, server_responding
 from core.transcoder import transcoder

 requests.packages.urllib3.disable_warnings()
@@ -129,7 +129,7 @@ class autoProcessMovie(object):
             logger.error("{0} did not return expected json data.".format(section), section)
             return None

-    def CDH(self, url2, headers, section="MAIN"):
+    def completed_download_handling(self, url2, headers, section="MAIN"):
         try:
             r = requests.get(url2, params={}, headers=headers, stream=True, verify=False, timeout=(30, 60))
         except requests.ConnectionError:
@@ -223,17 +223,17 @@ class autoProcessMovie(object):
         process_all_exceptions(input_name, dir_name)
         input_name, dir_name = convert_to_ascii(input_name, dir_name)

-        if not listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False) and listMediaFiles(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
+        if not list_media_files(dir_name, media=True, audio=False, meta=False, archives=False) and list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
             logger.debug('Checking for archives to extract in directory: {0}'.format(dir_name))
-            core.extractFiles(dir_name)
+            core.extract_files(dir_name)
             input_name, dir_name = convert_to_ascii(input_name, dir_name)

         good_files = 0
         num_files = 0
         # Check video files for corruption
-        for video in listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False):
+        for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
             num_files += 1
-            if transcoder.isVideoGood(video, status):
+            if transcoder.is_video_good(video, status):
                 import_subs(video)
                 good_files += 1
         if num_files and good_files == num_files:
@@ -258,7 +258,7 @@ class autoProcessMovie(object):

         if status == 0:
             if core.TRANSCODE == 1:
-                result, new_dir_name = transcoder.Transcode_directory(dir_name)
+                result, new_dir_name = transcoder.transcode_directory(dir_name)
                 if result == 0:
                     logger.debug("Transcoding succeeded for files in {0}".format(dir_name), section)
                     dir_name = new_dir_name
@@ -271,7 +271,7 @@ class autoProcessMovie(object):
                 else:
                     logger.error("Transcoding failed for files in {0}".format(dir_name), section)
                     return [1, "{0}: Failed to post-process - Transcoding failed".format(section)]
-        for video in listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False):
+        for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
             if not release and ".cp(tt" not in video and imdbid:
                 video_name, video_ext = os.path.splitext(video)
                 video2 = "{0}.cp({1}){2}".format(video_name, imdbid, video_ext)
@@ -288,7 +288,7 @@ class autoProcessMovie(object):

         params['downloader'] = downloader or clientAgent
         params['download_id'] = download_id
-        params['media_folder'] = remoteDir(dir_name) if remote_path else dir_name
+        params['media_folder'] = remote_dir(dir_name) if remote_path else dir_name

         if section == "CouchPotato":
             if method == "manage":
@@ -344,7 +344,7 @@ class autoProcessMovie(object):
             core.FAILED = True
             logger.postprocess("FAILED DOWNLOAD DETECTED FOR {0}".format(input_name), section)
             if failureLink:
-                reportNzb(failureLink, clientAgent)
+                report_nzb(failureLink, clientAgent)

             if section == "Radarr":
                 logger.postprocess("FAILED: The download failed. Sending failed download to {0} for CDH processing".format(section), section)
@@ -352,7 +352,7 @@ class autoProcessMovie(object):

             if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name:
                 logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section)
-                rmDir(dir_name)
+                remove_dir(dir_name)

             if not release_id and not media_id:
                 logger.error("Could not find a downloaded movie in the database matching {0}, exiting!".format(input_name),
@@ -451,7 +451,7 @@ class autoProcessMovie(object):
                     dir_name), section)
                 return [0, "{0}: Successfully post-processed {1}".format(section, input_name)]

-            elif not listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=True):
+            elif not list_media_files(dir_name, media=True, audio=False, meta=False, archives=True):
                 logger.postprocess("SUCCESS: Input Directory [{0}] has no remaining media files. This has been fully processed.".format(
                     dir_name), section)
                 return [0, "{0}: Successfully post-processed {1}".format(section, input_name)]
@@ -460,7 +460,7 @@ class autoProcessMovie(object):
             time.sleep(10 * wait_for)

         # The status hasn't changed. we have waited wait_for minutes which is more than enough. uTorrent can resume seeding now.
-        if section == "Radarr" and self.CDH(url2, headers, section=section):
+        if section == "Radarr" and self.completed_download_handling(url2, headers, section=section):
             logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.".format(section), section)
             return [status, "{0}: Complete DownLoad Handling is enabled. Passing back to {1}".format(section, section)]
         logger.warning(
diff --git a/core/autoProcess/autoProcessMusic.py b/core/autoProcess/autoProcessMusic.py
index f931c42e..343bbe92 100644
--- a/core/autoProcess/autoProcessMusic.py
+++ b/core/autoProcess/autoProcessMusic.py
@@ -9,7 +9,7 @@ import requests
 import core
 from core import logger
 from core.nzbToMediaSceneExceptions import process_all_exceptions
-from core.nzbToMediaUtil import convert_to_ascii, listMediaFiles, remoteDir, rmDir, server_responding
+from core.nzbToMediaUtil import convert_to_ascii, list_media_files, remote_dir, remove_dir, server_responding

 requests.packages.urllib3.disable_warnings()
@@ -58,7 +58,7 @@ class autoProcessMusic(object):
             if os.path.basename(dirName) == album['FolderName']:
                 return album["Status"].lower()

-    def forceProcess(self, params, url, apikey, inputName, dirName, section, wait_for):
+    def force_process(self, params, url, apikey, inputName, dirName, section, wait_for):
         release_status = self.get_status(url, apikey, dirName)
         if not release_status:
             logger.error("Could not find a status for {0}, is it in the wanted list ?".format(inputName), section)
@@ -140,9 +140,9 @@ class autoProcessMusic(object):
         process_all_exceptions(input_name, dir_name)
         input_name, dir_name = convert_to_ascii(input_name, dir_name)

-        if not listMediaFiles(dir_name, media=False, audio=True, meta=False, archives=False) and listMediaFiles(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
+        if not list_media_files(dir_name, media=False, audio=True, meta=False, archives=False) and list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
             logger.debug('Checking for archives to extract in directory: {0}'.format(dir_name))
-            core.extractFiles(dir_name)
+            core.extract_files(dir_name)
             input_name, dir_name = convert_to_ascii(input_name, dir_name)

         #if listMediaFiles(dir_name, media=False, audio=True, meta=False, archives=False) and status:
@@ -154,20 +154,20 @@ class autoProcessMusic(object):
         params = {
             'apikey': apikey,
             'cmd': "forceProcess",
-            'dir': remoteDir(dir_name) if remote_path else dir_name
+            'dir': remote_dir(dir_name) if remote_path else dir_name
         }

-        res = self.forceProcess(params, url, apikey, input_name, dir_name, section, wait_for)
+        res = self.force_process(params, url, apikey, input_name, dir_name, section, wait_for)
         if res[0] in [0, 1]:
             return res

         params = {
             'apikey': apikey,
             'cmd': "forceProcess",
-            'dir': os.path.split(remoteDir(dir_name))[0] if remote_path else os.path.split(dir_name)[0]
+            'dir': os.path.split(remote_dir(dir_name))[0] if remote_path else os.path.split(dir_name)[0]
         }

-        res = self.forceProcess(params, url, apikey, input_name, dir_name, section, wait_for)
+        res = self.force_process(params, url, apikey, input_name, dir_name, section, wait_for)
         if res[0] in [0, 1]:
             return res
@@ -179,8 +179,8 @@ class autoProcessMusic(object):
             url = "{0}{1}:{2}{3}/api/v1/command".format(protocol, host, port, web_root)
             headers = {"X-Api-Key": apikey}
             if remote_path:
-                logger.debug("remote_path: {0}".format(remoteDir(dir_name)), section)
-                data = {"name": "Rename", "path": remoteDir(dir_name)}
+                logger.debug("remote_path: {0}".format(remote_dir(dir_name)), section)
+                data = {"name": "Rename", "path": remote_dir(dir_name)}
             else:
                 logger.debug("path: {0}".format(dir_name), section)
                 data = {"name": "Rename", "path": dir_name}
@@ -238,5 +238,5 @@ class autoProcessMusic(object):
         logger.warning("FAILED DOWNLOAD DETECTED", section)
         if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name:
             logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section)
-            rmDir(dir_name)
+            remove_dir(dir_name)
         return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)]  # Return as failed to flag this in the downloader.
\ No newline at end of file
diff --git a/core/autoProcess/autoProcessTV.py b/core/autoProcess/autoProcessTV.py
index 3175a852..0a2b7761 100644
--- a/core/autoProcess/autoProcessTV.py
+++ b/core/autoProcess/autoProcessTV.py
@@ -10,9 +10,9 @@ import requests

 import core
 from core import logger
-from core.nzbToMediaAutoFork import autoFork
+from core.nzbToMediaAutoFork import auto_fork
 from core.nzbToMediaSceneExceptions import process_all_exceptions
-from core.nzbToMediaUtil import convert_to_ascii, flatten, import_subs, listMediaFiles, remoteDir, reportNzb, rmDir, server_responding
+from core.nzbToMediaUtil import convert_to_ascii, flatten, import_subs, list_media_files, remote_dir, report_nzb, remove_dir, server_responding
 from core.transcoder import transcoder

 requests.packages.urllib3.disable_warnings()
@@ -36,7 +36,7 @@ class autoProcessTV(object):
             logger.error("{0} did not return expected json data.".format(section), section)
             return None

-    def CDH(self, url2, headers, section="MAIN"):
+    def completed_download_handling(self, url2, headers, section="MAIN"):
         try:
             r = requests.get(url2, params={}, headers=headers, stream=True, verify=False, timeout=(30, 60))
         except requests.ConnectionError:
@@ -52,7 +52,7 @@ class autoProcessTV(object):
             # ValueError catches simplejson's JSONDecodeError and json's ValueError
             return False

-    def processEpisode(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None):
+    def process_episode(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None):

         cfg = dict(core.CFG[section][inputCategory])
@@ -67,7 +67,7 @@ class autoProcessTV(object):

         if server_responding("{0}{1}:{2}{3}".format(protocol, host, port, web_root)):
             # auto-detect correct fork
-            fork, fork_params = autoFork(section, inputCategory)
+            fork, fork_params = auto_fork(section, inputCategory)
         elif not username and not apikey:
             logger.info('No SickBeard username or Sonarr apikey entered. Performing transcoder functions only')
             fork, fork_params = "None", {}
@@ -119,21 +119,21 @@ class autoProcessTV(object):
         input_name, dir_name = convert_to_ascii(input_name, dir_name)

         # Now check if tv files exist in destination.
-        if not listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False):
-            if listMediaFiles(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
+        if not list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
+            if list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
                 logger.debug('Checking for archives to extract in directory: {0}'.format(dir_name))
-                core.extractFiles(dir_name)
+                core.extract_files(dir_name)
                 input_name, dir_name = convert_to_ascii(input_name, dir_name)

-        if listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False):  # Check that a video exists. if not, assume failed.
+        if list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):  # Check that a video exists. if not, assume failed.
             flatten(dir_name)

         # Check video files for corruption
         good_files = 0
         num_files = 0
-        for video in listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False):
+        for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
             num_files += 1
-            if transcoder.isVideoGood(video, status):
+            if transcoder.is_video_good(video, status):
                 good_files += 1
                 import_subs(video)
         if num_files > 0:
@@ -170,7 +170,7 @@ class autoProcessTV(object):
                 print('[NZB] MARK=BAD')

         if status == 0 and core.TRANSCODE == 1:  # only transcode successful downloads
-            result, new_dir_name = transcoder.Transcode_directory(dir_name)
+            result, new_dir_name = transcoder.transcode_directory(dir_name)
             if result == 0:
                 logger.debug("SUCCESS: Transcoding succeeded for files in {0}".format(dir_name), section)
                 dir_name = new_dir_name
@@ -209,7 +209,7 @@ class autoProcessTV(object):
             if param in ["dir_name", "dir", "proc_dir", "process_directory", "path"]:
                 fork_params[param] = dir_name
                 if remote_path:
-                    fork_params[param] = remoteDir(dir_name)
+                    fork_params[param] = remote_dir(dir_name)

             if param == "process_method":
                 if process_method:
@@ -249,7 +249,7 @@ class autoProcessTV(object):
         else:
             core.FAILED = True
             if failureLink:
-                reportNzb(failureLink, clientAgent)
+                report_nzb(failureLink, clientAgent)
             if 'failed' in fork_params:
                 logger.postprocess("FAILED: The download failed. Sending 'failed' process request to {0} branch".format(fork), section)
             elif section == "NzbDrone":
@@ -259,7 +259,7 @@ class autoProcessTV(object):
                 logger.postprocess("FAILED: The download failed. {0} branch does not handle failed downloads. Nothing to process".format(fork), section)
             if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name:
                 logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section)
-                rmDir(dir_name)
+                remove_dir(dir_name)
             return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)]  # Return as failed to flag this in the downloader.

         url = None
@@ -274,8 +274,8 @@ class autoProcessTV(object):
         headers = {"X-Api-Key": apikey}
         # params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'}
         if remote_path:
-            logger.debug("remote_path: {0}".format(remoteDir(dir_name)), section)
-            data = {"name": "DownloadedEpisodesScan", "path": remoteDir(dir_name), "downloadClientId": download_id, "importMode": import_mode}
+            logger.debug("remote_path: {0}".format(remote_dir(dir_name)), section)
+            data = {"name": "DownloadedEpisodesScan", "path": remote_dir(dir_name), "downloadClientId": download_id, "importMode": import_mode}
         else:
             logger.debug("path: {0}".format(dir_name), section)
             data = {"name": "DownloadedEpisodesScan", "path": dir_name, "downloadClientId": download_id, "importMode": import_mode}
@@ -340,7 +340,7 @@ class autoProcessTV(object):

         if status != 0 and delete_failed and not os.path.dirname(dir_name) == dir_name:
             logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section)
-            rmDir(dir_name)
+            remove_dir(dir_name)

         if success:
             return [0, "{0}: Successfully post-processed {1}".format(section, input_name)]
@@ -365,7 +365,7 @@ class autoProcessTV(object):
             elif command_status and command_status in ['failed']:
                 logger.debug("The Scan command has failed. Renaming was not successful.", section)
                 # return [1, "%s: Failed to post-process %s" % (section, input_name) ]
-            if self.CDH(url2, headers, section=section):
+            if self.completed_download_handling(url2, headers, section=section):
                 logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.".format(section), section)
                 return [status, "{0}: Complete DownLoad Handling is enabled. Passing back to {1}".format(section, section)]
             else:
diff --git a/core/databases/mainDB.py b/core/databases/mainDB.py
index d79033db..d6c1e1c3 100644
--- a/core/databases/mainDB.py
+++ b/core/databases/mainDB.py
@@ -1,15 +1,15 @@
 # coding=utf-8

 from core import logger, nzbToMediaDB
-from core.nzbToMediaUtil import backupVersionedFile
+from core.nzbToMediaUtil import backup_versioned_file

 MIN_DB_VERSION = 1  # oldest db version we support migrating from
 MAX_DB_VERSION = 2


-def backupDatabase(version):
+def backup_database(version):
     logger.info("Backing up database before upgrade")
-    if not backupVersionedFile(nzbToMediaDB.dbFilename(), version):
+    if not backup_versioned_file(nzbToMediaDB.db_filename(), version):
         logger.log_error_and_exit("Database backup failed, abort upgrading database")
     else:
         logger.info("Proceeding with upgrade")
@@ -23,13 +23,13 @@ def backupDatabase(version):
 class InitialSchema(nzbToMediaDB.SchemaUpgrade):
     def test(self):
         no_update = False
-        if self.hasTable("db_version"):
-            cur_db_version = self.checkDBVersion()
+        if self.has_table("db_version"):
+            cur_db_version = self.check_db_version()
             no_update = not cur_db_version < MAX_DB_VERSION
         return no_update

     def execute(self):
-        if not self.hasTable("downloads") and not self.hasTable("db_version"):
+        if not self.has_table("downloads") and not self.has_table("db_version"):
             queries = [
                 "CREATE TABLE db_version (db_version INTEGER);",
                 "CREATE TABLE downloads (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));",
@@ -39,7 +39,7 @@ class InitialSchema(nzbToMediaDB.SchemaUpgrade):
                 self.connection.action(query)

         else:
-            cur_db_version = self.checkDBVersion()
+            cur_db_version = self.check_db_version()

             if cur_db_version < MIN_DB_VERSION:
                 logger.log_error_and_exit(u"Your database version ({current}) is too old to migrate "
diff --git a/core/extractor/extractor.py b/core/extractor/extractor.py
index 4f3b4454..14a3a6ba 100644
--- a/core/extractor/extractor.py
+++ b/core/extractor/extractor.py
@@ -88,7 +88,7 @@ def extract(filePath, outputDestination):
             return False

     # Create outputDestination folder
-    core.makeDir(outputDestination)
+    core.make_dir(outputDestination)

     if core.PASSWORDSFILE and os.path.isfile(os.path.normpath(core.PASSWORDSFILE)):
         passwords = [line.strip() for line in open(os.path.normpath(core.PASSWORDSFILE))]
diff --git a/core/gh_api.py b/core/gh_api.py
index 97433d9b..6e44f9f3 100644
--- a/core/gh_api.py
+++ b/core/gh_api.py
@@ -14,7 +14,7 @@ class GitHub(object):
         self.github_repo = github_repo
         self.branch = branch

-    def _access_API(self, path, params=None):
+    def _access_api(self, path, params=None):
         """
         Access the API at the path given and with the optional params given.
         """
@@ -32,7 +32,7 @@ class GitHub(object):
         Returns a deserialized json object containing the commit info. See http://developer.github.com/v3/repos/commits/
         """
-        return self._access_API(
+        return self._access_api(
             ['repos', self.github_repo_user, self.github_repo, 'commits'],
             params={'per_page': 100, 'sha': self.branch},
         )
@@ -49,7 +49,7 @@ class GitHub(object):
         Returns a deserialized json object containing the compare info. See http://developer.github.com/v3/repos/commits/
         """
-        return self._access_API(
+        return self._access_api(
             ['repos', self.github_repo_user, self.github_repo, 'compare',
              '{base}...{head}'.format(base=base, head=head)],
             params={'per_page': per_page},
diff --git a/core/logger.py b/core/logger.py
index 1b5d0e9f..7720af33 100644
--- a/core/logger.py
+++ b/core/logger.py
@@ -58,7 +58,7 @@ class NTMRotatingLogHandler(object):
             handler.flush()
             handler.close()

-    def initLogging(self, consoleLogging=True):
+    def init_logging(self, consoleLogging=True):

         if consoleLogging:
             self.console_logging = consoleLogging
diff --git a/core/nzbToMediaAutoFork.py b/core/nzbToMediaAutoFork.py
index 8df8c313..0c824176 100644
--- a/core/nzbToMediaAutoFork.py
+++ b/core/nzbToMediaAutoFork.py
@@ -7,7 +7,7 @@ import core
 from core import logger


-def autoFork(section, inputCategory):
+def auto_fork(section, inputCategory):
     # auto-detect correct section
     # config settings
diff --git a/core/nzbToMediaDB.py b/core/nzbToMediaDB.py
index 2b555203..f5c49410 100644
--- a/core/nzbToMediaDB.py
+++ b/core/nzbToMediaDB.py
@@ -12,7 +12,7 @@ import core
 from core import logger


-def dbFilename(filename="nzbtomedia.db", suffix=None):
+def db_filename(filename="nzbtomedia.db", suffix=None):
     """
     @param filename: The sqlite database filename to use. If not specified,
                      will be made to be nzbtomedia.db
@@ -29,13 +29,13 @@ class DBConnection(object):
     def __init__(self, filename="nzbtomedia.db", suffix=None, row_type=None):

         self.filename = filename
-        self.connection = sqlite3.connect(dbFilename(filename), 20)
+        self.connection = sqlite3.connect(db_filename(filename), 20)
         if row_type == "dict":
             self.connection.row_factory = self._dict_factory
         else:
             self.connection.row_factory = sqlite3.Row

-    def checkDBVersion(self):
+    def check_db_version(self):
         result = None
         try:
             result = self.select("SELECT db_version FROM db_version")
@@ -196,7 +196,7 @@ class DBConnection(object):
             list(valueDict.values())
         )

-    def tableInfo(self, tableName):
+    def table_info(self, tableName):
         # FIXME ? binding is not supported here, but I cannot find a way to escape a string manually
         cursor = self.connection.execute("PRAGMA table_info({0})".format(tableName))
         columns = {}
@@ -212,7 +212,7 @@ class DBConnection(object):
         return d


-def sanityCheckDatabase(connection, sanity_check):
+def sanity_check_database(connection, sanity_check):
     sanity_check(connection).check()
@@ -228,22 +228,22 @@ class DBSanityCheck(object):
 # = Upgrade API =
 # ===============

-def upgradeDatabase(connection, schema):
+def upgrade_database(connection, schema):
     logger.log(u"Checking database structure...", logger.MESSAGE)
-    _processUpgrade(connection, schema)
+    _process_upgrade(connection, schema)


-def prettyName(class_name):
+def pretty_name(class_name):
     return ' '.join([x.group() for x in re.finditer("([A-Z])([a-z0-9]+)", class_name)])


-def _processUpgrade(connection, upgradeClass):
+def _process_upgrade(connection, upgradeClass):
     instance = upgradeClass(connection)
     logger.log(u"Checking {name} database upgrade".format
-               (name=prettyName(upgradeClass.__name__)), logger.DEBUG)
+               (name=pretty_name(upgradeClass.__name__)), logger.DEBUG)
     if not instance.test():
         logger.log(u"Database upgrade required: {name}".format
-                   (name=prettyName(upgradeClass.__name__)), logger.MESSAGE)
+                   (name=pretty_name(upgradeClass.__name__)), logger.MESSAGE)
         try:
             instance.execute()
         except sqlite3.DatabaseError as error:
@@ -257,7 +257,7 @@ def _processUpgrade(connection, upgradeClass):
                    (name=upgradeClass.__name__), logger.DEBUG)

     for upgradeSubClass in upgradeClass.__subclasses__():
-        _processUpgrade(connection, upgradeSubClass)
+        _process_upgrade(connection, upgradeSubClass)


 # Base migration class. All future DB changes should be subclassed from this class
@@ -265,24 +265,24 @@ class SchemaUpgrade(object):
     def __init__(self, connection):
         self.connection = connection

-    def hasTable(self, tableName):
+    def has_table(self, tableName):
         return len(self.connection.action("SELECT 1 FROM sqlite_master WHERE name = ?;", (tableName,)).fetchall()) > 0

-    def hasColumn(self, tableName, column):
-        return column in self.connection.tableInfo(tableName)
+    def has_column(self, tableName, column):
+        return column in self.connection.table_info(tableName)

-    def addColumn(self, table, column, type="NUMERIC", default=0):
+    def add_column(self, table, column, type="NUMERIC", default=0):
         self.connection.action("ALTER TABLE {0} ADD {1} {2}".format(table, column, type))
         self.connection.action("UPDATE {0} SET {1} = ?".format(table, column), (default,))

-    def checkDBVersion(self):
+    def check_db_version(self):
         result = self.connection.select("SELECT db_version FROM db_version")
         if result:
             return int(result[-1]["db_version"])
         else:
             return 0

-    def incDBVersion(self):
-        new_version = self.checkDBVersion() + 1
+    def inc_db_version(self):
+        new_version = self.check_db_version() + 1
         self.connection.action("UPDATE db_version SET db_version = ?", [new_version])
         return new_version
diff --git a/core/nzbToMediaSceneExceptions.py b/core/nzbToMediaSceneExceptions.py
index 45cb6fce..5cb283a9 100644
--- a/core/nzbToMediaSceneExceptions.py
+++ b/core/nzbToMediaSceneExceptions.py
@@ -8,7 +8,7 @@ import subprocess

 import core
 from core import logger
-from core.nzbToMediaUtil import listMediaFiles
+from core.nzbToMediaUtil import list_media_files

 reverse_list = [r"\.\d{2}e\d{2}s\.", r"\.[pi]0801\.", r"\.p027\.", r"\.[pi]675\.", r"\.[pi]084\.", r"\.p063\.",
                 r"\b[45]62[xh]\.", r"\.yarulb\.", r"\.vtd[hp]\.",
@@ -32,7 +32,7 @@ char_replace = [[r"(\w)1\.(\w)", r"\1i\2"]
 def process_all_exceptions(name, dirname):
     par2(dirname)
     rename_script(dirname)
-    for filename in listMediaFiles(dirname):
+    for filename in list_media_files(dirname):
         newfilename = None
         parent_dir = os.path.dirname(filename)
         head, file_extension = os.path.splitext(os.path.basename(filename))
diff --git a/core/nzbToMediaUserScript.py b/core/nzbToMediaUserScript.py
index da353892..32d2f014 100644
--- a/core/nzbToMediaUserScript.py
+++ b/core/nzbToMediaUserScript.py
@@ -5,7 +5,7 @@ from subprocess import Popen

 import core
 from core import logger
-from core.nzbToMediaUtil import import_subs, listMediaFiles, rmDir
+from core.nzbToMediaUtil import import_subs, list_media_files, remove_dir
 from core.transcoder import transcoder
@@ -40,8 +40,8 @@ def external_script(outputDestination, torrentName, torrentLabel, settings):
     core.USER_SCRIPT_RUNONCE = int(settings.get("user_script_runOnce", 1))

     if core.CHECK_MEDIA:
-        for video in listMediaFiles(outputDestination, media=True, audio=False, meta=False, archives=False):
-            if transcoder.isVideoGood(video, 0):
+        for video in list_media_files(outputDestination, media=True, audio=False, meta=False, archives=False):
+            if transcoder.is_video_good(video, 0):
                 import_subs(video)
             else:
                 logger.info("Corrupt video file found {0}. Deleting.".format(video), "USERSCRIPT")
@@ -111,7 +111,7 @@ def external_script(outputDestination, torrentName, torrentLabel, settings):

     if core.USER_SCRIPT_CLEAN == int(1) and num_files_new == 0 and final_result == 0:
         logger.info("All files have been processed. Cleaning outputDirectory {0}".format(outputDestination))
-        rmDir(outputDestination)
+        remove_dir(outputDestination)
     elif core.USER_SCRIPT_CLEAN == int(1) and num_files_new != 0:
         logger.info("{0} files were processed, but {1} still remain. outputDirectory will not be cleaned.".format(
             num_files, num_files_new))
diff --git a/core/nzbToMediaUtil.py b/core/nzbToMediaUtil.py
index 7d8bdcda..d24a89c6 100644
--- a/core/nzbToMediaUtil.py
+++ b/core/nzbToMediaUtil.py
@@ -32,11 +32,11 @@ requests.packages.urllib3.disable_warnings()

 # Monkey Patch shutil.copyfileobj() to adjust the buffer length to 512KB rather than 4KB
 shutil.copyfileobjOrig = shutil.copyfileobj
-def copyfileobjFast(fsrc, fdst, length=512*1024): shutil.copyfileobjOrig(fsrc, fdst, length=length)
-shutil.copyfileobj = copyfileobjFast
+def copyfileobj_fast(fsrc, fdst, length=512 * 1024): shutil.copyfileobjOrig(fsrc, fdst, length=length)
+shutil.copyfileobj = copyfileobj_fast


-def reportNzb(failure_link, clientAgent):
+def report_nzb(failure_link, clientAgent):
     # Contact indexer site
     logger.info("Sending failure notification to indexer site")
     if clientAgent == 'nzbget':
@@ -52,15 +52,15 @@ def reportNzb(failure_link, clientAgent):
     return


-def sanitizeName(name):
+def sanitize_name(name):
     """
-    >>> sanitizeName('a/b/c')
+    >>> sanitize_name('a/b/c')
     'a-b-c'
-    >>> sanitizeName('abc')
+    >>> sanitize_name('abc')
     'abc'
-    >>> sanitizeName('a"b')
+    >>> sanitize_name('a"b')
     'ab'
-    >>> sanitizeName('.a.b..')
+    >>> sanitize_name('.a.b..')
     'a.b'
     """
@@ -78,7 +78,7 @@ def makeDir(path):
     if not os.path.isdir(path):
         try:
             os.makedirs(path)
@@ -87,7 +87,7 @@ def remoteDir(path):
     if not core.REMOTEPATHS:
         return path
     for local, remote in core.REMOTEPATHS:
@@ -151,10 +151,10 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories):
         input_directory = os.path.join(input_directory, input_name)
         logger.info("SEARCH: Setting input_directory to {0}".format(input_directory))
         tordir = True
-    elif input_name and os.path.isdir(os.path.join(input_directory, sanitizeName(input_name))):
+    elif input_name and os.path.isdir(os.path.join(input_directory, sanitize_name(input_name))):
         logger.info("SEARCH: Found torrent directory {0} in input directory directory {1}".format(
-            sanitizeName(input_name), input_directory))
-        input_directory = os.path.join(input_directory, sanitizeName(input_name))
+            sanitize_name(input_name), input_directory))
+        input_directory = os.path.join(input_directory, sanitize_name(input_name))
         logger.info("SEARCH: Setting input_directory to {0}".format(input_directory))
         tordir = True
     elif input_name and os.path.isfile(os.path.join(input_directory, input_name)):
@@ -162,10 +162,10 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories):
         input_directory = os.path.join(input_directory, input_name)
         logger.info("SEARCH: Setting input_directory to {0}".format(input_directory))
         tordir = True
-    elif input_name and os.path.isfile(os.path.join(input_directory, sanitizeName(input_name))):
+    elif input_name and os.path.isfile(os.path.join(input_directory, sanitize_name(input_name))):
         logger.info("SEARCH: Found torrent file {0} in input directory directory {1}".format(
-            sanitizeName(input_name), input_directory))
-        input_directory = os.path.join(input_directory, sanitizeName(input_name))
+            sanitize_name(input_name), input_directory))
+        input_directory = os.path.join(input_directory, sanitize_name(input_name))
         logger.info("SEARCH: Setting input_directory to {0}".format(input_directory))
         tordir = True
@@ -187,7 +187,7 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories):
             pass

     if input_name and not tordir:
-        if input_name in pathlist or sanitizeName(input_name) in pathlist:
+        if input_name in pathlist or sanitize_name(input_name) in pathlist:
             logger.info("SEARCH: Found torrent directory {0} in the directory structure".format(input_name))
             tordir = True
         else:
@@ -202,23 +202,23 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories):
     return input_directory, input_name, input_category, root


-def getDirSize(inputPath):
+def get_dir_size(inputPath):
     from functools import partial
     prepend = partial(os.path.join, inputPath)
     return sum([
-        (os.path.getsize(f) if os.path.isfile(f) else getDirSize(f))
+        (os.path.getsize(f) if os.path.isfile(f) else get_dir_size(f))
         for f in map(prepend, os.listdir(text_type(inputPath)))
     ])


-def is_minSize(inputName, minSize):
+def is_min_size(inputName, minSize):
     file_name, file_ext = os.path.splitext(os.path.basename(inputName))

     # audio files we need to check directory size not file size
     input_size = os.path.getsize(inputName)
     if file_ext in core.AUDIOCONTAINER:
         try:
-            input_size = getDirSize(os.path.dirname(inputName))
+            input_size = get_dir_size(os.path.dirname(inputName))
         except:
             logger.error("Failed to get file size for {0}".format(inputName), 'MINSIZE')
             return True
@@ -249,7 +249,7 @@ def copy_link(src, targetLink, useLink):
         logger.info("SOURCE AND TARGET folders are the same, skipping ...", 'COPYLINK')
         return True

-    makeDir(os.path.dirname(targetLink))
+    make_dir(os.path.dirname(targetLink))
     try:
         if useLink == 'dir':
             logger.info("Directory linking SOURCE FOLDER -> TARGET FOLDER", 'COPYLINK')
@@ -311,7 +311,7 @@ def replace_links(link):

 def flatten(outputDestination):
     logger.info("FLATTEN: Flattening directory: {0}".format(outputDestination))
-    for outputFile in listMediaFiles(outputDestination):
+    for outputFile in list_media_files(outputDestination):
         dir_path = os.path.dirname(outputFile)
         file_name = os.path.basename(outputFile)
@@ -325,10 +325,10 @@ def flatten(outputDestination):
         except:
             logger.error("Could not flatten {0}".format(outputFile), 'FLATTEN')

-    removeEmptyFolders(outputDestination)  # Cleanup empty directories
+    remove_empty_folders(outputDestination)  # Cleanup empty directories


-def removeEmptyFolders(path, removeRoot=True):
+def remove_empty_folders(path, removeRoot=True):
     """Function to remove empty folders"""
     if not os.path.isdir(path):
         return
@@ -340,7 +340,7 @@ def removeEmptyFolders(path, removeRoot=True):
     for f in files:
         fullpath = os.path.join(path, f)
         if os.path.isdir(fullpath):
-            removeEmptyFolders(fullpath)
+            remove_empty_folders(fullpath)

     # if folder empty, delete it
     files = os.listdir(text_type(path))
@@ -349,7 +349,7 @@ def removeEmptyFolders(path, removeRoot=True):
         os.rmdir(path)


-def rmReadOnly(filename):
+def remove_read_only(filename):
     if os.path.isfile(filename):
         # check first the read-only attribute
         file_attribute = os.stat(filename)[0]
@@ -364,7 +364,7 @@ def rmReadOnly(filename):


 # Wake function
-def WakeOnLan(ethernet_address):
+def wake_on_lan(ethernet_address):
     addr_byte = ethernet_address.split(':')
     hw_addr = struct.pack(b'BBBBBB', int(addr_byte[0], 16),
                           int(addr_byte[1], 16),
@@ -386,7 +386,7 @@ def WakeOnLan(ethernet_address):


 # Test Connection function
-def TestCon(host, port):
+def test_connection(host, port):
     try:
         socket.create_connection((host, port))
         return "Up"
@@ -394,26 +394,26 @@ def TestCon(host, port):
         return "Down"


-def WakeUp():
+def wake_up():
     host = core.CFG["WakeOnLan"]["host"]
     port = int(core.CFG["WakeOnLan"]["port"])
     mac = core.CFG["WakeOnLan"]["mac"]

     i = 1
-    while TestCon(host, port) == "Down" and i < 4:
+    while test_connection(host, port) == "Down" and i < 4:
         logger.info(("Sending WakeOnLan Magic Packet for mac: {0}".format(mac)))
-        WakeOnLan(mac)
+        wake_on_lan(mac)
         time.sleep(20)
         i = i + 1

-    if TestCon(host, port) == "Down":  # final check.
+    if test_connection(host, port) == "Down":  # final check.
         logger.warning("System with mac: {0} has not woken after 3 attempts. "
                        "Continuing with the rest of the script.".format(mac))
     else:
         logger.info("System with mac: {0} has been woken. Continuing with the rest of the script.".format(mac))


-def CharReplace(Name):
+def char_replace(Name):
     name = Name
     # Special character hex range:
     # CP850: 0x80-0xA5 (fortunately not used in ISO-8859-15)
@@ -464,13 +464,13 @@ def convert_to_ascii(inputName, dirName):
     if ascii_convert == 0 or os.name == 'nt':  # just return if we don't want to convert or on windows os and "\" is replaced!.
         return input_name, dir_name

-    encoded, input_name = CharReplace(input_name)
+    encoded, input_name = char_replace(input_name)

     dir, base = os.path.split(dir_name)
     if not base:  # ended with "/"
         dir, base = os.path.split(dir)

-    encoded, base2 = CharReplace(base)
+    encoded, base2 = char_replace(base)
     if encoded:
         dir_name = os.path.join(dir, base2)
         logger.info("Renaming directory to: {0}.".format(base2), 'ENCODER')
@@ -480,14 +480,14 @@ def convert_to_ascii(inputName, dirName):

     for dirname, dirnames, filenames in os.walk(dir_name, topdown=False):
         for subdirname in dirnames:
-            encoded, subdirname2 = CharReplace(subdirname)
+            encoded, subdirname2 = char_replace(subdirname)
             if encoded:
                 logger.info("Renaming directory to: {0}.".format(subdirname2), 'ENCODER')
                 os.rename(os.path.join(dirname, subdirname), os.path.join(dirname, subdirname2))

     for dirname, dirnames, filenames in os.walk(dir_name):
         for filename in filenames:
-            encoded, filename2 = CharReplace(filename)
+            encoded, filename2 = char_replace(filename)
             if encoded:
                 logger.info("Renaming file to: {0}.".format(filename2), 'ENCODER')
                 os.rename(os.path.join(dirname, filename), os.path.join(dirname, filename2))
@@ -646,10 +646,10 @@ def parse_args(clientAgent, args):
         return None, None, None, None, None


-def getDirs(section, subsection, link='hard'):
+def get_dirs(section, subsection, link='hard'):
     to_return = []

-    def processDir(path):
+    def process_dir(path):
         folders = []

         logger.info("Searching {0} for mediafiles to post-process ...".format(path))
@@ -674,7 +674,7 @@ def getDirs(section, subsection, link='hard'):
                     album = f.album

                     # create new path
-                    new_path = os.path.join(path, "{0} - {1}".format(sanitizeName(artist), sanitizeName(album)))
+                    new_path = os.path.join(path, "{0} - {1}".format(sanitize_name(artist), sanitize_name(album)))
                 elif file_ext in core.MEDIACONTAINER:
                     f = guessit.guessit(mediafile)
@@ -684,13 +684,13 @@ def getDirs(section, subsection, link='hard'):
                     if not title:
                         title = os.path.splitext(os.path.basename(mediafile))[0]

-                    new_path = os.path.join(path, sanitizeName(title))
+                    new_path = os.path.join(path, sanitize_name(title))
             except Exception as e:
                 logger.error("Exception parsing name for media file: {0}: {1}".format(os.path.split(mediafile)[1], e))

             if not new_path:
                 title = os.path.splitext(os.path.basename(mediafile))[0]
-                new_path = os.path.join(path, sanitizeName(title))
+                new_path = os.path.join(path, sanitize_name(title))

             try:
                 new_path = new_path.encode(core.SYS_ENCODING)
@@ -704,9 +704,9 @@ def getDirs(section, subsection, link='hard'):

             # create new path if it does not exist
             if not os.path.exists(new_path):
-                makeDir(new_path)
+                make_dir(new_path)

-            newfile = os.path.join(new_path, sanitizeName(os.path.split(mediafile)[1]))
+            newfile = os.path.join(new_path, sanitize_name(os.path.split(mediafile)[1]))
             try:
                 newfile = newfile.encode(core.SYS_ENCODING)
             except:
@@ -731,9 +731,9 @@ def getDirs(section, subsection, link='hard'):
     try:
         watch_dir = os.path.join(core.CFG[section][subsection]["watch_dir"], subsection)
         if os.path.exists(watch_dir):
-            to_return.extend(processDir(watch_dir))
+            to_return.extend(process_dir(watch_dir))
         elif os.path.exists(core.CFG[section][subsection]["watch_dir"]):
-            to_return.extend(processDir(core.CFG[section][subsection]["watch_dir"]))
+            to_return.extend(process_dir(core.CFG[section][subsection]["watch_dir"]))
     except Exception as e:
         logger.error("Failed to add directories from {0} for post-processing: {1}".format
                      (core.CFG[section][subsection]["watch_dir"], e))
@@ -742,7 +742,7 @@ def getDirs(section, subsection, link='hard'):
     try:
         output_directory = os.path.join(core.OUTPUTDIRECTORY, subsection)
         if os.path.exists(output_directory):
-            to_return.extend(processDir(output_directory))
+            to_return.extend(process_dir(output_directory))
     except Exception as e:
         logger.error("Failed to add directories from {0} for post-processing: {1}".format(core.OUTPUTDIRECTORY, e))
@@ -771,7 +771,7 @@ def onerror(func, path, exc_info):
         raise Exception


-def rmDir(dirName):
+def remove_dir(dirName):
     logger.info("Deleting {0}".format(dirName))
     try:
         shutil.rmtree(text_type(dirName), onerror=onerror)
@@ -779,19 +779,19 @@ def rmDir(dirName):
         logger.error("Unable to delete folder {0}".format(dirName))


-def cleanDir(path, section, subsection):
+def clean_dir(path, section, subsection):
     cfg = dict(core.CFG[section][subsection])
     if not os.path.exists(path):
         logger.info('Directory {0} has been processed and removed ...'.format(path), 'CLEANDIR')
         return
     if core.FORCE_CLEAN and not core.FAILED:
         logger.info('Doing Forceful Clean of {0}'.format(path), 'CLEANDIR')
-        rmDir(path)
+        remove_dir(path)
         return
     min_size = int(cfg.get('minSize', 0))
     delete_ignored = int(cfg.get('delete_ignored', 0))
     try:
-        num_files = len(listMediaFiles(path, minSize=min_size, delete_ignored=delete_ignored))
+        num_files = len(list_media_files(path, minSize=min_size, delete_ignored=delete_ignored))
     except:
         num_files = 'unknown'
     if num_files > 0:
@@ -994,7 +994,7 @@ def get_nzoid(inputName):
     return nzoid


-def cleanFileName(filename):
+def clean_file_name(filename):
     """Cleans up nzb name by removing any . and _
     characters, along with any trailing hyphens.
@@ -1020,7 +1020,7 @@ def is_archive_file(filename):
     return False


-def isMediaFile(mediafile, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]):
+def is_media_file(mediafile, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]):
     file_name, file_ext = os.path.splitext(mediafile)

     try:
@@ -1039,14 +1039,14 @@ def isMediaFile(mediafile, media=True, audio=True, meta=True, archives=True, oth
     return False


-def listMediaFiles(path, minSize=0, delete_ignored=0, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]):
+def list_media_files(path, minSize=0, delete_ignored=0, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]):
     files = []
     if not os.path.isdir(path):
         if os.path.isfile(path):  # Single file downloads.
             cur_file = os.path.split(path)[1]
-            if isMediaFile(cur_file, media, audio, meta, archives, other, otherext):
+            if is_media_file(cur_file, media, audio, meta, archives, other, otherext):
                 # Optionally ignore sample files
-                if is_sample(path) or not is_minSize(path, minSize):
+                if is_sample(path) or not is_min_size(path, minSize):
                     if delete_ignored == 1:
                         try:
                             os.unlink(path)
@@ -1064,11 +1064,11 @@ def listMediaFiles(path, minSize=0, delete_ignored=0, media=True, audio=True, me

         # if it's a folder do it recursively
         if os.path.isdir(full_cur_file) and not cur_file.startswith('.'):
-            files += listMediaFiles(full_cur_file, minSize, delete_ignored, media, audio, meta, archives, other, otherext)
+            files += list_media_files(full_cur_file, minSize, delete_ignored, media, audio, meta, archives, other, otherext)

-        elif isMediaFile(cur_file, media, audio, meta, archives, other, otherext):
+        elif is_media_file(cur_file, media, audio, meta, archives, other, otherext):
             # Optionally ignore sample files
-            if is_sample(full_cur_file) or not is_minSize(full_cur_file, minSize):
+            if is_sample(full_cur_file) or not is_min_size(full_cur_file, minSize):
                 if delete_ignored == 1:
                     try:
                         os.unlink(full_cur_file)
@@ -1160,11 +1160,11 @@ def find_imdbid(dirName, inputName, omdbApiKey):
     return imdbid


-def extractFiles(src, dst=None, keep_archive=None):
+def extract_files(src, dst=None, keep_archive=None):
     extracted_folder = []
     extracted_archive = []

-    for inputFile in listMediaFiles(src, media=False, audio=False, meta=False, archives=True):
+    for inputFile in list_media_files(src, media=False, audio=False, meta=False, archives=True):
         dir_path = os.path.dirname(inputFile)
         full_file_name = os.path.basename(inputFile)
         archive_name = os.path.splitext(full_file_name)[0]
@@ -1181,7 +1181,7 @@ def extractFiles(src, dst=None, keep_archive=None):
             logger.error("Extraction failed for: {0}".format(full_file_name))

     for folder in extracted_folder:
-        for inputFile in listMediaFiles(folder, media=False, audio=False, meta=False, archives=True):
+        for inputFile in list_media_files(folder, media=False, audio=False, meta=False, archives=True):
             full_file_name = os.path.basename(inputFile)
             archive_name = os.path.splitext(full_file_name)[0]
             archive_name = re.sub(r"part[0-9]+", "", archive_name)
@@ -1258,7 +1258,7 @@ def plex_update(category):
         logger.debug("Could not identify section for plex update", 'PLEX')


-def backupVersionedFile(old_file, version):
+def backup_versioned_file(old_file, version):
     num_tries = 0
     new_file = '{old}.v{version}'.format(old=old_file, version=version)
@@ -1287,7 +1287,7 @@ def backupVersionedFile(old_file, version):
     return True


-def update_downloadInfoStatus(inputName, status):
+def update_download_info_status(inputName, status):
     logger.db("Updating status of our download {0} in the DB to {1}".format(inputName, status))

     my_db = nzbToMediaDB.DBConnection()
@@ -1295,7 +1295,7 @@ def update_downloadInfoStatus(inputName, status):
                  [status, datetime.date.today().toordinal(), text_type(inputName)])


-def get_downloadInfo(inputName, status):
+def get_download_info(inputName, status):
     logger.db("Getting download info for {0} from the DB".format(inputName))

     my_db = nzbToMediaDB.DBConnection()
diff --git a/core/transcoder/transcoder.py b/core/transcoder/transcoder.py
index cb812642..deb74e4b 100644
--- a/core/transcoder/transcoder.py
+++ b/core/transcoder/transcoder.py
@@ -13,17 +13,17 @@ from six import iteritems, text_type, string_types

 import core
 from core import logger
-from core.nzbToMediaUtil import makeDir
+from core.nzbToMediaUtil import make_dir


-def isVideoGood(videofile, status):
+def is_video_good(videofile, status):
     file_name_ext = os.path.basename(videofile)
     file_name, file_ext = os.path.splitext(file_name_ext)
     disable = False
     if file_ext not in core.MEDIACONTAINER or not core.FFPROBE or not core.CHECK_MEDIA or file_ext in ['.iso'] or (status > 0 and core.NOEXTRACTFAILED):
         disable = True
     else:
-        test_details, res = getVideoDetails(core.TEST_FILE)
+        test_details, res = get_video_details(core.TEST_FILE)
         if res != 0 or test_details.get("error"):
             disable = True
             logger.info("DISABLED: ffprobe failed to analyse test file. Stopping corruption check.", 'TRANSCODER')
@@ -41,7 +41,7 @@ def isVideoGood(videofile, status):
             return True

     logger.info('Checking [{0}] for corruption, please stand by ...'.format(file_name_ext), 'TRANSCODER')
-    video_details, result = getVideoDetails(videofile)
+    video_details, result = get_video_details(videofile)

     if result != 0:
         logger.error("FAILED: [{0}] is corrupted!".format(file_name_ext), 'TRANSCODER')
@@ -72,7 +72,7 @@ def zip_out(file, img, bitbucket):
     return procin


-def getVideoDetails(videofile, img=None, bitbucket=None):
+def get_video_details(videofile, img=None, bitbucket=None):
     video_details = {}
     result = 1
     file = videofile
@@ -116,12 +116,12 @@ def getVideoDetails(videofile, img=None, bitbucket=None):
     return video_details, result


-def buildCommands(file, newDir, movieName, bitbucket):
+def build_commands(file, newDir, movieName, bitbucket):
     if isinstance(file, string_types):
         input_file = file
         if 'concat:' in file:
             file = file.split('|')[0].replace('concat:', '')
-        video_details, result = getVideoDetails(file)
+        video_details, result = get_video_details(file)
         dir, name = os.path.split(file)
         name, ext = os.path.splitext(name)
         check = re.match("VTS_([0-9][0-9])_[0-9]+", name)
@@ -136,7 +136,7 @@ def buildCommands(file, newDir, movieName, bitbucket):
     else:
         img, data = next(iteritems(file))
         name = data['name']
-        video_details, result = getVideoDetails(data['files'][0], img, bitbucket)
+        video_details, result = get_video_details(data['files'][0], img, bitbucket)
         input_file = '-'
         file = '-'
@@ -471,7 +471,7 @@ def buildCommands(file, newDir, movieName, bitbucket):

     if core.SEMBED and os.path.isfile(file):
         for subfile in get_subs(file):
-            sub_details, result = getVideoDetails(subfile)
+            sub_details, result = get_video_details(subfile)
             if not sub_details or not sub_details.get("streams"):
                 continue
             if core.SCODEC == "mov_text":
@@ -528,7 +528,7 @@ def get_subs(file):


 def extract_subs(file, newfilePath, bitbucket):
-    video_details, result = getVideoDetails(file)
+    video_details, result = get_video_details(file)
     if not video_details:
         return
@@ -586,7 +586,7 @@ def extract_subs(file, newfilePath, bitbucket):
         logger.error("Extracting subtitles has failed")


-def processList(List, newDir, bitbucket):
+def process_list(List, newDir, bitbucket):
     rem_list = []
     new_list = []
     combine = []
@@ -596,7 +596,7 @@ def processList(List, newDir, bitbucket):
         ext = os.path.splitext(item)[1].lower()
         if ext in ['.iso', '.bin', '.img'] and ext not in core.IGNOREEXTENSIONS:
             logger.debug("Attempting to rip disk image: {0}".format(item), "TRANSCODER")
-            new_list.extend(ripISO(item, newDir, bitbucket))
+            new_list.extend(rip_iso(item, newDir, bitbucket))
             rem_list.append(item)
         elif re.match(".+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", item) and '.vob' not in core.IGNOREEXTENSIONS:
             logger.debug("Found VIDEO_TS image file: {0}".format(item), "TRANSCODER")
@@ -614,9 +614,9 @@ def processList(List, newDir, bitbucket):
         else:
             continue
     if vts_path:
-        new_list.extend(combineVTS(vts_path))
+        new_list.extend(combine_vts(vts_path))
     if combine:
-        new_list.extend(combineCD(combine))
+        new_list.extend(combine_cd(combine))
     for file in new_list:
         if isinstance(file, string_types) and 'concat:' not in file and not os.path.isfile(file):
             success = False
@@ -633,7 +633,7 @@ def processList(List, newDir, bitbucket):
     return List, rem_list, new_list, success


-def ripISO(item, newDir, bitbucket):
+def rip_iso(item, newDir, bitbucket):
     new_files = []
     failure_dir = 'failure'
     # Mount the ISO in your OS and call combineVTS.
@@ -681,7 +681,7 @@ def ripISO(item, newDir, bitbucket):
     return new_files


-def combineVTS(vtsPath):
+def combine_vts(vtsPath):
     new_files = []
     combined = ''
     for n in range(99):
@@ -705,7 +705,7 @@ def combineVTS(vtsPath):
     return new_files


-def combineCD(combine):
+def combine_cd(combine):
     new_files = []
     for item in set([re.match("(.+)[cC][dD][0-9].", item).groups()[0] for item in combine]):
         concat = ''
@@ -728,17 +728,17 @@ def print_cmd(command):
     logger.debug("calling command:{0}".format(cmd))


-def Transcode_directory(dirName):
+def transcode_directory(dirName):
     if not core.FFMPEG:
         return 1, dirName
     logger.info("Checking for files to be transcoded")
     final_result = 0  # initialize as successful
     if core.OUTPUTVIDEOPATH:
         new_dir = core.OUTPUTVIDEOPATH
-        makeDir(new_dir)
+        make_dir(new_dir)
         name = os.path.splitext(os.path.split(dirName)[1])[0]
         new_dir = os.path.join(new_dir, name)
-        makeDir(new_dir)
+        make_dir(new_dir)
     else:
         new_dir = dirName
     if platform.system() == 'Windows':
@@ -746,8 +746,8 @@ def Transcode_directory(dirName):
     else:
         bitbucket = open('/dev/null')
     movie_name = os.path.splitext(os.path.split(dirName)[1])[0]
-    file_list = core.listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False)
-    file_list, rem_list, new_list, success = processList(file_list, new_dir, bitbucket)
+    file_list = core.list_media_files(dirName, media=True, audio=False, meta=False, archives=False)
+    file_list, rem_list, new_list, success = process_list(file_list, new_dir, bitbucket)
     if not success:
         bitbucket.close()
         return 1, dirName
@@ -755,7 +755,7 @@ def Transcode_directory(dirName):
     for file in file_list:
         if isinstance(file, string_types) and os.path.splitext(file)[1] in core.IGNOREEXTENSIONS:
             continue
-        command = buildCommands(file, new_dir, movie_name, bitbucket)
+        command = build_commands(file, new_dir, movie_name, bitbucket)
         newfile_path = command[-1]

         # transcoding files may remove the original file, so make sure to extract subtitles first
diff --git a/nzbToMedia.py b/nzbToMedia.py
index 4079f87b..4735f018 100755
--- a/nzbToMedia.py
+++ b/nzbToMedia.py
@@ -635,7 +635,7 @@ from core.autoProcess.autoProcessMovie import autoProcessMovie
 from core.autoProcess.autoProcessMusic import autoProcessMusic
 from core.autoProcess.autoProcessTV import autoProcessTV
 from core.nzbToMediaUserScript import external_script
-from core.nzbToMediaUtil import CharReplace, cleanDir, convert_to_ascii, extractFiles, getDirs, get_downloadInfo, get_nzoid, plex_update, update_downloadInfoStatus
+from core.nzbToMediaUtil import char_replace, clean_dir, convert_to_ascii, extract_files, get_dirs, get_download_info, get_nzoid, plex_update, update_download_info_status

 try:
     text_type = unicode
@@ -666,8 +666,8 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down
     input_name1 = input_name

     try:
-        encoded, input_directory1 = CharReplace(input_directory)
-        encoded, input_name1 = CharReplace(input_name)
+        encoded, input_directory1 = char_replace(input_directory)
+        encoded, input_name1 = char_replace(input_name)
     except:
         pass
@@ -727,7 +727,7 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down

     if extract == 1:
         logger.debug('Checking for archives to extract in directory: {0}'.format(input_directory))
-        extractFiles(input_directory)
+        extract_files(input_directory)

     logger.info("Calling {0}:{1} to post-process:{2}".format(section_name, input_category, input_name))
@@ -735,13 +735,13 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down
         result = autoProcessMovie().process(section_name, input_directory, input_name, status, clientAgent, download_id, input_category, failureLink)
     elif section_name in ["SickBeard", "NzbDrone", "Sonarr"]:
-        result = autoProcessTV().processEpisode(section_name, input_directory, input_name, status, clientAgent,
-                                                download_id, input_category, failureLink)
+        result = autoProcessTV().process_episode(section_name, input_directory, input_name, status, clientAgent,
+                                                 download_id, input_category, failureLink)
     elif section_name in ["HeadPhones", "Lidarr"]:
         result = autoProcessMusic().process(section_name, input_directory, input_name, status, clientAgent, input_category)
     elif section_name == "Mylar":
-        result = autoProcessComics().processEpisode(section_name, input_directory, input_name, status, clientAgent,
-                                                    input_category)
+        result = autoProcessComics().process_episode(section_name, input_directory, input_name, status, clientAgent,
+                                                     input_category)
     elif section_name == "Gamez":
         result = autoProcessGames().process(section_name, input_directory, input_name, status, clientAgent, input_category)
     elif section_name == 'UserScript':
@@ -754,10 +754,10 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down
     if result[0] == 0:
         if clientAgent != 'manual':
             # update download status in our DB
-            update_downloadInfoStatus(input_name, 1)
+            update_download_info_status(input_name, 1)
         if section_name not in ['UserScript', 'NzbDrone', 'Sonarr', 'Radarr', 'Lidarr']:
             # cleanup our processing folders of any misc unwanted files and empty directories
-            cleanDir(input_directory, section_name, input_category)
+            clean_dir(input_directory, section_name, input_category)

     return result
@@ -879,11 +879,11 @@ def main(args, section=None):
         for subsection in subsections:
             if not core.CFG[section][subsection].isenabled():
                 continue
-            for dir_name in getDirs(section, subsection, link='move'):
+            for dir_name in get_dirs(section, subsection, link='move'):
                 logger.info("Starting manual run for {0}:{1} - Folder: {2}".format(section, subsection, dir_name))

                 logger.info("Checking database for download info for {0} ...".format(os.path.basename(dir_name)))
-                core.DOWNLOADINFO = get_downloadInfo(os.path.basename(dir_name), 0)
+                core.DOWNLOADINFO = get_download_info(os.path.basename(dir_name), 0)
                 if core.DOWNLOADINFO:
                     logger.info("Found download info for {0}, "
                                 "setting variables now ...".format
diff --git a/tests/general.py b/tests/general.py
index 89f403f8..e0410c14 100755
--- a/tests/general.py
+++ b/tests/general.py
@@ -5,7 +5,7 @@ import guessit
 import requests

 import core
-from core.nzbToMediaAutoFork import autoFork
+from core.nzbToMediaAutoFork import auto_fork
 from core.nzbToMediaUtil import server_responding
 from core.transcoder import transcoder
@@ -15,7 +15,7 @@ core.initialize()
 #label = core.TORRENT_CLASS.core.get_torrent_status("f33a9c4b15cbd9170722d700069af86746817ade", ["label"]).get()['label']
#print label -if transcoder.isVideoGood(core.TEST_FILE, 0): +if transcoder.is_video_good(core.TEST_FILE, 0): print("FFPROBE Works") else: print("FFPROBE FAILED") @@ -25,7 +25,7 @@ print(test) section = core.CFG.findsection('tv').isenabled() print(section) print(len(section)) -fork, fork_params = autoFork('SickBeard', 'tv') +fork, fork_params = auto_fork('SickBeard', 'tv') if server_responding("http://127.0.0.1:5050"): print("CouchPotato Running")
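
Note on external callers: this patch renames public helpers from camelCase to snake_case (for example listMediaFiles -> list_media_files, Transcode_directory -> transcode_directory) and updates every caller inside the repository, but it keeps no aliases for the old names. If out-of-tree user scripts import the old spellings, a thin deprecation shim could bridge the transition. The sketch below is illustrative only and is not part of this patch; the deprecated_alias helper is hypothetical and does not exist in the codebase.

# Hypothetical compatibility shim -- a sketch under the assumptions above, not part of this patch.
import functools
import warnings

from core.nzbToMediaUtil import list_media_files  # renamed in this patch


def deprecated_alias(new_func, old_name):
    """Return a wrapper that forwards to new_func and warns that old_name is deprecated."""
    @functools.wraps(new_func)
    def wrapper(*args, **kwargs):
        warnings.warn(
            "{0}() is deprecated, use {1}() instead".format(old_name, new_func.__name__),
            DeprecationWarning,
            stacklevel=2,
        )
        return new_func(*args, **kwargs)
    return wrapper


# Old camelCase name kept working (with a DeprecationWarning) while downstream scripts migrate.
listMediaFiles = deprecated_alias(list_media_files, "listMediaFiles")

The same one-line aliasing pattern would cover the other renamed helpers (extract_files, get_download_info, transcode_directory, and so on) if backwards compatibility is wanted for a release cycle before the old names are dropped.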