PEP8 Function name should be lowercase

Labrys of Knossos 2018-12-16 21:37:44 -05:00
parent 97e1ed71b3
commit d8cbf422dd
18 changed files with 221 additions and 221 deletions
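
The change is mechanical: camelCase helpers are renamed to PEP8 lowercase_with_underscores form (for example sanitizeName becomes sanitize_name and listMediaFiles becomes list_media_files), and every definition, import, and call site is updated together in the same commit. A minimal sketch of the pattern follows; the simplified body only satisfies the sanitize_name doctests shown later in this diff, and the deprecation alias is an assumption about how external callers could be kept working during a transition, not something this commit adds (the commit renames in place).

import warnings

def sanitize_name(name):
    # New PEP8 snake_case spelling; body simplified for illustration only.
    return name.strip('. ').replace('/', '-').replace('"', '')

def sanitizeName(name):
    # Old camelCase spelling kept as a thin, deprecated alias.
    warnings.warn("sanitizeName() is deprecated; use sanitize_name()",
                  DeprecationWarning, stacklevel=2)
    return sanitize_name(name)

assert sanitize_name('a/b/c') == 'a-b-c'
assert sanitizeName('.a.b..') == 'a.b'

Keeping such an alias is optional; since nzbToMedia is an application rather than a library, renaming in place, as done here, is the simpler choice.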

@@ -7,11 +7,11 @@ import sys
import core
from core import logger, nzbToMediaDB
from core.nzbToMediaUserScript import external_script
-from core.nzbToMediaUtil import CharReplace, convert_to_ascii, plex_update, replace_links
+from core.nzbToMediaUtil import char_replace, convert_to_ascii, plex_update, replace_links
from libs.six import text_type
-def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent):
+def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent):
input_directory = inputDirectory
input_name = inputName
input_category = inputCategory
@@ -29,8 +29,8 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
input_name1 = input_name
try:
-encoded, input_directory1 = CharReplace(input_directory)
-encoded, input_name1 = CharReplace(input_name)
+encoded, input_directory1 = char_replace(input_directory)
+encoded, input_name1 = char_replace(input_name)
except:
pass
@@ -109,12 +109,12 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
# This way Processing is isolated.
if not os.path.isdir(os.path.join(input_directory, input_name)):
basename = os.path.basename(input_directory)
-basename = core.sanitizeName(input_name) \
-if input_name == basename else os.path.splitext(core.sanitizeName(input_name))[0]
+basename = core.sanitize_name(input_name) \
+if input_name == basename else os.path.splitext(core.sanitize_name(input_name))[0]
output_destination = os.path.join(core.OUTPUTDIRECTORY, input_category, basename)
elif unique_path:
output_destination = os.path.normpath(
-core.os.path.join(core.OUTPUTDIRECTORY, input_category, core.sanitizeName(input_name).replace(" ",".")))
+core.os.path.join(core.OUTPUTDIRECTORY, input_category, core.sanitize_name(input_name).replace(" ", ".")))
else:
output_destination = os.path.normpath(
core.os.path.join(core.OUTPUTDIRECTORY, input_category))
@@ -143,9 +143,9 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
now = datetime.datetime.now()
if extract == 1:
-input_files = core.listMediaFiles(input_directory, archives=False, other=True, otherext=extensions)
+input_files = core.list_media_files(input_directory, archives=False, other=True, otherext=extensions)
else:
-input_files = core.listMediaFiles(input_directory, other=True, otherext=extensions)
+input_files = core.list_media_files(input_directory, other=True, otherext=extensions)
if len(input_files) == 0 and os.path.isfile(input_directory):
input_files = [input_directory]
logger.debug("Found 1 file to process: {0}".format(input_directory))
@@ -170,8 +170,8 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
if root == 1:
if not found_file:
logger.debug("Looking for {0} in: {1}".format(input_name, inputFile))
-if any([core.sanitizeName(input_name) in core.sanitizeName(inputFile),
-core.sanitizeName(file_name) in core.sanitizeName(input_name)]):
+if any([core.sanitize_name(input_name) in core.sanitize_name(inputFile),
+core.sanitize_name(file_name) in core.sanitize_name(input_name)]):
found_file = True
logger.debug("Found file {0} that matches Torrent Name {1}".format
(full_file_name, input_name))
@@ -194,7 +194,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
if torrent_no_link == 0:
try:
core.copy_link(inputFile, target_file, core.USELINK)
-core.rmReadOnly(target_file)
+core.remove_read_only(target_file)
except:
logger.error("Failed to link: {0} to {1}".format(inputFile, target_file))
@@ -202,7 +202,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
if extract == 1:
logger.debug('Checking for archives to extract in directory: {0}'.format(input_directory))
-core.extractFiles(input_directory, output_destination, keep_archive)
+core.extract_files(input_directory, output_destination, keep_archive)
if input_category not in core.NOFLATTEN:
# don't flatten hp in case multi cd albums, and we need to copy this back later.
@@ -211,7 +211,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
# Now check if video files exist in destination:
if section_name in ["SickBeard", "NzbDrone", "Sonarr", "CouchPotato", "Radarr"]:
num_videos = len(
-core.listMediaFiles(output_destination, media=True, audio=False, meta=False, archives=False))
+core.list_media_files(output_destination, media=True, audio=False, meta=False, archives=False))
if num_videos > 0:
logger.info("Found {0} media files in {1}".format(num_videos, output_destination))
status = 0
@@ -241,14 +241,14 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
elif section_name in ['SickBeard', 'NzbDrone', 'Sonarr']:
if input_hash:
input_hash = input_hash.upper()
-result = core.autoProcessTV().processEpisode(section_name, output_destination, input_name,
+result = core.autoProcessTV().process_episode(section_name, output_destination, input_name,
status, clientAgent, input_hash, input_category)
elif section_name in ['HeadPhones', 'Lidarr']:
result = core.autoProcessMusic().process(section_name, output_destination, input_name,
status, clientAgent, input_category)
elif section_name == 'Mylar':
-result = core.autoProcessComics().processEpisode(section_name, output_destination, input_name,
+result = core.autoProcessComics().process_episode(section_name, output_destination, input_name,
status, clientAgent, input_category)
elif section_name == 'Gamez':
result = core.autoProcessGames().process(section_name, output_destination, input_name,
status, clientAgent, input_category)
@@ -267,7 +267,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
else:
if clientAgent != 'manual':
# update download status in our DB
-core.update_downloadInfoStatus(input_name, 1)
+core.update_download_info_status(input_name, 1)
# remove torrent
if core.USELINK == 'move-sym' and not core.DELETE_ORIGINAL == 1:
@@ -281,7 +281,7 @@ def processTorrent(inputDirectory, inputName, inputCategory, inputHash, inputID,
if not section_name == 'UserScript':
# for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN
# cleanup our processing folders of any misc unwanted files and empty directories
-core.cleanDir(output_destination, section_name, input_category)
+core.clean_dir(output_destination, section_name, input_category)
return result
@@ -310,7 +310,7 @@ def main(args):
return -1
if input_directory and input_name and input_hash and input_id:
-result = processTorrent(input_directory, input_name, input_category, input_hash, input_id, client_agent)
+result = process_torrent(input_directory, input_name, input_category, input_hash, input_id, client_agent)
else:
# Perform Manual Post-Processing
logger.warning("Invalid number of arguments received from client, Switching to manual run mode ...")
@@ -319,13 +319,13 @@ def main(args):
for subsection in subsections:
if not core.CFG[section][subsection].isenabled():
continue
-for dir_name in core.getDirs(section, subsection, link='hard'):
+for dir_name in core.get_dirs(section, subsection, link='hard'):
logger.info("Starting manual run for {0}:{1} - Folder:{2}".format
(section, subsection, dir_name))
logger.info("Checking database for download info for {0} ...".format
(os.path.basename(dir_name)))
-core.DOWNLOADINFO = core.get_downloadInfo(os.path.basename(dir_name), 0)
+core.DOWNLOADINFO = core.get_download_info(os.path.basename(dir_name), 0)
if core.DOWNLOADINFO:
client_agent = text_type(core.DOWNLOADINFO[0].get('client_agent', 'manual'))
input_hash = text_type(core.DOWNLOADINFO[0].get('input_hash', ''))
@@ -353,8 +353,8 @@ def main(args):
except UnicodeError:
pass
-results = processTorrent(dir_name, input_name, subsection, input_hash or None, input_id or None,
+results = process_torrent(dir_name, input_name, subsection, input_hash or None, input_id or None,
client_agent)
if results[0] != 0:
logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format
(section, subsection))

@@ -45,10 +45,10 @@ from core.autoProcess.autoProcessTV import autoProcessTV
from core.databases import mainDB
from core.nzbToMediaConfig import config
from core.nzbToMediaUtil import (
-RunningProcess, WakeUp, category_search, cleanDir, cleanDir, copy_link,
-create_torrent_class, extractFiles, flatten, getDirs, get_downloadInfo,
-listMediaFiles, makeDir, parse_args, pause_torrent, remove_torrent,
-resume_torrent, rmDir, rmReadOnly, sanitizeName, update_downloadInfoStatus,
+RunningProcess, wake_up, category_search, clean_dir, clean_dir, copy_link,
+create_torrent_class, extract_files, flatten, get_dirs, get_download_info,
+list_media_files, make_dir, parse_args, pause_torrent, remove_torrent,
+resume_torrent, remove_dir, remove_read_only, sanitize_name, update_download_info_status,
)
from core.transcoder import transcoder
@@ -255,7 +255,7 @@ def initialize(section=None):
LOG_FILE = os.environ['NTM_LOGFILE']
LOG_DIR = os.path.split(LOG_FILE)[0]
-if not makeDir(LOG_DIR):
+if not make_dir(LOG_DIR):
print("No log folder, logging to screen only")
MYAPP = RunningProcess()
@@ -291,7 +291,7 @@ def initialize(section=None):
sys.exit(1)
# init logging
-logger.ntm_log_instance.initLogging()
+logger.ntm_log_instance.init_logging()
# run migrate to convert old cfg to new style cfg plus fix any cfg missing values/options.
if not config.migrate():
@@ -320,7 +320,7 @@ def initialize(section=None):
logger.info("{0}: {1}".format(item, os.environ[item]), "ENVIRONMENT")
# initialize the main SB database
-nzbToMediaDB.upgradeDatabase(nzbToMediaDB.DBConnection(), mainDB.InitialSchema)
+nzbToMediaDB.upgrade_database(nzbToMediaDB.DBConnection(), mainDB.InitialSchema)
# Set Version and GIT variables
NZBTOMEDIA_VERSION = '11.06'
@@ -357,7 +357,7 @@ def initialize(section=None):
system=platform.system(), release=platform.release()))
if int(CFG["WakeOnLan"]["wake"]) == 1:
-WakeUp()
+wake_up()
NZB_CLIENTAGENT = CFG["Nzb"]["clientAgent"]  # sabnzbd
SABNZBDHOST = CFG["Nzb"]["sabnzbd_host"]

@@ -6,13 +6,13 @@ import requests
import core
from core import logger
-from core.nzbToMediaUtil import convert_to_ascii, remoteDir, server_responding
+from core.nzbToMediaUtil import convert_to_ascii, remote_dir, server_responding
requests.packages.urllib3.disable_warnings()
class autoProcessComics(object):
-def processEpisode(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None):
+def process_episode(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None):
dir_name = dirName
input_name = inputName
@@ -42,7 +42,7 @@ class autoProcessComics(object):
params = {
'cmd': 'forceProcess',
'apikey': apikey,
-'nzb_folder': remoteDir(dir_name) if remote_path else dir_name,
+'nzb_folder': remote_dir(dir_name) if remote_path else dir_name,
}
if input_name is not None:

@@ -9,7 +9,7 @@ import requests
import core
from core import logger
from core.nzbToMediaSceneExceptions import process_all_exceptions
-from core.nzbToMediaUtil import convert_to_ascii, find_download, find_imdbid, import_subs, listMediaFiles, remoteDir, reportNzb, rmDir, server_responding
+from core.nzbToMediaUtil import convert_to_ascii, find_download, find_imdbid, import_subs, list_media_files, remote_dir, report_nzb, remove_dir, server_responding
from core.transcoder import transcoder
requests.packages.urllib3.disable_warnings()
@@ -129,7 +129,7 @@ class autoProcessMovie(object):
logger.error("{0} did not return expected json data.".format(section), section)
return None
-def CDH(self, url2, headers, section="MAIN"):
+def completed_download_handling(self, url2, headers, section="MAIN"):
try:
r = requests.get(url2, params={}, headers=headers, stream=True, verify=False, timeout=(30, 60))
except requests.ConnectionError:
@@ -223,17 +223,17 @@ class autoProcessMovie(object):
process_all_exceptions(input_name, dir_name)
input_name, dir_name = convert_to_ascii(input_name, dir_name)
-if not listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False) and listMediaFiles(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
+if not list_media_files(dir_name, media=True, audio=False, meta=False, archives=False) and list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
logger.debug('Checking for archives to extract in directory: {0}'.format(dir_name))
-core.extractFiles(dir_name)
+core.extract_files(dir_name)
input_name, dir_name = convert_to_ascii(input_name, dir_name)
good_files = 0
num_files = 0
# Check video files for corruption
-for video in listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False):
+for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
num_files += 1
-if transcoder.isVideoGood(video, status):
+if transcoder.is_video_good(video, status):
import_subs(video)
good_files += 1
if num_files and good_files == num_files:
@@ -258,7 +258,7 @@ class autoProcessMovie(object):
if status == 0:
if core.TRANSCODE == 1:
-result, new_dir_name = transcoder.Transcode_directory(dir_name)
+result, new_dir_name = transcoder.transcode_directory(dir_name)
if result == 0:
logger.debug("Transcoding succeeded for files in {0}".format(dir_name), section)
dir_name = new_dir_name
@@ -271,7 +271,7 @@ class autoProcessMovie(object):
else:
logger.error("Transcoding failed for files in {0}".format(dir_name), section)
return [1, "{0}: Failed to post-process - Transcoding failed".format(section)]
-for video in listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False):
+for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
if not release and ".cp(tt" not in video and imdbid:
video_name, video_ext = os.path.splitext(video)
video2 = "{0}.cp({1}){2}".format(video_name, imdbid, video_ext)
@@ -288,7 +288,7 @@ class autoProcessMovie(object):
params['downloader'] = downloader or clientAgent
params['download_id'] = download_id
-params['media_folder'] = remoteDir(dir_name) if remote_path else dir_name
+params['media_folder'] = remote_dir(dir_name) if remote_path else dir_name
if section == "CouchPotato":
if method == "manage":
@@ -344,7 +344,7 @@ class autoProcessMovie(object):
core.FAILED = True
logger.postprocess("FAILED DOWNLOAD DETECTED FOR {0}".format(input_name), section)
if failureLink:
-reportNzb(failureLink, clientAgent)
+report_nzb(failureLink, clientAgent)
if section == "Radarr":
logger.postprocess("FAILED: The download failed. Sending failed download to {0} for CDH processing".format(section), section)
@@ -352,7 +352,7 @@ class autoProcessMovie(object):
if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name:
logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section)
-rmDir(dir_name)
+remove_dir(dir_name)
if not release_id and not media_id:
logger.error("Could not find a downloaded movie in the database matching {0}, exiting!".format(input_name),
@@ -451,7 +451,7 @@ class autoProcessMovie(object):
dir_name), section)
return [0, "{0}: Successfully post-processed {1}".format(section, input_name)]
-elif not listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=True):
+elif not list_media_files(dir_name, media=True, audio=False, meta=False, archives=True):
logger.postprocess("SUCCESS: Input Directory [{0}] has no remaining media files. This has been fully processed.".format(
dir_name), section)
return [0, "{0}: Successfully post-processed {1}".format(section, input_name)]
@@ -460,7 +460,7 @@ class autoProcessMovie(object):
time.sleep(10 * wait_for)
# The status hasn't changed. we have waited wait_for minutes which is more than enough. uTorrent can resume seeding now.
-if section == "Radarr" and self.CDH(url2, headers, section=section):
+if section == "Radarr" and self.completed_download_handling(url2, headers, section=section):
logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.".format(section), section)
return [status, "{0}: Complete DownLoad Handling is enabled. Passing back to {1}".format(section, section)]
logger.warning(

@@ -9,7 +9,7 @@ import requests
import core
from core import logger
from core.nzbToMediaSceneExceptions import process_all_exceptions
-from core.nzbToMediaUtil import convert_to_ascii, listMediaFiles, remoteDir, rmDir, server_responding
+from core.nzbToMediaUtil import convert_to_ascii, list_media_files, remote_dir, remove_dir, server_responding
requests.packages.urllib3.disable_warnings()
@@ -58,7 +58,7 @@ class autoProcessMusic(object):
if os.path.basename(dirName) == album['FolderName']:
return album["Status"].lower()
-def forceProcess(self, params, url, apikey, inputName, dirName, section, wait_for):
+def force_process(self, params, url, apikey, inputName, dirName, section, wait_for):
release_status = self.get_status(url, apikey, dirName)
if not release_status:
logger.error("Could not find a status for {0}, is it in the wanted list ?".format(inputName), section)
@@ -140,9 +140,9 @@ class autoProcessMusic(object):
process_all_exceptions(input_name, dir_name)
input_name, dir_name = convert_to_ascii(input_name, dir_name)
-if not listMediaFiles(dir_name, media=False, audio=True, meta=False, archives=False) and listMediaFiles(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
+if not list_media_files(dir_name, media=False, audio=True, meta=False, archives=False) and list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
logger.debug('Checking for archives to extract in directory: {0}'.format(dir_name))
-core.extractFiles(dir_name)
+core.extract_files(dir_name)
input_name, dir_name = convert_to_ascii(input_name, dir_name)
#if listMediaFiles(dir_name, media=False, audio=True, meta=False, archives=False) and status:
@@ -154,20 +154,20 @@ class autoProcessMusic(object):
params = {
'apikey': apikey,
'cmd': "forceProcess",
-'dir': remoteDir(dir_name) if remote_path else dir_name
+'dir': remote_dir(dir_name) if remote_path else dir_name
}
-res = self.forceProcess(params, url, apikey, input_name, dir_name, section, wait_for)
+res = self.force_process(params, url, apikey, input_name, dir_name, section, wait_for)
if res[0] in [0, 1]:
return res
params = {
'apikey': apikey,
'cmd': "forceProcess",
-'dir': os.path.split(remoteDir(dir_name))[0] if remote_path else os.path.split(dir_name)[0]
+'dir': os.path.split(remote_dir(dir_name))[0] if remote_path else os.path.split(dir_name)[0]
}
-res = self.forceProcess(params, url, apikey, input_name, dir_name, section, wait_for)
+res = self.force_process(params, url, apikey, input_name, dir_name, section, wait_for)
if res[0] in [0, 1]:
return res
@@ -179,8 +179,8 @@ class autoProcessMusic(object):
url = "{0}{1}:{2}{3}/api/v1/command".format(protocol, host, port, web_root)
headers = {"X-Api-Key": apikey}
if remote_path:
-logger.debug("remote_path: {0}".format(remoteDir(dir_name)), section)
-data = {"name": "Rename", "path": remoteDir(dir_name)}
+logger.debug("remote_path: {0}".format(remote_dir(dir_name)), section)
+data = {"name": "Rename", "path": remote_dir(dir_name)}
else:
logger.debug("path: {0}".format(dir_name), section)
data = {"name": "Rename", "path": dir_name}
@@ -238,5 +238,5 @@ class autoProcessMusic(object):
logger.warning("FAILED DOWNLOAD DETECTED", section)
if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name:
logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section)
-rmDir(dir_name)
+remove_dir(dir_name)
return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)]  # Return as failed to flag this in the downloader.

@@ -10,9 +10,9 @@ import requests
import core
from core import logger
-from core.nzbToMediaAutoFork import autoFork
+from core.nzbToMediaAutoFork import auto_fork
from core.nzbToMediaSceneExceptions import process_all_exceptions
-from core.nzbToMediaUtil import convert_to_ascii, flatten, import_subs, listMediaFiles, remoteDir, reportNzb, rmDir, server_responding
+from core.nzbToMediaUtil import convert_to_ascii, flatten, import_subs, list_media_files, remote_dir, report_nzb, remove_dir, server_responding
from core.transcoder import transcoder
requests.packages.urllib3.disable_warnings()
@@ -36,7 +36,7 @@ class autoProcessTV(object):
logger.error("{0} did not return expected json data.".format(section), section)
return None
-def CDH(self, url2, headers, section="MAIN"):
+def completed_download_handling(self, url2, headers, section="MAIN"):
try:
r = requests.get(url2, params={}, headers=headers, stream=True, verify=False, timeout=(30, 60))
except requests.ConnectionError:
@@ -52,7 +52,7 @@ class autoProcessTV(object):
# ValueError catches simplejson's JSONDecodeError and json's ValueError
return False
-def processEpisode(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None):
+def process_episode(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None):
cfg = dict(core.CFG[section][inputCategory])
@@ -67,7 +67,7 @@ class autoProcessTV(object):
if server_responding("{0}{1}:{2}{3}".format(protocol, host, port, web_root)):
# auto-detect correct fork
-fork, fork_params = autoFork(section, inputCategory)
+fork, fork_params = auto_fork(section, inputCategory)
elif not username and not apikey:
logger.info('No SickBeard username or Sonarr apikey entered. Performing transcoder functions only')
fork, fork_params = "None", {}
@@ -119,21 +119,21 @@ class autoProcessTV(object):
input_name, dir_name = convert_to_ascii(input_name, dir_name)
# Now check if tv files exist in destination.
-if not listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False):
-if listMediaFiles(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
+if not list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
+if list_media_files(dir_name, media=False, audio=False, meta=False, archives=True) and extract:
logger.debug('Checking for archives to extract in directory: {0}'.format(dir_name))
-core.extractFiles(dir_name)
+core.extract_files(dir_name)
input_name, dir_name = convert_to_ascii(input_name, dir_name)
-if listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False):  # Check that a video exists. if not, assume failed.
+if list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):  # Check that a video exists. if not, assume failed.
flatten(dir_name)
# Check video files for corruption
good_files = 0
num_files = 0
-for video in listMediaFiles(dir_name, media=True, audio=False, meta=False, archives=False):
+for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
num_files += 1
-if transcoder.isVideoGood(video, status):
+if transcoder.is_video_good(video, status):
good_files += 1
import_subs(video)
if num_files > 0:
@@ -170,7 +170,7 @@ class autoProcessTV(object):
print('[NZB] MARK=BAD')
if status == 0 and core.TRANSCODE == 1:  # only transcode successful downloads
-result, new_dir_name = transcoder.Transcode_directory(dir_name)
+result, new_dir_name = transcoder.transcode_directory(dir_name)
if result == 0:
logger.debug("SUCCESS: Transcoding succeeded for files in {0}".format(dir_name), section)
dir_name = new_dir_name
@@ -209,7 +209,7 @@ class autoProcessTV(object):
if param in ["dir_name", "dir", "proc_dir", "process_directory", "path"]:
fork_params[param] = dir_name
if remote_path:
-fork_params[param] = remoteDir(dir_name)
+fork_params[param] = remote_dir(dir_name)
if param == "process_method":
if process_method:
@@ -249,7 +249,7 @@ class autoProcessTV(object):
else:
core.FAILED = True
if failureLink:
-reportNzb(failureLink, clientAgent)
+report_nzb(failureLink, clientAgent)
if 'failed' in fork_params:
logger.postprocess("FAILED: The download failed. Sending 'failed' process request to {0} branch".format(fork), section)
elif section == "NzbDrone":
@@ -259,7 +259,7 @@ class autoProcessTV(object):
logger.postprocess("FAILED: The download failed. {0} branch does not handle failed downloads. Nothing to process".format(fork), section)
if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name:
logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section)
-rmDir(dir_name)
+remove_dir(dir_name)
return [1, "{0}: Failed to post-process. {1} does not support failed downloads".format(section, section)]  # Return as failed to flag this in the downloader.
url = None
@@ -274,8 +274,8 @@ class autoProcessTV(object):
headers = {"X-Api-Key": apikey}
# params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'}
if remote_path:
-logger.debug("remote_path: {0}".format(remoteDir(dir_name)), section)
-data = {"name": "DownloadedEpisodesScan", "path": remoteDir(dir_name), "downloadClientId": download_id, "importMode": import_mode}
+logger.debug("remote_path: {0}".format(remote_dir(dir_name)), section)
+data = {"name": "DownloadedEpisodesScan", "path": remote_dir(dir_name), "downloadClientId": download_id, "importMode": import_mode}
else:
logger.debug("path: {0}".format(dir_name), section)
data = {"name": "DownloadedEpisodesScan", "path": dir_name, "downloadClientId": download_id, "importMode": import_mode}
@@ -340,7 +340,7 @@ class autoProcessTV(object):
if status != 0 and delete_failed and not os.path.dirname(dir_name) == dir_name:
logger.postprocess("Deleting failed files and folder {0}".format(dir_name), section)
-rmDir(dir_name)
+remove_dir(dir_name)
if success:
return [0, "{0}: Successfully post-processed {1}".format(section, input_name)]
@@ -365,7 +365,7 @@ class autoProcessTV(object):
elif command_status and command_status in ['failed']:
logger.debug("The Scan command has failed. Renaming was not successful.", section)
# return [1, "%s: Failed to post-process %s" % (section, input_name) ]
-if self.CDH(url2, headers, section=section):
+if self.completed_download_handling(url2, headers, section=section):
logger.debug("The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.".format(section), section)
return [status, "{0}: Complete DownLoad Handling is enabled. Passing back to {1}".format(section, section)]
else:

@@ -1,15 +1,15 @@
# coding=utf-8
from core import logger, nzbToMediaDB
-from core.nzbToMediaUtil import backupVersionedFile
+from core.nzbToMediaUtil import backup_versioned_file
MIN_DB_VERSION = 1  # oldest db version we support migrating from
MAX_DB_VERSION = 2
-def backupDatabase(version):
+def backup_database(version):
logger.info("Backing up database before upgrade")
-if not backupVersionedFile(nzbToMediaDB.dbFilename(), version):
+if not backup_versioned_file(nzbToMediaDB.db_filename(), version):
logger.log_error_and_exit("Database backup failed, abort upgrading database")
else:
logger.info("Proceeding with upgrade")
@@ -23,13 +23,13 @@ def backupDatabase(version):
class InitialSchema(nzbToMediaDB.SchemaUpgrade):
def test(self):
no_update = False
-if self.hasTable("db_version"):
-cur_db_version = self.checkDBVersion()
+if self.has_table("db_version"):
+cur_db_version = self.check_db_version()
no_update = not cur_db_version < MAX_DB_VERSION
return no_update
def execute(self):
-if not self.hasTable("downloads") and not self.hasTable("db_version"):
+if not self.has_table("downloads") and not self.has_table("db_version"):
queries = [
"CREATE TABLE db_version (db_version INTEGER);",
"CREATE TABLE downloads (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));",
@@ -39,7 +39,7 @@ class InitialSchema(nzbToMediaDB.SchemaUpgrade):
self.connection.action(query)
else:
-cur_db_version = self.checkDBVersion()
+cur_db_version = self.check_db_version()
if cur_db_version < MIN_DB_VERSION:
logger.log_error_and_exit(u"Your database version ({current}) is too old to migrate "

@@ -88,7 +88,7 @@ def extract(filePath, outputDestination):
return False
# Create outputDestination folder
-core.makeDir(outputDestination)
+core.make_dir(outputDestination)
if core.PASSWORDSFILE and os.path.isfile(os.path.normpath(core.PASSWORDSFILE)):
passwords = [line.strip() for line in open(os.path.normpath(core.PASSWORDSFILE))]

@@ -14,7 +14,7 @@ class GitHub(object):
self.github_repo = github_repo
self.branch = branch
-def _access_API(self, path, params=None):
+def _access_api(self, path, params=None):
"""
Access the API at the path given and with the optional params given.
"""
@@ -32,7 +32,7 @@ class GitHub(object):
Returns a deserialized json object containing the commit info. See http://developer.github.com/v3/repos/commits/
"""
-return self._access_API(
+return self._access_api(
['repos', self.github_repo_user, self.github_repo, 'commits'],
params={'per_page': 100, 'sha': self.branch},
)
@@ -49,7 +49,7 @@ class GitHub(object):
Returns a deserialized json object containing the compare info. See http://developer.github.com/v3/repos/commits/
"""
-return self._access_API(
+return self._access_api(
['repos', self.github_repo_user, self.github_repo, 'compare',
'{base}...{head}'.format(base=base, head=head)],
params={'per_page': per_page},

@@ -58,7 +58,7 @@ class NTMRotatingLogHandler(object):
handler.flush()
handler.close()
-def initLogging(self, consoleLogging=True):
+def init_logging(self, consoleLogging=True):
if consoleLogging:
self.console_logging = consoleLogging

@@ -7,7 +7,7 @@ import core
from core import logger
-def autoFork(section, inputCategory):
+def auto_fork(section, inputCategory):
# auto-detect correct section
# config settings

@@ -12,7 +12,7 @@ import core
from core import logger
-def dbFilename(filename="nzbtomedia.db", suffix=None):
+def db_filename(filename="nzbtomedia.db", suffix=None):
"""
@param filename: The sqlite database filename to use. If not specified,
will be made to be nzbtomedia.db
@@ -29,13 +29,13 @@ class DBConnection(object):
def __init__(self, filename="nzbtomedia.db", suffix=None, row_type=None):
self.filename = filename
-self.connection = sqlite3.connect(dbFilename(filename), 20)
+self.connection = sqlite3.connect(db_filename(filename), 20)
if row_type == "dict":
self.connection.row_factory = self._dict_factory
else:
self.connection.row_factory = sqlite3.Row
-def checkDBVersion(self):
+def check_db_version(self):
result = None
try:
result = self.select("SELECT db_version FROM db_version")
@@ -196,7 +196,7 @@ class DBConnection(object):
list(valueDict.values())
)
-def tableInfo(self, tableName):
+def table_info(self, tableName):
# FIXME ? binding is not supported here, but I cannot find a way to escape a string manually
cursor = self.connection.execute("PRAGMA table_info({0})".format(tableName))
columns = {}
@@ -212,7 +212,7 @@ class DBConnection(object):
return d
-def sanityCheckDatabase(connection, sanity_check):
+def sanity_check_database(connection, sanity_check):
sanity_check(connection).check()
@@ -228,22 +228,22 @@ class DBSanityCheck(object):
# = Upgrade API =
# ===============
-def upgradeDatabase(connection, schema):
+def upgrade_database(connection, schema):
logger.log(u"Checking database structure...", logger.MESSAGE)
-_processUpgrade(connection, schema)
-def prettyName(class_name):
+_process_upgrade(connection, schema)
+def pretty_name(class_name):
return ' '.join([x.group() for x in re.finditer("([A-Z])([a-z0-9]+)", class_name)])
-def _processUpgrade(connection, upgradeClass):
+def _process_upgrade(connection, upgradeClass):
instance = upgradeClass(connection)
logger.log(u"Checking {name} database upgrade".format
-(name=prettyName(upgradeClass.__name__)), logger.DEBUG)
+(name=pretty_name(upgradeClass.__name__)), logger.DEBUG)
if not instance.test():
logger.log(u"Database upgrade required: {name}".format
-(name=prettyName(upgradeClass.__name__)), logger.MESSAGE)
+(name=pretty_name(upgradeClass.__name__)), logger.MESSAGE)
try:
instance.execute()
except sqlite3.DatabaseError as error:
@@ -257,7 +257,7 @@ def _processUpgrade(connection, upgradeClass):
(name=upgradeClass.__name__), logger.DEBUG)
for upgradeSubClass in upgradeClass.__subclasses__():
-_processUpgrade(connection, upgradeSubClass)
+_process_upgrade(connection, upgradeSubClass)
# Base migration class. All future DB changes should be subclassed from this class
@@ -265,24 +265,24 @@ class SchemaUpgrade(object):
def __init__(self, connection):
self.connection = connection
-def hasTable(self, tableName):
+def has_table(self, tableName):
return len(self.connection.action("SELECT 1 FROM sqlite_master WHERE name = ?;", (tableName,)).fetchall()) > 0
-def hasColumn(self, tableName, column):
-return column in self.connection.tableInfo(tableName)
-def addColumn(self, table, column, type="NUMERIC", default=0):
+def has_column(self, tableName, column):
+return column in self.connection.table_info(tableName)
+def add_column(self, table, column, type="NUMERIC", default=0):
self.connection.action("ALTER TABLE {0} ADD {1} {2}".format(table, column, type))
self.connection.action("UPDATE {0} SET {1} = ?".format(table, column), (default,))
-def checkDBVersion(self):
+def check_db_version(self):
result = self.connection.select("SELECT db_version FROM db_version")
if result:
return int(result[-1]["db_version"])
else:
return 0
-def incDBVersion(self):
-new_version = self.checkDBVersion() + 1
+def inc_db_version(self):
+new_version = self.check_db_version() + 1
self.connection.action("UPDATE db_version SET db_version = ?", [new_version])
return new_version
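
For context on how the renamed SchemaUpgrade helpers fit together, here is a hypothetical follow-up migration (not part of this commit; the class name and column are invented). It subclasses InitialSchema, which is how _process_upgrade discovers it through __subclasses__():

from core.databases import mainDB

class AddStatusNotes(mainDB.InitialSchema):
    def test(self):
        # True means "already applied": skip when the column exists.
        return self.has_column("downloads", "status_notes")

    def execute(self):
        # add_column issues the ALTER TABLE and back-fills the default value.
        self.add_column("downloads", "status_notes", type="TEXT", default="")
        self.inc_db_version()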

@@ -8,7 +8,7 @@ import subprocess
import core
from core import logger
-from core.nzbToMediaUtil import listMediaFiles
+from core.nzbToMediaUtil import list_media_files
reverse_list = [r"\.\d{2}e\d{2}s\.", r"\.[pi]0801\.", r"\.p027\.", r"\.[pi]675\.", r"\.[pi]084\.", r"\.p063\.",
r"\b[45]62[xh]\.", r"\.yarulb\.", r"\.vtd[hp]\.",
@@ -32,7 +32,7 @@ char_replace = [[r"(\w)1\.(\w)", r"\1i\2"]
def process_all_exceptions(name, dirname):
par2(dirname)
rename_script(dirname)
-for filename in listMediaFiles(dirname):
+for filename in list_media_files(dirname):
newfilename = None
parent_dir = os.path.dirname(filename)
head, file_extension = os.path.splitext(os.path.basename(filename))

@@ -5,7 +5,7 @@ from subprocess import Popen
import core
from core import logger
-from core.nzbToMediaUtil import import_subs, listMediaFiles, rmDir
+from core.nzbToMediaUtil import import_subs, list_media_files, remove_dir
from core.transcoder import transcoder
@@ -40,8 +40,8 @@ def external_script(outputDestination, torrentName, torrentLabel, settings):
core.USER_SCRIPT_RUNONCE = int(settings.get("user_script_runOnce", 1))
if core.CHECK_MEDIA:
-for video in listMediaFiles(outputDestination, media=True, audio=False, meta=False, archives=False):
-if transcoder.isVideoGood(video, 0):
+for video in list_media_files(outputDestination, media=True, audio=False, meta=False, archives=False):
+if transcoder.is_video_good(video, 0):
import_subs(video)
else:
logger.info("Corrupt video file found {0}. Deleting.".format(video), "USERSCRIPT")
@@ -111,7 +111,7 @@ def external_script(outputDestination, torrentName, torrentLabel, settings):
if core.USER_SCRIPT_CLEAN == int(1) and num_files_new == 0 and final_result == 0:
logger.info("All files have been processed. Cleaning outputDirectory {0}".format(outputDestination))
-rmDir(outputDestination)
+remove_dir(outputDestination)
elif core.USER_SCRIPT_CLEAN == int(1) and num_files_new != 0:
logger.info("{0} files were processed, but {1} still remain. outputDirectory will not be cleaned.".format(
num_files, num_files_new))

@ -32,11 +32,11 @@ requests.packages.urllib3.disable_warnings()
# Monkey Patch shutil.copyfileobj() to adjust the buffer length to 512KB rather than 4KB # Monkey Patch shutil.copyfileobj() to adjust the buffer length to 512KB rather than 4KB
shutil.copyfileobjOrig = shutil.copyfileobj shutil.copyfileobjOrig = shutil.copyfileobj
def copyfileobjFast(fsrc, fdst, length=512*1024): def copyfileobj_fast(fsrc, fdst, length=512 * 1024):
shutil.copyfileobjOrig(fsrc, fdst, length=length) shutil.copyfileobjOrig(fsrc, fdst, length=length)
shutil.copyfileobj = copyfileobjFast shutil.copyfileobj = copyfileobj_fast
def reportNzb(failure_link, clientAgent): def report_nzb(failure_link, clientAgent):
# Contact indexer site # Contact indexer site
logger.info("Sending failure notification to indexer site") logger.info("Sending failure notification to indexer site")
if clientAgent == 'nzbget': if clientAgent == 'nzbget':
@ -52,15 +52,15 @@ def reportNzb(failure_link, clientAgent):
return return
def sanitizeName(name): def sanitize_name(name):
""" """
>>> sanitizeName('a/b/c') >>> sanitize_name('a/b/c')
'a-b-c' 'a-b-c'
>>> sanitizeName('abc') >>> sanitize_name('abc')
'abc' 'abc'
>>> sanitizeName('a"b') >>> sanitize_name('a"b')
'ab' 'ab'
>>> sanitizeName('.a.b..') >>> sanitize_name('.a.b..')
'a.b' 'a.b'
""" """
@ -78,7 +78,7 @@ def sanitizeName(name):
return name return name
def makeDir(path): def make_dir(path):
if not os.path.isdir(path): if not os.path.isdir(path):
try: try:
os.makedirs(path) os.makedirs(path)
@ -87,7 +87,7 @@ def makeDir(path):
return True return True
def remoteDir(path): def remote_dir(path):
if not core.REMOTEPATHS: if not core.REMOTEPATHS:
return path return path
for local, remote in core.REMOTEPATHS: for local, remote in core.REMOTEPATHS:
@ -151,10 +151,10 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories):
input_directory = os.path.join(input_directory, input_name) input_directory = os.path.join(input_directory, input_name)
logger.info("SEARCH: Setting input_directory to {0}".format(input_directory)) logger.info("SEARCH: Setting input_directory to {0}".format(input_directory))
tordir = True tordir = True
elif input_name and os.path.isdir(os.path.join(input_directory, sanitizeName(input_name))): elif input_name and os.path.isdir(os.path.join(input_directory, sanitize_name(input_name))):
logger.info("SEARCH: Found torrent directory {0} in input directory directory {1}".format( logger.info("SEARCH: Found torrent directory {0} in input directory directory {1}".format(
sanitizeName(input_name), input_directory)) sanitize_name(input_name), input_directory))
input_directory = os.path.join(input_directory, sanitizeName(input_name)) input_directory = os.path.join(input_directory, sanitize_name(input_name))
logger.info("SEARCH: Setting input_directory to {0}".format(input_directory)) logger.info("SEARCH: Setting input_directory to {0}".format(input_directory))
tordir = True tordir = True
elif input_name and os.path.isfile(os.path.join(input_directory, input_name)): elif input_name and os.path.isfile(os.path.join(input_directory, input_name)):
@ -162,10 +162,10 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories):
input_directory = os.path.join(input_directory, input_name) input_directory = os.path.join(input_directory, input_name)
logger.info("SEARCH: Setting input_directory to {0}".format(input_directory)) logger.info("SEARCH: Setting input_directory to {0}".format(input_directory))
tordir = True tordir = True
elif input_name and os.path.isfile(os.path.join(input_directory, sanitizeName(input_name))): elif input_name and os.path.isfile(os.path.join(input_directory, sanitize_name(input_name))):
logger.info("SEARCH: Found torrent file {0} in input directory directory {1}".format( logger.info("SEARCH: Found torrent file {0} in input directory directory {1}".format(
sanitizeName(input_name), input_directory)) sanitize_name(input_name), input_directory))
input_directory = os.path.join(input_directory, sanitizeName(input_name)) input_directory = os.path.join(input_directory, sanitize_name(input_name))
logger.info("SEARCH: Setting input_directory to {0}".format(input_directory)) logger.info("SEARCH: Setting input_directory to {0}".format(input_directory))
tordir = True tordir = True
@ -187,7 +187,7 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories):
pass pass
if input_name and not tordir: if input_name and not tordir:
if input_name in pathlist or sanitizeName(input_name) in pathlist: if input_name in pathlist or sanitize_name(input_name) in pathlist:
logger.info("SEARCH: Found torrent directory {0} in the directory structure".format(input_name)) logger.info("SEARCH: Found torrent directory {0} in the directory structure".format(input_name))
tordir = True tordir = True
else: else:
@ -202,23 +202,23 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories):
return input_directory, input_name, input_category, root return input_directory, input_name, input_category, root
def getDirSize(inputPath): def get_dir_size(inputPath):
from functools import partial from functools import partial
prepend = partial(os.path.join, inputPath) prepend = partial(os.path.join, inputPath)
return sum([ return sum([
(os.path.getsize(f) if os.path.isfile(f) else getDirSize(f)) (os.path.getsize(f) if os.path.isfile(f) else get_dir_size(f))
for f in map(prepend, os.listdir(text_type(inputPath))) for f in map(prepend, os.listdir(text_type(inputPath)))
]) ])
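
get_dir_size appears in full above: regular files contribute their size and sub-directories are summed recursively. A purely hypothetical usage example, with an invented path and threshold:

album_dir = '/downloads/complete/music/Some Album'      # invented path
size_mb = get_dir_size(album_dir) / (1024.0 * 1024.0)
if size_mb < 20:
    print('{0} is only {1:.1f} MB, treating it as incomplete'.format(album_dir, size_mb))
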
def is_minSize(inputName, minSize): def is_min_size(inputName, minSize):
file_name, file_ext = os.path.splitext(os.path.basename(inputName)) file_name, file_ext = os.path.splitext(os.path.basename(inputName))
# audio files we need to check directory size not file size # audio files we need to check directory size not file size
input_size = os.path.getsize(inputName) input_size = os.path.getsize(inputName)
if file_ext in core.AUDIOCONTAINER: if file_ext in core.AUDIOCONTAINER:
try: try:
input_size = getDirSize(os.path.dirname(inputName)) input_size = get_dir_size(os.path.dirname(inputName))
except: except:
logger.error("Failed to get file size for {0}".format(inputName), 'MINSIZE') logger.error("Failed to get file size for {0}".format(inputName), 'MINSIZE')
return True return True
@ -249,7 +249,7 @@ def copy_link(src, targetLink, useLink):
logger.info("SOURCE AND TARGET folders are the same, skipping ...", 'COPYLINK') logger.info("SOURCE AND TARGET folders are the same, skipping ...", 'COPYLINK')
return True return True
makeDir(os.path.dirname(targetLink)) make_dir(os.path.dirname(targetLink))
try: try:
if useLink == 'dir': if useLink == 'dir':
logger.info("Directory linking SOURCE FOLDER -> TARGET FOLDER", 'COPYLINK') logger.info("Directory linking SOURCE FOLDER -> TARGET FOLDER", 'COPYLINK')
@ -311,7 +311,7 @@ def replace_links(link):
def flatten(outputDestination): def flatten(outputDestination):
logger.info("FLATTEN: Flattening directory: {0}".format(outputDestination)) logger.info("FLATTEN: Flattening directory: {0}".format(outputDestination))
for outputFile in listMediaFiles(outputDestination): for outputFile in list_media_files(outputDestination):
dir_path = os.path.dirname(outputFile) dir_path = os.path.dirname(outputFile)
file_name = os.path.basename(outputFile) file_name = os.path.basename(outputFile)
@ -325,10 +325,10 @@ def flatten(outputDestination):
except: except:
logger.error("Could not flatten {0}".format(outputFile), 'FLATTEN') logger.error("Could not flatten {0}".format(outputFile), 'FLATTEN')
removeEmptyFolders(outputDestination) # Cleanup empty directories remove_empty_folders(outputDestination) # Cleanup empty directories
def removeEmptyFolders(path, removeRoot=True): def remove_empty_folders(path, removeRoot=True):
"""Function to remove empty folders""" """Function to remove empty folders"""
if not os.path.isdir(path): if not os.path.isdir(path):
return return
@ -340,7 +340,7 @@ def removeEmptyFolders(path, removeRoot=True):
for f in files: for f in files:
fullpath = os.path.join(path, f) fullpath = os.path.join(path, f)
if os.path.isdir(fullpath): if os.path.isdir(fullpath):
removeEmptyFolders(fullpath) remove_empty_folders(fullpath)
# if folder empty, delete it # if folder empty, delete it
files = os.listdir(text_type(path)) files = os.listdir(text_type(path))
@ -349,7 +349,7 @@ def removeEmptyFolders(path, removeRoot=True):
os.rmdir(path) os.rmdir(path)
def rmReadOnly(filename): def remove_read_only(filename):
if os.path.isfile(filename): if os.path.isfile(filename):
# check first the read-only attribute # check first the read-only attribute
file_attribute = os.stat(filename)[0] file_attribute = os.stat(filename)[0]
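
remove_read_only begins by reading the file's mode bits (os.stat(filename)[0] is st_mode); the remainder of the body is collapsed here. The usual continuation of this pattern, shown as a generic sketch rather than the project's exact code, clears the read-only bit so a later delete can succeed:

import os
import stat

def remove_read_only(filename):
    if not os.path.isfile(filename):
        return
    mode = os.stat(filename)[0]            # st_mode
    if not mode & stat.S_IWRITE:
        # Read-only file: add the write bit so os.remove()/shutil.rmtree() can delete it.
        os.chmod(filename, stat.S_IWRITE)
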
@ -364,7 +364,7 @@ def rmReadOnly(filename):
# Wake function # Wake function
def WakeOnLan(ethernet_address): def wake_on_lan(ethernet_address):
addr_byte = ethernet_address.split(':') addr_byte = ethernet_address.split(':')
hw_addr = struct.pack(b'BBBBBB', int(addr_byte[0], 16), hw_addr = struct.pack(b'BBBBBB', int(addr_byte[0], 16),
int(addr_byte[1], 16), int(addr_byte[1], 16),
@ -386,7 +386,7 @@ def WakeOnLan(ethernet_address):
# Test Connection function # Test Connection function
def TestCon(host, port): def test_connection(host, port):
try: try:
socket.create_connection((host, port)) socket.create_connection((host, port))
return "Up" return "Up"
@ -394,26 +394,26 @@ def TestCon(host, port):
return "Down" return "Down"
def WakeUp(): def wake_up():
host = core.CFG["WakeOnLan"]["host"] host = core.CFG["WakeOnLan"]["host"]
port = int(core.CFG["WakeOnLan"]["port"]) port = int(core.CFG["WakeOnLan"]["port"])
mac = core.CFG["WakeOnLan"]["mac"] mac = core.CFG["WakeOnLan"]["mac"]
i = 1 i = 1
while TestCon(host, port) == "Down" and i < 4: while test_connection(host, port) == "Down" and i < 4:
logger.info(("Sending WakeOnLan Magic Packet for mac: {0}".format(mac))) logger.info(("Sending WakeOnLan Magic Packet for mac: {0}".format(mac)))
WakeOnLan(mac) wake_on_lan(mac)
time.sleep(20) time.sleep(20)
i = i + 1 i = i + 1
if TestCon(host, port) == "Down": # final check. if test_connection(host, port) == "Down": # final check.
logger.warning("System with mac: {0} has not woken after 3 attempts. " logger.warning("System with mac: {0} has not woken after 3 attempts. "
"Continuing with the rest of the script.".format(mac)) "Continuing with the rest of the script.".format(mac))
else: else:
logger.info("System with mac: {0} has been woken. Continuing with the rest of the script.".format(mac)) logger.info("System with mac: {0} has been woken. Continuing with the rest of the script.".format(mac))
def CharReplace(Name): def char_replace(Name):
name = Name name = Name
# Special character hex range: # Special character hex range:
# CP850: 0x80-0xA5 (fortunately not used in ISO-8859-15) # CP850: 0x80-0xA5 (fortunately not used in ISO-8859-15)
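
char_replace returns an (encoded, name) pair, as every call site in this commit shows, and the comments above hint that it looks for bytes in the CP850/ISO-8859 special-character ranges. The detection table itself is collapsed, so the following is only a loose sketch of the idea (flag names containing high bytes and normalise them), not the project's logic:

def char_replace(name):
    # Minimal sketch: report whether the byte string held non-ASCII characters
    # and, if so, re-encode it assuming CP850 (a common DOS/Windows encoding).
    if isinstance(name, bytes):
        try:
            name.decode('ascii')
            return False, name                 # pure ASCII, nothing to do
        except UnicodeDecodeError:
            return True, name.decode('cp850').encode('utf-8')
    return False, name

changed, fixed = char_replace(b'Am\x82lie')    # 0x82 is 'é' in CP850
print(changed, fixed)                          # True b'Am\xc3\xa9lie'
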
@ -464,13 +464,13 @@ def convert_to_ascii(inputName, dirName):
if ascii_convert == 0 or os.name == 'nt': # just return if we don't want to convert or on windows os and "\" is replaced!. if ascii_convert == 0 or os.name == 'nt': # just return if we don't want to convert or on windows os and "\" is replaced!.
return input_name, dir_name return input_name, dir_name
encoded, input_name = CharReplace(input_name) encoded, input_name = char_replace(input_name)
dir, base = os.path.split(dir_name) dir, base = os.path.split(dir_name)
if not base: # ended with "/" if not base: # ended with "/"
dir, base = os.path.split(dir) dir, base = os.path.split(dir)
encoded, base2 = CharReplace(base) encoded, base2 = char_replace(base)
if encoded: if encoded:
dir_name = os.path.join(dir, base2) dir_name = os.path.join(dir, base2)
logger.info("Renaming directory to: {0}.".format(base2), 'ENCODER') logger.info("Renaming directory to: {0}.".format(base2), 'ENCODER')
@ -480,14 +480,14 @@ def convert_to_ascii(inputName, dirName):
for dirname, dirnames, filenames in os.walk(dir_name, topdown=False): for dirname, dirnames, filenames in os.walk(dir_name, topdown=False):
for subdirname in dirnames: for subdirname in dirnames:
encoded, subdirname2 = CharReplace(subdirname) encoded, subdirname2 = char_replace(subdirname)
if encoded: if encoded:
logger.info("Renaming directory to: {0}.".format(subdirname2), 'ENCODER') logger.info("Renaming directory to: {0}.".format(subdirname2), 'ENCODER')
os.rename(os.path.join(dirname, subdirname), os.path.join(dirname, subdirname2)) os.rename(os.path.join(dirname, subdirname), os.path.join(dirname, subdirname2))
for dirname, dirnames, filenames in os.walk(dir_name): for dirname, dirnames, filenames in os.walk(dir_name):
for filename in filenames: for filename in filenames:
encoded, filename2 = CharReplace(filename) encoded, filename2 = char_replace(filename)
if encoded: if encoded:
logger.info("Renaming file to: {0}.".format(filename2), 'ENCODER') logger.info("Renaming file to: {0}.".format(filename2), 'ENCODER')
os.rename(os.path.join(dirname, filename), os.path.join(dirname, filename2)) os.rename(os.path.join(dirname, filename), os.path.join(dirname, filename2))
@ -646,10 +646,10 @@ def parse_args(clientAgent, args):
return None, None, None, None, None return None, None, None, None, None
def getDirs(section, subsection, link='hard'): def get_dirs(section, subsection, link='hard'):
to_return = [] to_return = []
def processDir(path): def process_dir(path):
folders = [] folders = []
logger.info("Searching {0} for mediafiles to post-process ...".format(path)) logger.info("Searching {0} for mediafiles to post-process ...".format(path))
@ -674,7 +674,7 @@ def getDirs(section, subsection, link='hard'):
album = f.album album = f.album
# create new path # create new path
new_path = os.path.join(path, "{0} - {1}".format(sanitizeName(artist), sanitizeName(album))) new_path = os.path.join(path, "{0} - {1}".format(sanitize_name(artist), sanitize_name(album)))
elif file_ext in core.MEDIACONTAINER: elif file_ext in core.MEDIACONTAINER:
f = guessit.guessit(mediafile) f = guessit.guessit(mediafile)
@ -684,13 +684,13 @@ def getDirs(section, subsection, link='hard'):
if not title: if not title:
title = os.path.splitext(os.path.basename(mediafile))[0] title = os.path.splitext(os.path.basename(mediafile))[0]
new_path = os.path.join(path, sanitizeName(title)) new_path = os.path.join(path, sanitize_name(title))
except Exception as e: except Exception as e:
logger.error("Exception parsing name for media file: {0}: {1}".format(os.path.split(mediafile)[1], e)) logger.error("Exception parsing name for media file: {0}: {1}".format(os.path.split(mediafile)[1], e))
if not new_path: if not new_path:
title = os.path.splitext(os.path.basename(mediafile))[0] title = os.path.splitext(os.path.basename(mediafile))[0]
new_path = os.path.join(path, sanitizeName(title)) new_path = os.path.join(path, sanitize_name(title))
try: try:
new_path = new_path.encode(core.SYS_ENCODING) new_path = new_path.encode(core.SYS_ENCODING)
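
For loose media files, process_dir asks guessit for a title and then builds a per-title folder with sanitize_name, falling back to the bare file name when guessing fails. A small illustration of that step in isolation; the file name is invented and the import assumes the module layout used elsewhere in this commit:

import os

import guessit

from core.nzbToMediaUtil import sanitize_name

mediafile = '/watch/tv/Some.Show.S02E05.720p.WEB.x264.mkv'    # invented example
guess = guessit.guessit(mediafile)
title = guess.get('title') or os.path.splitext(os.path.basename(mediafile))[0]
new_path = os.path.join(os.path.dirname(mediafile), sanitize_name(title))
print(new_path)    # e.g. /watch/tv/Some Show
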
@ -704,9 +704,9 @@ def getDirs(section, subsection, link='hard'):
# create new path if it does not exist # create new path if it does not exist
if not os.path.exists(new_path): if not os.path.exists(new_path):
makeDir(new_path) make_dir(new_path)
newfile = os.path.join(new_path, sanitizeName(os.path.split(mediafile)[1])) newfile = os.path.join(new_path, sanitize_name(os.path.split(mediafile)[1]))
try: try:
newfile = newfile.encode(core.SYS_ENCODING) newfile = newfile.encode(core.SYS_ENCODING)
except: except:
@ -731,9 +731,9 @@ def getDirs(section, subsection, link='hard'):
try: try:
watch_dir = os.path.join(core.CFG[section][subsection]["watch_dir"], subsection) watch_dir = os.path.join(core.CFG[section][subsection]["watch_dir"], subsection)
if os.path.exists(watch_dir): if os.path.exists(watch_dir):
to_return.extend(processDir(watch_dir)) to_return.extend(process_dir(watch_dir))
elif os.path.exists(core.CFG[section][subsection]["watch_dir"]): elif os.path.exists(core.CFG[section][subsection]["watch_dir"]):
to_return.extend(processDir(core.CFG[section][subsection]["watch_dir"])) to_return.extend(process_dir(core.CFG[section][subsection]["watch_dir"]))
except Exception as e: except Exception as e:
logger.error("Failed to add directories from {0} for post-processing: {1}".format logger.error("Failed to add directories from {0} for post-processing: {1}".format
(core.CFG[section][subsection]["watch_dir"], e)) (core.CFG[section][subsection]["watch_dir"], e))
@ -742,7 +742,7 @@ def getDirs(section, subsection, link='hard'):
try: try:
output_directory = os.path.join(core.OUTPUTDIRECTORY, subsection) output_directory = os.path.join(core.OUTPUTDIRECTORY, subsection)
if os.path.exists(output_directory): if os.path.exists(output_directory):
to_return.extend(processDir(output_directory)) to_return.extend(process_dir(output_directory))
except Exception as e: except Exception as e:
logger.error("Failed to add directories from {0} for post-processing: {1}".format(core.OUTPUTDIRECTORY, e)) logger.error("Failed to add directories from {0} for post-processing: {1}".format(core.OUTPUTDIRECTORY, e))
@ -771,7 +771,7 @@ def onerror(func, path, exc_info):
raise Exception raise Exception
def rmDir(dirName): def remove_dir(dirName):
logger.info("Deleting {0}".format(dirName)) logger.info("Deleting {0}".format(dirName))
try: try:
shutil.rmtree(text_type(dirName), onerror=onerror) shutil.rmtree(text_type(dirName), onerror=onerror)
@ -779,19 +779,19 @@ def rmDir(dirName):
logger.error("Unable to delete folder {0}".format(dirName)) logger.error("Unable to delete folder {0}".format(dirName))
def cleanDir(path, section, subsection): def clean_dir(path, section, subsection):
cfg = dict(core.CFG[section][subsection]) cfg = dict(core.CFG[section][subsection])
if not os.path.exists(path): if not os.path.exists(path):
logger.info('Directory {0} has been processed and removed ...'.format(path), 'CLEANDIR') logger.info('Directory {0} has been processed and removed ...'.format(path), 'CLEANDIR')
return return
if core.FORCE_CLEAN and not core.FAILED: if core.FORCE_CLEAN and not core.FAILED:
logger.info('Doing Forceful Clean of {0}'.format(path), 'CLEANDIR') logger.info('Doing Forceful Clean of {0}'.format(path), 'CLEANDIR')
rmDir(path) remove_dir(path)
return return
min_size = int(cfg.get('minSize', 0)) min_size = int(cfg.get('minSize', 0))
delete_ignored = int(cfg.get('delete_ignored', 0)) delete_ignored = int(cfg.get('delete_ignored', 0))
try: try:
num_files = len(listMediaFiles(path, minSize=min_size, delete_ignored=delete_ignored)) num_files = len(list_media_files(path, minSize=min_size, delete_ignored=delete_ignored))
except: except:
num_files = 'unknown' num_files = 'unknown'
if num_files > 0: if num_files > 0:
@ -994,7 +994,7 @@ def get_nzoid(inputName):
return nzoid return nzoid
def cleanFileName(filename): def clean_file_name(filename):
"""Cleans up nzb name by removing any . and _ """Cleans up nzb name by removing any . and _
characters, along with any trailing hyphens. characters, along with any trailing hyphens.
@ -1020,7 +1020,7 @@ def is_archive_file(filename):
return False return False
def isMediaFile(mediafile, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]): def is_media_file(mediafile, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]):
file_name, file_ext = os.path.splitext(mediafile) file_name, file_ext = os.path.splitext(mediafile)
try: try:
@ -1039,14 +1039,14 @@ def isMediaFile(mediafile, media=True, audio=True, meta=True, archives=True, oth
return False return False
def listMediaFiles(path, minSize=0, delete_ignored=0, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]): def list_media_files(path, minSize=0, delete_ignored=0, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]):
files = [] files = []
if not os.path.isdir(path): if not os.path.isdir(path):
if os.path.isfile(path): # Single file downloads. if os.path.isfile(path): # Single file downloads.
cur_file = os.path.split(path)[1] cur_file = os.path.split(path)[1]
if isMediaFile(cur_file, media, audio, meta, archives, other, otherext): if is_media_file(cur_file, media, audio, meta, archives, other, otherext):
# Optionally ignore sample files # Optionally ignore sample files
if is_sample(path) or not is_minSize(path, minSize): if is_sample(path) or not is_min_size(path, minSize):
if delete_ignored == 1: if delete_ignored == 1:
try: try:
os.unlink(path) os.unlink(path)
@ -1064,11 +1064,11 @@ def listMediaFiles(path, minSize=0, delete_ignored=0, media=True, audio=True, me
# if it's a folder do it recursively # if it's a folder do it recursively
if os.path.isdir(full_cur_file) and not cur_file.startswith('.'): if os.path.isdir(full_cur_file) and not cur_file.startswith('.'):
files += listMediaFiles(full_cur_file, minSize, delete_ignored, media, audio, meta, archives, other, otherext) files += list_media_files(full_cur_file, minSize, delete_ignored, media, audio, meta, archives, other, otherext)
elif isMediaFile(cur_file, media, audio, meta, archives, other, otherext): elif is_media_file(cur_file, media, audio, meta, archives, other, otherext):
# Optionally ignore sample files # Optionally ignore sample files
if is_sample(full_cur_file) or not is_minSize(full_cur_file, minSize): if is_sample(full_cur_file) or not is_min_size(full_cur_file, minSize):
if delete_ignored == 1: if delete_ignored == 1:
try: try:
os.unlink(full_cur_file) os.unlink(full_cur_file)
@ -1160,11 +1160,11 @@ def find_imdbid(dirName, inputName, omdbApiKey):
return imdbid return imdbid
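
list_media_files, shown in the hunks above, walks a download recursively, keeps entries that pass is_media_file, and optionally deletes samples and undersized files on the way. A hypothetical call that collects only video files, mirroring how the transcoder uses it later in this commit:

videos = list_media_files('/downloads/complete/movies/Some.Movie.2018',    # invented path
                          media=True, audio=False, meta=False, archives=False)
for video in videos:
    print(video)
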
def extractFiles(src, dst=None, keep_archive=None): def extract_files(src, dst=None, keep_archive=None):
extracted_folder = [] extracted_folder = []
extracted_archive = [] extracted_archive = []
for inputFile in listMediaFiles(src, media=False, audio=False, meta=False, archives=True): for inputFile in list_media_files(src, media=False, audio=False, meta=False, archives=True):
dir_path = os.path.dirname(inputFile) dir_path = os.path.dirname(inputFile)
full_file_name = os.path.basename(inputFile) full_file_name = os.path.basename(inputFile)
archive_name = os.path.splitext(full_file_name)[0] archive_name = os.path.splitext(full_file_name)[0]
@ -1181,7 +1181,7 @@ def extractFiles(src, dst=None, keep_archive=None):
logger.error("Extraction failed for: {0}".format(full_file_name)) logger.error("Extraction failed for: {0}".format(full_file_name))
for folder in extracted_folder: for folder in extracted_folder:
for inputFile in listMediaFiles(folder, media=False, audio=False, meta=False, archives=True): for inputFile in list_media_files(folder, media=False, audio=False, meta=False, archives=True):
full_file_name = os.path.basename(inputFile) full_file_name = os.path.basename(inputFile)
archive_name = os.path.splitext(full_file_name)[0] archive_name = os.path.splitext(full_file_name)[0]
archive_name = re.sub(r"part[0-9]+", "", archive_name) archive_name = re.sub(r"part[0-9]+", "", archive_name)
@ -1258,7 +1258,7 @@ def plex_update(category):
logger.debug("Could not identify section for plex update", 'PLEX') logger.debug("Could not identify section for plex update", 'PLEX')
def backupVersionedFile(old_file, version): def backup_versioned_file(old_file, version):
num_tries = 0 num_tries = 0
new_file = '{old}.v{version}'.format(old=old_file, version=version) new_file = '{old}.v{version}'.format(old=old_file, version=version)
@ -1287,7 +1287,7 @@ def backupVersionedFile(old_file, version):
return True return True
def update_downloadInfoStatus(inputName, status): def update_download_info_status(inputName, status):
logger.db("Updating status of our download {0} in the DB to {1}".format(inputName, status)) logger.db("Updating status of our download {0} in the DB to {1}".format(inputName, status))
my_db = nzbToMediaDB.DBConnection() my_db = nzbToMediaDB.DBConnection()
@ -1295,7 +1295,7 @@ def update_downloadInfoStatus(inputName, status):
[status, datetime.date.today().toordinal(), text_type(inputName)]) [status, datetime.date.today().toordinal(), text_type(inputName)])
def get_downloadInfo(inputName, status): def get_download_info(inputName, status):
logger.db("Getting download info for {0} from the DB".format(inputName)) logger.db("Getting download info for {0} from the DB".format(inputName))
my_db = nzbToMediaDB.DBConnection() my_db = nzbToMediaDB.DBConnection()


@ -13,17 +13,17 @@ from six import iteritems, text_type, string_types
import core import core
from core import logger from core import logger
from core.nzbToMediaUtil import makeDir from core.nzbToMediaUtil import make_dir
def isVideoGood(videofile, status): def is_video_good(videofile, status):
file_name_ext = os.path.basename(videofile) file_name_ext = os.path.basename(videofile)
file_name, file_ext = os.path.splitext(file_name_ext) file_name, file_ext = os.path.splitext(file_name_ext)
disable = False disable = False
if file_ext not in core.MEDIACONTAINER or not core.FFPROBE or not core.CHECK_MEDIA or file_ext in ['.iso'] or (status > 0 and core.NOEXTRACTFAILED): if file_ext not in core.MEDIACONTAINER or not core.FFPROBE or not core.CHECK_MEDIA or file_ext in ['.iso'] or (status > 0 and core.NOEXTRACTFAILED):
disable = True disable = True
else: else:
test_details, res = getVideoDetails(core.TEST_FILE) test_details, res = get_video_details(core.TEST_FILE)
if res != 0 or test_details.get("error"): if res != 0 or test_details.get("error"):
disable = True disable = True
logger.info("DISABLED: ffprobe failed to analyse test file. Stopping corruption check.", 'TRANSCODER') logger.info("DISABLED: ffprobe failed to analyse test file. Stopping corruption check.", 'TRANSCODER')
@ -41,7 +41,7 @@ def isVideoGood(videofile, status):
return True return True
logger.info('Checking [{0}] for corruption, please stand by ...'.format(file_name_ext), 'TRANSCODER') logger.info('Checking [{0}] for corruption, please stand by ...'.format(file_name_ext), 'TRANSCODER')
video_details, result = getVideoDetails(videofile) video_details, result = get_video_details(videofile)
if result != 0: if result != 0:
logger.error("FAILED: [{0}] is corrupted!".format(file_name_ext), 'TRANSCODER') logger.error("FAILED: [{0}] is corrupted!".format(file_name_ext), 'TRANSCODER')
@ -72,7 +72,7 @@ def zip_out(file, img, bitbucket):
return procin return procin
def getVideoDetails(videofile, img=None, bitbucket=None): def get_video_details(videofile, img=None, bitbucket=None):
video_details = {} video_details = {}
result = 1 result = 1
file = videofile file = videofile
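
get_video_details returns a (details, returncode) pair, and its callers read keys such as "streams" and "error" from the details, so the collapsed body almost certainly parses ffprobe's JSON output. A rough standalone sketch using standard ffprobe flags; the project's exact command line and its image/bitbucket handling are not shown in this diff:

import json
import subprocess

def get_video_details(videofile, ffprobe='ffprobe'):
    # Ask ffprobe for machine-readable container and stream information.
    command = [ffprobe, '-v', 'quiet', '-print_format', 'json',
               '-show_format', '-show_streams', videofile]
    proc = subprocess.Popen(command, stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    try:
        video_details = json.loads(out.decode('utf-8'))
    except ValueError:
        video_details = {}
    return video_details, proc.returncode
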
@ -116,12 +116,12 @@ def getVideoDetails(videofile, img=None, bitbucket=None):
return video_details, result return video_details, result
def buildCommands(file, newDir, movieName, bitbucket): def build_commands(file, newDir, movieName, bitbucket):
if isinstance(file, string_types): if isinstance(file, string_types):
input_file = file input_file = file
if 'concat:' in file: if 'concat:' in file:
file = file.split('|')[0].replace('concat:', '') file = file.split('|')[0].replace('concat:', '')
video_details, result = getVideoDetails(file) video_details, result = get_video_details(file)
dir, name = os.path.split(file) dir, name = os.path.split(file)
name, ext = os.path.splitext(name) name, ext = os.path.splitext(name)
check = re.match("VTS_([0-9][0-9])_[0-9]+", name) check = re.match("VTS_([0-9][0-9])_[0-9]+", name)
@ -136,7 +136,7 @@ def buildCommands(file, newDir, movieName, bitbucket):
else: else:
img, data = next(iteritems(file)) img, data = next(iteritems(file))
name = data['name'] name = data['name']
video_details, result = getVideoDetails(data['files'][0], img, bitbucket) video_details, result = get_video_details(data['files'][0], img, bitbucket)
input_file = '-' input_file = '-'
file = '-' file = '-'
@ -471,7 +471,7 @@ def buildCommands(file, newDir, movieName, bitbucket):
if core.SEMBED and os.path.isfile(file): if core.SEMBED and os.path.isfile(file):
for subfile in get_subs(file): for subfile in get_subs(file):
sub_details, result = getVideoDetails(subfile) sub_details, result = get_video_details(subfile)
if not sub_details or not sub_details.get("streams"): if not sub_details or not sub_details.get("streams"):
continue continue
if core.SCODEC == "mov_text": if core.SCODEC == "mov_text":
@ -528,7 +528,7 @@ def get_subs(file):
def extract_subs(file, newfilePath, bitbucket): def extract_subs(file, newfilePath, bitbucket):
video_details, result = getVideoDetails(file) video_details, result = get_video_details(file)
if not video_details: if not video_details:
return return
@ -586,7 +586,7 @@ def extract_subs(file, newfilePath, bitbucket):
logger.error("Extracting subtitles has failed") logger.error("Extracting subtitles has failed")
def processList(List, newDir, bitbucket): def process_list(List, newDir, bitbucket):
rem_list = [] rem_list = []
new_list = [] new_list = []
combine = [] combine = []
@ -596,7 +596,7 @@ def processList(List, newDir, bitbucket):
ext = os.path.splitext(item)[1].lower() ext = os.path.splitext(item)[1].lower()
if ext in ['.iso', '.bin', '.img'] and ext not in core.IGNOREEXTENSIONS: if ext in ['.iso', '.bin', '.img'] and ext not in core.IGNOREEXTENSIONS:
logger.debug("Attempting to rip disk image: {0}".format(item), "TRANSCODER") logger.debug("Attempting to rip disk image: {0}".format(item), "TRANSCODER")
new_list.extend(ripISO(item, newDir, bitbucket)) new_list.extend(rip_iso(item, newDir, bitbucket))
rem_list.append(item) rem_list.append(item)
elif re.match(".+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", item) and '.vob' not in core.IGNOREEXTENSIONS: elif re.match(".+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", item) and '.vob' not in core.IGNOREEXTENSIONS:
logger.debug("Found VIDEO_TS image file: {0}".format(item), "TRANSCODER") logger.debug("Found VIDEO_TS image file: {0}".format(item), "TRANSCODER")
@ -614,9 +614,9 @@ def processList(List, newDir, bitbucket):
else: else:
continue continue
if vts_path: if vts_path:
new_list.extend(combineVTS(vts_path)) new_list.extend(combine_vts(vts_path))
if combine: if combine:
new_list.extend(combineCD(combine)) new_list.extend(combine_cd(combine))
for file in new_list: for file in new_list:
if isinstance(file, string_types) and 'concat:' not in file and not os.path.isfile(file): if isinstance(file, string_types) and 'concat:' not in file and not os.path.isfile(file):
success = False success = False
@ -633,7 +633,7 @@ def processList(List, newDir, bitbucket):
return List, rem_list, new_list, success return List, rem_list, new_list, success
def ripISO(item, newDir, bitbucket): def rip_iso(item, newDir, bitbucket):
new_files = [] new_files = []
failure_dir = 'failure' failure_dir = 'failure'
# Mount the ISO in your OS and call combineVTS. # Mount the ISO in your OS and call combineVTS.
@ -681,7 +681,7 @@ def ripISO(item, newDir, bitbucket):
return new_files return new_files
def combineVTS(vtsPath): def combine_vts(vtsPath):
new_files = [] new_files = []
combined = '' combined = ''
for n in range(99): for n in range(99):
@ -705,7 +705,7 @@ def combineVTS(vtsPath):
return new_files return new_files
def combineCD(combine): def combine_cd(combine):
new_files = [] new_files = []
for item in set([re.match("(.+)[cC][dD][0-9].", item).groups()[0] for item in combine]): for item in set([re.match("(.+)[cC][dD][0-9].", item).groups()[0] for item in combine]):
concat = '' concat = ''
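
combine_cd groups multi-part rips by the prefix in front of "cd1", "cd2" and so on; together with the "concat:" checks in build_commands, this points at ffmpeg's concat protocol, which byte-joins inputs such as MPEG-PS/VOB parts. An invented example of building that input string:

# Hypothetical VIDEO_TS rip split across three VOB parts.
parts = ['VTS_01_1.VOB', 'VTS_01_2.VOB', 'VTS_01_3.VOB']
concat_input = 'concat:' + '|'.join(parts)
print(concat_input)    # concat:VTS_01_1.VOB|VTS_01_2.VOB|VTS_01_3.VOB
# ffmpeg then accepts the whole thing as a single input:
#   ffmpeg -i "concat:VTS_01_1.VOB|VTS_01_2.VOB|VTS_01_3.VOB" out.mkv
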
@ -728,17 +728,17 @@ def print_cmd(command):
logger.debug("calling command:{0}".format(cmd)) logger.debug("calling command:{0}".format(cmd))
def Transcode_directory(dirName): def transcode_directory(dirName):
if not core.FFMPEG: if not core.FFMPEG:
return 1, dirName return 1, dirName
logger.info("Checking for files to be transcoded") logger.info("Checking for files to be transcoded")
final_result = 0 # initialize as successful final_result = 0 # initialize as successful
if core.OUTPUTVIDEOPATH: if core.OUTPUTVIDEOPATH:
new_dir = core.OUTPUTVIDEOPATH new_dir = core.OUTPUTVIDEOPATH
makeDir(new_dir) make_dir(new_dir)
name = os.path.splitext(os.path.split(dirName)[1])[0] name = os.path.splitext(os.path.split(dirName)[1])[0]
new_dir = os.path.join(new_dir, name) new_dir = os.path.join(new_dir, name)
makeDir(new_dir) make_dir(new_dir)
else: else:
new_dir = dirName new_dir = dirName
if platform.system() == 'Windows': if platform.system() == 'Windows':
@ -746,8 +746,8 @@ def Transcode_directory(dirName):
else: else:
bitbucket = open('/dev/null') bitbucket = open('/dev/null')
movie_name = os.path.splitext(os.path.split(dirName)[1])[0] movie_name = os.path.splitext(os.path.split(dirName)[1])[0]
file_list = core.listMediaFiles(dirName, media=True, audio=False, meta=False, archives=False) file_list = core.list_media_files(dirName, media=True, audio=False, meta=False, archives=False)
file_list, rem_list, new_list, success = processList(file_list, new_dir, bitbucket) file_list, rem_list, new_list, success = process_list(file_list, new_dir, bitbucket)
if not success: if not success:
bitbucket.close() bitbucket.close()
return 1, dirName return 1, dirName
@ -755,7 +755,7 @@ def Transcode_directory(dirName):
for file in file_list: for file in file_list:
if isinstance(file, string_types) and os.path.splitext(file)[1] in core.IGNOREEXTENSIONS: if isinstance(file, string_types) and os.path.splitext(file)[1] in core.IGNOREEXTENSIONS:
continue continue
command = buildCommands(file, new_dir, movie_name, bitbucket) command = build_commands(file, new_dir, movie_name, bitbucket)
newfile_path = command[-1] newfile_path = command[-1]
# transcoding files may remove the original file, so make sure to extract subtitles first # transcoding files may remove the original file, so make sure to extract subtitles first


@ -635,7 +635,7 @@ from core.autoProcess.autoProcessMovie import autoProcessMovie
from core.autoProcess.autoProcessMusic import autoProcessMusic from core.autoProcess.autoProcessMusic import autoProcessMusic
from core.autoProcess.autoProcessTV import autoProcessTV from core.autoProcess.autoProcessTV import autoProcessTV
from core.nzbToMediaUserScript import external_script from core.nzbToMediaUserScript import external_script
from core.nzbToMediaUtil import CharReplace, cleanDir, convert_to_ascii, extractFiles, getDirs, get_downloadInfo, get_nzoid, plex_update, update_downloadInfoStatus from core.nzbToMediaUtil import char_replace, clean_dir, convert_to_ascii, extract_files, get_dirs, get_download_info, get_nzoid, plex_update, update_download_info_status
try: try:
text_type = unicode text_type = unicode
@ -666,8 +666,8 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down
input_name1 = input_name input_name1 = input_name
try: try:
encoded, input_directory1 = CharReplace(input_directory) encoded, input_directory1 = char_replace(input_directory)
encoded, input_name1 = CharReplace(input_name) encoded, input_name1 = char_replace(input_name)
except: except:
pass pass
@ -727,7 +727,7 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down
if extract == 1: if extract == 1:
logger.debug('Checking for archives to extract in directory: {0}'.format(input_directory)) logger.debug('Checking for archives to extract in directory: {0}'.format(input_directory))
extractFiles(input_directory) extract_files(input_directory)
logger.info("Calling {0}:{1} to post-process:{2}".format(section_name, input_category, input_name)) logger.info("Calling {0}:{1} to post-process:{2}".format(section_name, input_category, input_name))
@ -735,13 +735,13 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down
result = autoProcessMovie().process(section_name, input_directory, input_name, status, clientAgent, download_id, result = autoProcessMovie().process(section_name, input_directory, input_name, status, clientAgent, download_id,
input_category, failureLink) input_category, failureLink)
elif section_name in ["SickBeard", "NzbDrone", "Sonarr"]: elif section_name in ["SickBeard", "NzbDrone", "Sonarr"]:
result = autoProcessTV().processEpisode(section_name, input_directory, input_name, status, clientAgent, result = autoProcessTV().process_episode(section_name, input_directory, input_name, status, clientAgent,
download_id, input_category, failureLink) download_id, input_category, failureLink)
elif section_name in ["HeadPhones", "Lidarr"]: elif section_name in ["HeadPhones", "Lidarr"]:
result = autoProcessMusic().process(section_name, input_directory, input_name, status, clientAgent, input_category) result = autoProcessMusic().process(section_name, input_directory, input_name, status, clientAgent, input_category)
elif section_name == "Mylar": elif section_name == "Mylar":
result = autoProcessComics().processEpisode(section_name, input_directory, input_name, status, clientAgent, result = autoProcessComics().process_episode(section_name, input_directory, input_name, status, clientAgent,
input_category) input_category)
elif section_name == "Gamez": elif section_name == "Gamez":
result = autoProcessGames().process(section_name, input_directory, input_name, status, clientAgent, input_category) result = autoProcessGames().process(section_name, input_directory, input_name, status, clientAgent, input_category)
elif section_name == 'UserScript': elif section_name == 'UserScript':
@ -754,10 +754,10 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down
if result[0] == 0: if result[0] == 0:
if clientAgent != 'manual': if clientAgent != 'manual':
# update download status in our DB # update download status in our DB
update_downloadInfoStatus(input_name, 1) update_download_info_status(input_name, 1)
if section_name not in ['UserScript', 'NzbDrone', 'Sonarr', 'Radarr', 'Lidarr']: if section_name not in ['UserScript', 'NzbDrone', 'Sonarr', 'Radarr', 'Lidarr']:
# cleanup our processing folders of any misc unwanted files and empty directories # cleanup our processing folders of any misc unwanted files and empty directories
cleanDir(input_directory, section_name, input_category) clean_dir(input_directory, section_name, input_category)
return result return result
@ -879,11 +879,11 @@ def main(args, section=None):
for subsection in subsections: for subsection in subsections:
if not core.CFG[section][subsection].isenabled(): if not core.CFG[section][subsection].isenabled():
continue continue
for dir_name in getDirs(section, subsection, link='move'): for dir_name in get_dirs(section, subsection, link='move'):
logger.info("Starting manual run for {0}:{1} - Folder: {2}".format(section, subsection, dir_name)) logger.info("Starting manual run for {0}:{1} - Folder: {2}".format(section, subsection, dir_name))
logger.info("Checking database for download info for {0} ...".format(os.path.basename(dir_name))) logger.info("Checking database for download info for {0} ...".format(os.path.basename(dir_name)))
core.DOWNLOADINFO = get_downloadInfo(os.path.basename(dir_name), 0) core.DOWNLOADINFO = get_download_info(os.path.basename(dir_name), 0)
if core.DOWNLOADINFO: if core.DOWNLOADINFO:
logger.info("Found download info for {0}, " logger.info("Found download info for {0}, "
"setting variables now ...".format "setting variables now ...".format


@ -5,7 +5,7 @@ import guessit
import requests import requests
import core import core
from core.nzbToMediaAutoFork import autoFork from core.nzbToMediaAutoFork import auto_fork
from core.nzbToMediaUtil import server_responding from core.nzbToMediaUtil import server_responding
from core.transcoder import transcoder from core.transcoder import transcoder
@ -15,7 +15,7 @@ core.initialize()
#label = core.TORRENT_CLASS.core.get_torrent_status("f33a9c4b15cbd9170722d700069af86746817ade", ["label"]).get()['label'] #label = core.TORRENT_CLASS.core.get_torrent_status("f33a9c4b15cbd9170722d700069af86746817ade", ["label"]).get()['label']
#print label #print label
if transcoder.isVideoGood(core.TEST_FILE, 0): if transcoder.is_video_good(core.TEST_FILE, 0):
print("FFPROBE Works") print("FFPROBE Works")
else: else:
print("FFPROBE FAILED") print("FFPROBE FAILED")
@ -25,7 +25,7 @@ print(test)
section = core.CFG.findsection('tv').isenabled() section = core.CFG.findsection('tv').isenabled()
print(section) print(section)
print(len(section)) print(len(section))
fork, fork_params = autoFork('SickBeard', 'tv') fork, fork_params = auto_fork('SickBeard', 'tv')
if server_responding("http://127.0.0.1:5050"): if server_responding("http://127.0.0.1:5050"):
print("CouchPotato Running") print("CouchPotato Running")