PEP8 Argument should be lowercase

This commit is contained in:
Labrys of Knossos 2018-12-16 23:33:31 -05:00
parent 7f2a4d2605
commit 41fa636fc2
15 changed files with 326 additions and 350 deletions

View file

@ -11,16 +11,12 @@ from core.nzbToMediaUtil import char_replace, convert_to_ascii, plex_update, rep
from libs.six import text_type from libs.six import text_type
def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID, clientAgent): def process_torrent(input_directory, input_name, input_category, input_hash, input_id, client_agent):
input_directory = inputDirectory
input_name = inputName
input_category = inputCategory
input_hash = inputHash
status = 1 # 1 = failed | 0 = success status = 1 # 1 = failed | 0 = success
root = 0 root = 0
found_file = 0 found_file = 0
if clientAgent != 'manual' and not core.DOWNLOADINFO: if client_agent != 'manual' and not core.DOWNLOADINFO:
logger.debug('Adding TORRENT download info for directory {0} to database'.format(input_directory)) logger.debug('Adding TORRENT download info for directory {0} to database'.format(input_directory))
my_db = nzbToMediaDB.DBConnection() my_db = nzbToMediaDB.DBConnection()
@ -37,8 +33,8 @@ def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID
control_value_dict = {"input_directory": text_type(input_directory1)} control_value_dict = {"input_directory": text_type(input_directory1)}
new_value_dict = {"input_name": text_type(input_name1), new_value_dict = {"input_name": text_type(input_name1),
"input_hash": text_type(input_hash), "input_hash": text_type(input_hash),
"input_id": text_type(inputID), "input_id": text_type(input_id),
"client_agent": text_type(clientAgent), "client_agent": text_type(client_agent),
"status": 0, "status": 0,
"last_update": datetime.date.today().toordinal() "last_update": datetime.date.today().toordinal()
} }
@ -102,8 +98,8 @@ def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID
extensions = section.get('user_script_mediaExtensions', "").lower().split(',') extensions = section.get('user_script_mediaExtensions', "").lower().split(',')
unique_path = int(section.get("unique_path", 1)) unique_path = int(section.get("unique_path", 1))
if clientAgent != 'manual': if client_agent != 'manual':
core.pause_torrent(clientAgent, input_hash, inputID, input_name) core.pause_torrent(client_agent, input_hash, input_id, input_name)
# In case input is not directory, make sure to create one. # In case input is not directory, make sure to create one.
# This way Processing is isolated. # This way Processing is isolated.
@ -237,21 +233,21 @@ def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID
elif section_name in ['CouchPotato', 'Radarr']: elif section_name in ['CouchPotato', 'Radarr']:
result = core.Movie().process(section_name, output_destination, input_name, result = core.Movie().process(section_name, output_destination, input_name,
status, clientAgent, input_hash, input_category) status, client_agent, input_hash, input_category)
elif section_name in ['SickBeard', 'NzbDrone', 'Sonarr']: elif section_name in ['SickBeard', 'NzbDrone', 'Sonarr']:
if input_hash: if input_hash:
input_hash = input_hash.upper() input_hash = input_hash.upper()
result = core.TV().process_episode(section_name, output_destination, input_name, result = core.TV().process_episode(section_name, output_destination, input_name,
status, clientAgent, input_hash, input_category) status, client_agent, input_hash, input_category)
elif section_name in ['HeadPhones', 'Lidarr']: elif section_name in ['HeadPhones', 'Lidarr']:
result = core.Music().process(section_name, output_destination, input_name, result = core.Music().process(section_name, output_destination, input_name,
status, clientAgent, input_category) status, client_agent, input_category)
elif section_name == 'Mylar': elif section_name == 'Mylar':
result = core.Comic().process_episode(section_name, output_destination, input_name, result = core.Comic().process_episode(section_name, output_destination, input_name,
status, clientAgent, input_category) status, client_agent, input_category)
elif section_name == 'Gamez': elif section_name == 'Gamez':
result = core.Game().process(section_name, output_destination, input_name, result = core.Game().process(section_name, output_destination, input_name,
status, clientAgent, input_category) status, client_agent, input_category)
plex_update(input_category) plex_update(input_category)
@ -259,13 +255,13 @@ def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID
if not core.TORRENT_RESUME_ON_FAILURE: if not core.TORRENT_RESUME_ON_FAILURE:
logger.error("A problem was reported in the autoProcess* script. " logger.error("A problem was reported in the autoProcess* script. "
"Torrent won't resume seeding (settings)") "Torrent won't resume seeding (settings)")
elif clientAgent != 'manual': elif client_agent != 'manual':
logger.error("A problem was reported in the autoProcess* script. " logger.error("A problem was reported in the autoProcess* script. "
"If torrent was paused we will resume seeding") "If torrent was paused we will resume seeding")
core.resume_torrent(clientAgent, input_hash, inputID, input_name) core.resume_torrent(client_agent, input_hash, input_id, input_name)
else: else:
if clientAgent != 'manual': if client_agent != 'manual':
# update download status in our DB # update download status in our DB
core.update_download_info_status(input_name, 1) core.update_download_info_status(input_name, 1)
@ -276,7 +272,7 @@ def process_torrent(inputDirectory, inputName, inputCategory, inputHash, inputID
for file in files: for file in files:
logger.debug('Checking symlink: {0}'.format(os.path.join(dirpath, file))) logger.debug('Checking symlink: {0}'.format(os.path.join(dirpath, file)))
replace_links(os.path.join(dirpath, file)) replace_links(os.path.join(dirpath, file))
core.remove_torrent(clientAgent, input_hash, inputID, input_name) core.remove_torrent(client_agent, input_hash, input_id, input_name)
if not section_name == 'UserScript': if not section_name == 'UserScript':
# for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN # for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN

View file

@ -12,14 +12,11 @@ requests.packages.urllib3.disable_warnings()
class Comic(object): class Comic(object):
def process_episode(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): def process_episode(self, section, dir_name, input_name=None, status=0, client_agent='manual', input_category=None):
dir_name = dirName
input_name = inputName
apc_version = "2.04" apc_version = "2.04"
comicrn_version = "1.01" comicrn_version = "1.01"
cfg = dict(core.CFG[section][inputCategory]) cfg = dict(core.CFG[section][input_category])
host = cfg["host"] host = cfg["host"]
port = cfg["port"] port = cfg["port"]

View file

@ -13,12 +13,10 @@ requests.packages.urllib3.disable_warnings()
class Game(object): class Game(object):
def process(self, section, dirName, inputName=None, status=0, clientAgent='manual', inputCategory=None): def process(self, section, dir_name, input_name=None, status=0, client_agent='manual', input_category=None):
dir_name = dirName
input_name = inputName
status = int(status) status = int(status)
cfg = dict(core.CFG[section][inputCategory]) cfg = dict(core.CFG[section][input_category])
host = cfg["host"] host = cfg["host"]
port = cfg["port"] port = cfg["port"]

View file

@ -16,23 +16,23 @@ requests.packages.urllib3.disable_warnings()
class Movie(object): class Movie(object):
def get_release(self, baseURL, imdbid=None, download_id=None, release_id=None): def get_release(self, base_url, imdb_id=None, download_id=None, release_id=None):
results = {} results = {}
params = {} params = {}
# determine cmd and params to send to CouchPotato to get our results # determine cmd and params to send to CouchPotato to get our results
section = 'movies' section = 'movies'
cmd = "media.list" cmd = "media.list"
if release_id or imdbid: if release_id or imdb_id:
section = 'media' section = 'media'
cmd = "media.get" cmd = "media.get"
params['id'] = release_id or imdbid params['id'] = release_id or imdb_id
if not (release_id or imdbid or download_id): if not (release_id or imdb_id or download_id):
logger.debug("No information available to filter CP results") logger.debug("No information available to filter CP results")
return results return results
url = "{0}{1}".format(baseURL, cmd) url = "{0}{1}".format(base_url, cmd)
logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params)) logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params))
try: try:
@ -145,11 +145,9 @@ class Movie(object):
# ValueError catches simplejson's JSONDecodeError and json's ValueError # ValueError catches simplejson's JSONDecodeError and json's ValueError
return False return False
def process(self, section, dirName, inputName=None, status=0, clientAgent="manual", download_id="", inputCategory=None, failureLink=None): def process(self, section, dir_name, input_name=None, status=0, client_agent="manual", download_id="", input_category=None, failure_link=None):
dir_name = dirName
input_name = inputName
cfg = dict(core.CFG[section][inputCategory]) cfg = dict(core.CFG[section][input_category])
host = cfg["host"] host = cfg["host"]
port = cfg["port"] port = cfg["port"]
@ -244,10 +242,10 @@ class Movie(object):
logger.info("Status shown as success from Downloader, but corrupt video files found. Setting as failed.", section) logger.info("Status shown as success from Downloader, but corrupt video files found. Setting as failed.", section)
if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
print('[NZB] MARK=BAD') print('[NZB] MARK=BAD')
if failureLink: if failure_link:
failureLink += '&corrupt=true' failure_link += '&corrupt=true'
status = 1 status = 1
elif clientAgent == "manual": elif client_agent == "manual":
logger.warning("No media files found in directory {0} to manually process.".format(dir_name), section) logger.warning("No media files found in directory {0} to manually process.".format(dir_name), section)
return [0, ""] # Success (as far as this script is concerned) return [0, ""] # Success (as far as this script is concerned)
else: else:
@ -275,7 +273,7 @@ class Movie(object):
if not release and ".cp(tt" not in video and imdbid: if not release and ".cp(tt" not in video and imdbid:
video_name, video_ext = os.path.splitext(video) video_name, video_ext = os.path.splitext(video)
video2 = "{0}.cp({1}){2}".format(video_name, imdbid, video_ext) video2 = "{0}.cp({1}){2}".format(video_name, imdbid, video_ext)
if not (clientAgent in [core.TORRENT_CLIENTAGENT, 'manual'] and core.USELINK == 'move-sym'): if not (client_agent in [core.TORRENT_CLIENTAGENT, 'manual'] and core.USELINK == 'move-sym'):
logger.debug('Renaming: {0} to: {1}'.format(video, video2)) logger.debug('Renaming: {0} to: {1}'.format(video, video2))
os.rename(video, video2) os.rename(video, video2)
@ -285,7 +283,7 @@ class Movie(object):
params = {} params = {}
if download_id and release_id: if download_id and release_id:
params['downloader'] = downloader or clientAgent params['downloader'] = downloader or client_agent
params['download_id'] = download_id params['download_id'] = download_id
params['media_folder'] = remote_dir(dir_name) if remote_path else dir_name params['media_folder'] = remote_dir(dir_name) if remote_path else dir_name
@ -343,8 +341,8 @@ class Movie(object):
else: else:
core.FAILED = True core.FAILED = True
logger.postprocess("FAILED DOWNLOAD DETECTED FOR {0}".format(input_name), section) logger.postprocess("FAILED DOWNLOAD DETECTED FOR {0}".format(input_name), section)
if failureLink: if failure_link:
report_nzb(failureLink, clientAgent) report_nzb(failure_link, client_agent)
if section == "Radarr": if section == "Radarr":
logger.postprocess("FAILED: The download failed. Sending failed download to {0} for CDH processing".format(section), section) logger.postprocess("FAILED: The download failed. Sending failed download to {0} for CDH processing".format(section), section)

View file

@ -32,8 +32,8 @@ class Music(object):
logger.error("{0} did not return expected json data.".format(section), section) logger.error("{0} did not return expected json data.".format(section), section)
return None return None
def get_status(self, url, apikey, dirName): def get_status(self, url, apikey, dir_name):
logger.debug("Attempting to get current status for release:{0}".format(os.path.basename(dirName))) logger.debug("Attempting to get current status for release:{0}".format(os.path.basename(dir_name)))
params = { params = {
'apikey': apikey, 'apikey': apikey,
@ -55,13 +55,13 @@ class Music(object):
return None return None
for album in result: for album in result:
if os.path.basename(dirName) == album['FolderName']: if os.path.basename(dir_name) == album['FolderName']:
return album["Status"].lower() return album["Status"].lower()
def force_process(self, params, url, apikey, inputName, dirName, section, wait_for): def force_process(self, params, url, apikey, input_name, dir_name, section, wait_for):
release_status = self.get_status(url, apikey, dirName) release_status = self.get_status(url, apikey, dir_name)
if not release_status: if not release_status:
logger.error("Could not find a status for {0}, is it in the wanted list ?".format(inputName), section) logger.error("Could not find a status for {0}, is it in the wanted list ?".format(input_name), section)
logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params), section) logger.debug("Opening URL: {0} with PARAMS: {1}".format(url, params), section)
@ -77,32 +77,29 @@ class Music(object):
logger.error("Server returned status {0}".format(r.status_code), section) logger.error("Server returned status {0}".format(r.status_code), section)
return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)] return [1, "{0}: Failed to post-process - Server returned status {1}".format(section, r.status_code)]
elif r.text == "OK": elif r.text == "OK":
logger.postprocess("SUCCESS: Post-Processing started for {0} in folder {1} ...".format(inputName, dirName), section) logger.postprocess("SUCCESS: Post-Processing started for {0} in folder {1} ...".format(input_name, dir_name), section)
else: else:
logger.error("FAILED: Post-Processing has NOT started for {0} in folder {1}. exiting!".format(inputName, dirName), section) logger.error("FAILED: Post-Processing has NOT started for {0} in folder {1}. exiting!".format(input_name, dir_name), section)
return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)] return [1, "{0}: Failed to post-process - Returned log from {1} was not as expected.".format(section, section)]
# we will now wait for this album to be processed before returning to TorrentToMedia and unpausing. # we will now wait for this album to be processed before returning to TorrentToMedia and unpausing.
timeout = time.time() + 60 * wait_for timeout = time.time() + 60 * wait_for
while time.time() < timeout: while time.time() < timeout:
current_status = self.get_status(url, apikey, dirName) current_status = self.get_status(url, apikey, dir_name)
if current_status is not None and current_status != release_status: # Something has changed. CPS must have processed this movie. if current_status is not None and current_status != release_status: # Something has changed. CPS must have processed this movie.
logger.postprocess("SUCCESS: This release is now marked as status [{0}]".format(current_status), section) logger.postprocess("SUCCESS: This release is now marked as status [{0}]".format(current_status), section)
return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] return [0, "{0}: Successfully post-processed {1}".format(section, input_name)]
if not os.path.isdir(dirName): if not os.path.isdir(dir_name):
logger.postprocess("SUCCESS: The input directory {0} has been removed Processing must have finished.".format(dirName), section) logger.postprocess("SUCCESS: The input directory {0} has been removed Processing must have finished.".format(dir_name), section)
return [0, "{0}: Successfully post-processed {1}".format(section, inputName)] return [0, "{0}: Successfully post-processed {1}".format(section, input_name)]
time.sleep(10 * wait_for) time.sleep(10 * wait_for)
# The status hasn't changed. # The status hasn't changed.
return [2, "no change"] return [2, "no change"]
def process(self, section, dirName, inputName=None, status=0, clientAgent="manual", inputCategory=None): def process(self, section, dir_name, input_name=None, status=0, client_agent="manual", input_category=None):
dir_name = dirName
input_name = inputName
status = int(status) status = int(status)
cfg = dict(core.CFG[section][inputCategory]) cfg = dict(core.CFG[section][input_category])
host = cfg["host"] host = cfg["host"]
port = cfg["port"] port = cfg["port"]

View file

@ -52,9 +52,9 @@ class TV(object):
# ValueError catches simplejson's JSONDecodeError and json's ValueError # ValueError catches simplejson's JSONDecodeError and json's ValueError
return False return False
def process_episode(self, section, dirName, inputName=None, failed=False, clientAgent="manual", download_id=None, inputCategory=None, failureLink=None): def process_episode(self, section, dir_name, input_name=None, failed=False, client_agent="manual", download_id=None, input_category=None, failure_link=None):
cfg = dict(core.CFG[section][inputCategory]) cfg = dict(core.CFG[section][input_category])
host = cfg["host"] host = cfg["host"]
port = cfg["port"] port = cfg["port"]
@ -67,7 +67,7 @@ class TV(object):
if server_responding("{0}{1}:{2}{3}".format(protocol, host, port, web_root)): if server_responding("{0}{1}:{2}{3}".format(protocol, host, port, web_root)):
# auto-detect correct fork # auto-detect correct fork
fork, fork_params = auto_fork(section, inputCategory) fork, fork_params = auto_fork(section, input_category)
elif not username and not apikey: elif not username and not apikey:
logger.info('No SickBeard username or Sonarr apikey entered. Performing transcoder functions only') logger.info('No SickBeard username or Sonarr apikey entered. Performing transcoder functions only')
fork, fork_params = "None", {} fork, fork_params = "None", {}
@ -78,7 +78,7 @@ class TV(object):
delete_failed = int(cfg.get("delete_failed", 0)) delete_failed = int(cfg.get("delete_failed", 0))
nzb_extraction_by = cfg.get("nzbExtractionBy", "Downloader") nzb_extraction_by = cfg.get("nzbExtractionBy", "Downloader")
process_method = cfg.get("process_method") process_method = cfg.get("process_method")
if clientAgent == core.TORRENT_CLIENTAGENT and core.USELINK == "move-sym": if client_agent == core.TORRENT_CLIENTAGENT and core.USELINK == "move-sym":
process_method = "symlink" process_method = "symlink"
remote_path = int(cfg.get("remote_path", 0)) remote_path = int(cfg.get("remote_path", 0))
wait_for = int(cfg.get("wait_for", 2)) wait_for = int(cfg.get("wait_for", 2))
@ -113,7 +113,7 @@ class TV(object):
if e.errno != errno.EEXIST: if e.errno != errno.EEXIST:
raise raise
if 'process_method' not in fork_params or (clientAgent in ['nzbget', 'sabnzbd'] and nzb_extraction_by != "Destination"): if 'process_method' not in fork_params or (client_agent in ['nzbget', 'sabnzbd'] and nzb_extraction_by != "Destination"):
if input_name: if input_name:
process_all_exceptions(input_name, dir_name) process_all_exceptions(input_name, dir_name)
input_name, dir_name = convert_to_ascii(input_name, dir_name) input_name, dir_name = convert_to_ascii(input_name, dir_name)
@ -147,9 +147,9 @@ class TV(object):
failed = 1 failed = 1
if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0': if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
print('[NZB] MARK=BAD') print('[NZB] MARK=BAD')
if failureLink: if failure_link:
failureLink += '&corrupt=true' failure_link += '&corrupt=true'
elif clientAgent == "manual": elif client_agent == "manual":
logger.warning("No media files found in directory {0} to manually process.".format(dir_name), section) logger.warning("No media files found in directory {0} to manually process.".format(dir_name), section)
return [0, ""] # Success (as far as this script is concerned) return [0, ""] # Success (as far as this script is concerned)
elif nzb_extraction_by == "Destination": elif nzb_extraction_by == "Destination":
@ -248,8 +248,8 @@ class TV(object):
logger.postprocess("SUCCESS: The download succeeded, sending a post-process request", section) logger.postprocess("SUCCESS: The download succeeded, sending a post-process request", section)
else: else:
core.FAILED = True core.FAILED = True
if failureLink: if failure_link:
report_nzb(failureLink, clientAgent) report_nzb(failure_link, client_agent)
if 'failed' in fork_params: if 'failed' in fork_params:
logger.postprocess("FAILED: The download failed. Sending 'failed' process request to {0} branch".format(fork), section) logger.postprocess("FAILED: The download failed. Sending 'failed' process request to {0} branch".format(fork), section)
elif section == "NzbDrone": elif section == "NzbDrone":

View file

@ -11,7 +11,7 @@ from time import sleep
import core import core
def extract(filePath, outputDestination): def extract(file_path, output_destination):
success = 0 success = 0
# Using Windows # Using Windows
if platform.system() == 'Windows': if platform.system() == 'Windows':
@ -69,7 +69,7 @@ def extract(filePath, outputDestination):
if not extract_commands: if not extract_commands:
core.logger.warning("EXTRACTOR: No archive extracting programs found, plugin will be disabled") core.logger.warning("EXTRACTOR: No archive extracting programs found, plugin will be disabled")
ext = os.path.splitext(filePath) ext = os.path.splitext(file_path)
cmd = [] cmd = []
if ext[1] in (".gz", ".bz2", ".lzma"): if ext[1] in (".gz", ".bz2", ".lzma"):
# Check if this is a tar # Check if this is a tar
@ -88,7 +88,7 @@ def extract(filePath, outputDestination):
return False return False
# Create outputDestination folder # Create outputDestination folder
core.make_dir(outputDestination) core.make_dir(output_destination)
if core.PASSWORDSFILE and os.path.isfile(os.path.normpath(core.PASSWORDSFILE)): if core.PASSWORDSFILE and os.path.isfile(os.path.normpath(core.PASSWORDSFILE)):
passwords = [line.strip() for line in open(os.path.normpath(core.PASSWORDSFILE))] passwords = [line.strip() for line in open(os.path.normpath(core.PASSWORDSFILE))]
@ -96,25 +96,25 @@ def extract(filePath, outputDestination):
passwords = [] passwords = []
core.logger.info("Extracting {file} to {destination}".format core.logger.info("Extracting {file} to {destination}".format
(file=filePath, destination=outputDestination)) (file=file_path, destination=output_destination))
core.logger.debug("Extracting {cmd} {file} {destination}".format core.logger.debug("Extracting {cmd} {file} {destination}".format
(cmd=cmd, file=filePath, destination=outputDestination)) (cmd=cmd, file=file_path, destination=output_destination))
orig_files = [] orig_files = []
orig_dirs = [] orig_dirs = []
for dir, subdirs, files in os.walk(outputDestination): for dir, subdirs, files in os.walk(output_destination):
for subdir in subdirs: for subdir in subdirs:
orig_dirs.append(os.path.join(dir, subdir)) orig_dirs.append(os.path.join(dir, subdir))
for file in files: for file in files:
orig_files.append(os.path.join(dir, file)) orig_files.append(os.path.join(dir, file))
pwd = os.getcwd() # Get our Present Working Directory pwd = os.getcwd() # Get our Present Working Directory
os.chdir(outputDestination) # Not all unpack commands accept full paths, so just extract into this directory os.chdir(output_destination) # Not all unpack commands accept full paths, so just extract into this directory
devnull = open(os.devnull, 'w') devnull = open(os.devnull, 'w')
try: # now works same for nt and *nix try: # now works same for nt and *nix
info = None info = None
cmd.append(filePath) # add filePath to final cmd arg. cmd.append(file_path) # add filePath to final cmd arg.
if platform.system() == 'Windows': if platform.system() == 'Windows':
info = subprocess.STARTUPINFO() info = subprocess.STARTUPINFO()
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
@ -126,7 +126,7 @@ def extract(filePath, outputDestination):
res = p.wait() res = p.wait()
if res == 0: # Both Linux and Windows return 0 for successful. if res == 0: # Both Linux and Windows return 0 for successful.
core.logger.info("EXTRACTOR: Extraction was successful for {file} to {destination}".format core.logger.info("EXTRACTOR: Extraction was successful for {file} to {destination}".format
(file=filePath, destination=outputDestination)) (file=file_path, destination=output_destination))
success = 1 success = 1
elif len(passwords) > 0: elif len(passwords) > 0:
core.logger.info("EXTRACTOR: Attempting to extract with passwords") core.logger.info("EXTRACTOR: Attempting to extract with passwords")
@ -142,7 +142,7 @@ def extract(filePath, outputDestination):
if (res >= 0 and platform == 'Windows') or res == 0: if (res >= 0 and platform == 'Windows') or res == 0:
core.logger.info("EXTRACTOR: Extraction was successful " core.logger.info("EXTRACTOR: Extraction was successful "
"for {file} to {destination} using password: {pwd}".format "for {file} to {destination} using password: {pwd}".format
(file=filePath, destination=outputDestination, pwd=password)) (file=file_path, destination=output_destination, pwd=password))
success = 1 success = 1
break break
else: else:
@ -150,7 +150,7 @@ def extract(filePath, outputDestination):
except: except:
core.logger.error("EXTRACTOR: Extraction failed for {file}. " core.logger.error("EXTRACTOR: Extraction failed for {file}. "
"Could not call command {cmd}".format "Could not call command {cmd}".format
(file=filePath, cmd=cmd)) (file=file_path, cmd=cmd))
os.chdir(pwd) os.chdir(pwd)
return False return False
@ -159,8 +159,8 @@ def extract(filePath, outputDestination):
if success: if success:
# sleep to let files finish writing to disk # sleep to let files finish writing to disk
sleep(3) sleep(3)
perms = stat.S_IMODE(os.lstat(os.path.split(filePath)[0]).st_mode) perms = stat.S_IMODE(os.lstat(os.path.split(file_path)[0]).st_mode)
for dir, subdirs, files in os.walk(outputDestination): for dir, subdirs, files in os.walk(output_destination):
for subdir in subdirs: for subdir in subdirs:
if not os.path.join(dir, subdir) in orig_files: if not os.path.join(dir, subdir) in orig_files:
try: try:
@ -170,12 +170,12 @@ def extract(filePath, outputDestination):
for file in files: for file in files:
if not os.path.join(dir, file) in orig_files: if not os.path.join(dir, file) in orig_files:
try: try:
shutil.copymode(filePath, os.path.join(dir, file)) shutil.copymode(file_path, os.path.join(dir, file))
except: except:
pass pass
return True return True
else: else:
core.logger.error("EXTRACTOR: Extraction failed for {file}. " core.logger.error("EXTRACTOR: Extraction failed for {file}. "
"Result was {result}".format "Result was {result}".format
(file=filePath, result=res)) (file=file_path, result=res))
return False return False

View file

@ -58,10 +58,10 @@ class NTMRotatingLogHandler(object):
handler.flush() handler.flush()
handler.close() handler.close()
def init_logging(self, consoleLogging=True): def init_logging(self, console_logging=True):
if consoleLogging: if console_logging:
self.console_logging = consoleLogging self.console_logging = console_logging
old_handler = None old_handler = None
@ -180,7 +180,7 @@ class NTMRotatingLogHandler(object):
pp_logger.addHandler(new_file_handler) pp_logger.addHandler(new_file_handler)
db_logger.addHandler(new_file_handler) db_logger.addHandler(new_file_handler)
def log(self, toLog, logLevel=MESSAGE, section='MAIN'): def log(self, to_log, log_level=MESSAGE, section='MAIN'):
with self.log_lock: with self.log_lock:
@ -193,7 +193,7 @@ class NTMRotatingLogHandler(object):
self.writes_since_check += 1 self.writes_since_check += 1
try: try:
message = u"{0}: {1}".format(section.upper(), toLog) message = u"{0}: {1}".format(section.upper(), to_log)
except UnicodeError: except UnicodeError:
message = u"{0}: Message contains non-utf-8 string".format(section.upper()) message = u"{0}: Message contains non-utf-8 string".format(section.upper())
@ -206,22 +206,22 @@ class NTMRotatingLogHandler(object):
setattr(db_logger, 'db', lambda *args: db_logger.log(DB, *args)) setattr(db_logger, 'db', lambda *args: db_logger.log(DB, *args))
try: try:
if logLevel == DEBUG: if log_level == DEBUG:
if core.LOG_DEBUG == 1: if core.LOG_DEBUG == 1:
ntm_logger.debug(out_line) ntm_logger.debug(out_line)
elif logLevel == MESSAGE: elif log_level == MESSAGE:
ntm_logger.info(out_line) ntm_logger.info(out_line)
elif logLevel == WARNING: elif log_level == WARNING:
ntm_logger.warning(out_line) ntm_logger.warning(out_line)
elif logLevel == ERROR: elif log_level == ERROR:
ntm_logger.error(out_line) ntm_logger.error(out_line)
elif logLevel == POSTPROCESS: elif log_level == POSTPROCESS:
pp_logger.postprocess(out_line) pp_logger.postprocess(out_line)
elif logLevel == DB: elif log_level == DB:
if core.LOG_DB == 1: if core.LOG_DB == 1:
db_logger.db(out_line) db_logger.db(out_line)
else: else:
ntm_logger.info(logLevel, out_line) ntm_logger.info(log_level, out_line)
except ValueError: except ValueError:
pass pass
@ -249,32 +249,32 @@ class DispatchingFormatter(object):
ntm_log_instance = NTMRotatingLogHandler(core.LOG_FILE, NUM_LOGS, LOG_SIZE) ntm_log_instance = NTMRotatingLogHandler(core.LOG_FILE, NUM_LOGS, LOG_SIZE)
def log(toLog, logLevel=MESSAGE, section='MAIN'): def log(to_log, log_level=MESSAGE, section='MAIN'):
ntm_log_instance.log(toLog, logLevel, section) ntm_log_instance.log(to_log, log_level, section)
def info(toLog, section='MAIN'): def info(to_log, section='MAIN'):
log(toLog, MESSAGE, section) log(to_log, MESSAGE, section)
def error(toLog, section='MAIN'): def error(to_log, section='MAIN'):
log(toLog, ERROR, section) log(to_log, ERROR, section)
def warning(toLog, section='MAIN'): def warning(to_log, section='MAIN'):
log(toLog, WARNING, section) log(to_log, WARNING, section)
def debug(toLog, section='MAIN'): def debug(to_log, section='MAIN'):
log(toLog, DEBUG, section) log(to_log, DEBUG, section)
def postprocess(toLog, section='POSTPROCESS'): def postprocess(to_log, section='POSTPROCESS'):
log(toLog, POSTPROCESS, section) log(to_log, POSTPROCESS, section)
def db(toLog, section='DB'): def db(to_log, section='DB'):
log(toLog, DB, section) log(to_log, DB, section)
def log_error_and_exit(error_msg): def log_error_and_exit(error_msg):

View file

@ -7,11 +7,11 @@ import core
from core import logger from core import logger
def auto_fork(section, inputCategory): def auto_fork(section, input_category):
# auto-detect correct section # auto-detect correct section
# config settings # config settings
cfg = dict(core.CFG[section][inputCategory]) cfg = dict(core.CFG[section][input_category])
host = cfg.get("host") host = cfg.get("host")
port = cfg.get("port") port = cfg.get("port")
@ -31,26 +31,26 @@ def auto_fork(section, inputCategory):
detected = False detected = False
if section == "NzbDrone": if section == "NzbDrone":
logger.info("Attempting to verify {category} fork".format logger.info("Attempting to verify {category} fork".format
(category=inputCategory)) (category=input_category))
url = "{protocol}{host}:{port}{root}/api/rootfolder".format( url = "{protocol}{host}:{port}{root}/api/rootfolder".format(
protocol=protocol, host=host, port=port, root=web_root) protocol=protocol, host=host, port=port, root=web_root)
headers = {"X-Api-Key": apikey} headers = {"X-Api-Key": apikey}
try: try:
r = requests.get(url, headers=headers, stream=True, verify=False) r = requests.get(url, headers=headers, stream=True, verify=False)
except requests.ConnectionError: except requests.ConnectionError:
logger.warning("Could not connect to {0}:{1} to verify fork!".format(section, inputCategory)) logger.warning("Could not connect to {0}:{1} to verify fork!".format(section, input_category))
if not r.ok: if not r.ok:
logger.warning("Connection to {section}:{category} failed! " logger.warning("Connection to {section}:{category} failed! "
"Check your configuration".format "Check your configuration".format
(section=section, category=inputCategory)) (section=section, category=input_category))
fork = ['default', {}] fork = ['default', {}]
elif fork == "auto": elif fork == "auto":
params = core.ALL_FORKS params = core.ALL_FORKS
rem_params = [] rem_params = []
logger.info("Attempting to auto-detect {category} fork".format(category=inputCategory)) logger.info("Attempting to auto-detect {category} fork".format(category=input_category))
# define the order to test. Default must be first since the default fork doesn't reject parameters. # define the order to test. Default must be first since the default fork doesn't reject parameters.
# then in order of most unique parameters. # then in order of most unique parameters.
@ -75,7 +75,7 @@ def auto_fork(section, inputCategory):
r = s.get(url, auth=(username, password), verify=False) r = s.get(url, auth=(username, password), verify=False)
except requests.ConnectionError: except requests.ConnectionError:
logger.info("Could not connect to {section}:{category} to perform auto-fork detection!".format logger.info("Could not connect to {section}:{category} to perform auto-fork detection!".format
(section=section, category=inputCategory)) (section=section, category=input_category))
r = [] r = []
if r and r.ok: if r and r.ok:
if apikey: if apikey:
@ -99,16 +99,16 @@ def auto_fork(section, inputCategory):
break break
if detected: if detected:
logger.info("{section}:{category} fork auto-detection successful ...".format logger.info("{section}:{category} fork auto-detection successful ...".format
(section=section, category=inputCategory)) (section=section, category=input_category))
elif rem_params: elif rem_params:
logger.info("{section}:{category} fork auto-detection found custom params {params}".format logger.info("{section}:{category} fork auto-detection found custom params {params}".format
(section=section, category=inputCategory, params=params)) (section=section, category=input_category, params=params))
fork = ['custom', params] fork = ['custom', params]
else: else:
logger.info("{section}:{category} fork auto-detection failed".format logger.info("{section}:{category} fork auto-detection failed".format
(section=section, category=inputCategory)) (section=section, category=input_category))
fork = core.FORKS.items()[core.FORKS.keys().index(core.FORK_DEFAULT)] fork = core.FORKS.items()[core.FORKS.keys().index(core.FORK_DEFAULT)]
logger.info("{section}:{category} fork set to {fork}".format logger.info("{section}:{category} fork set to {fork}".format
(section=section, category=inputCategory, fork=fork[0])) (section=section, category=input_category, fork=fork[0]))
return fork[0], fork[1] return fork[0], fork[1]

View file

@ -85,7 +85,7 @@ class DBConnection(object):
return sql_result return sql_result
def mass_action(self, querylist, logTransaction=False): def mass_action(self, querylist, log_transaction=False):
if querylist is None: if querylist is None:
return return
@ -96,11 +96,11 @@ class DBConnection(object):
try: try:
for qu in querylist: for qu in querylist:
if len(qu) == 1: if len(qu) == 1:
if logTransaction: if log_transaction:
logger.log(qu[0], logger.DEBUG) logger.log(qu[0], logger.DEBUG)
sql_result.append(self.connection.execute(qu[0])) sql_result.append(self.connection.execute(qu[0]))
elif len(qu) > 1: elif len(qu) > 1:
if logTransaction: if log_transaction:
logger.log(u"{query} with args {args}".format(query=qu[0], args=qu[1]), logger.DEBUG) logger.log(u"{query} with args {args}".format(query=qu[0], args=qu[1]), logger.DEBUG)
sql_result.append(self.connection.execute(qu[0], qu[1])) sql_result.append(self.connection.execute(qu[0], qu[1]))
self.connection.commit() self.connection.commit()
@ -167,20 +167,20 @@ class DBConnection(object):
return sql_results return sql_results
def upsert(self, tableName, valueDict, keyDict): def upsert(self, table_name, value_dict, key_dict):
changes_before = self.connection.total_changes changes_before = self.connection.total_changes
gen_params = lambda myDict: ["{key} = ?".format(key=k) for k in myDict.keys()] gen_params = lambda my_dict: ["{key} = ?".format(key=k) for k in my_dict.keys()]
items = list(valueDict.values()) + list(keyDict.values()) items = list(value_dict.values()) + list(key_dict.values())
self.action( self.action(
"UPDATE {table} " "UPDATE {table} "
"SET {params} " "SET {params} "
"WHERE {conditions}".format( "WHERE {conditions}".format(
table=tableName, table=table_name,
params=", ".join(gen_params(valueDict)), params=", ".join(gen_params(value_dict)),
conditions=" AND ".join(gen_params(keyDict)) conditions=" AND ".join(gen_params(key_dict))
), ),
items items
) )
@ -189,16 +189,16 @@ class DBConnection(object):
self.action( self.action(
"INSERT OR IGNORE INTO {table} ({columns}) " "INSERT OR IGNORE INTO {table} ({columns}) "
"VALUES ({values})".format( "VALUES ({values})".format(
table=tableName, table=table_name,
columns=", ".join(map(text_type, valueDict.keys())), columns=", ".join(map(text_type, value_dict.keys())),
values=", ".join(["?"] * len(valueDict.values())) values=", ".join(["?"] * len(value_dict.values()))
), ),
list(valueDict.values()) list(value_dict.values())
) )
def table_info(self, tableName): def table_info(self, table_name):
# FIXME ? binding is not supported here, but I cannot find a way to escape a string manually # FIXME ? binding is not supported here, but I cannot find a way to escape a string manually
cursor = self.connection.execute("PRAGMA table_info({0})".format(tableName)) cursor = self.connection.execute("PRAGMA table_info({0})".format(table_name))
columns = {} columns = {}
for column in cursor: for column in cursor:
columns[column['name']] = {'type': column['type']} columns[column['name']] = {'type': column['type']}
@ -237,26 +237,26 @@ def pretty_name(class_name):
return ' '.join([x.group() for x in re.finditer("([A-Z])([a-z0-9]+)", class_name)]) return ' '.join([x.group() for x in re.finditer("([A-Z])([a-z0-9]+)", class_name)])
def _process_upgrade(connection, upgradeClass): def _process_upgrade(connection, upgrade_class):
instance = upgradeClass(connection) instance = upgrade_class(connection)
logger.log(u"Checking {name} database upgrade".format logger.log(u"Checking {name} database upgrade".format
(name=pretty_name(upgradeClass.__name__)), logger.DEBUG) (name=pretty_name(upgrade_class.__name__)), logger.DEBUG)
if not instance.test(): if not instance.test():
logger.log(u"Database upgrade required: {name}".format logger.log(u"Database upgrade required: {name}".format
(name=pretty_name(upgradeClass.__name__)), logger.MESSAGE) (name=pretty_name(upgrade_class.__name__)), logger.MESSAGE)
try: try:
instance.execute() instance.execute()
except sqlite3.DatabaseError as error: except sqlite3.DatabaseError as error:
print(u"Error in {name}: {msg}".format print(u"Error in {name}: {msg}".format
(name=upgradeClass.__name__, msg=error)) (name=upgrade_class.__name__, msg=error))
raise raise
logger.log(u"{name} upgrade completed".format logger.log(u"{name} upgrade completed".format
(name=upgradeClass.__name__), logger.DEBUG) (name=upgrade_class.__name__), logger.DEBUG)
else: else:
logger.log(u"{name} upgrade not required".format logger.log(u"{name} upgrade not required".format
(name=upgradeClass.__name__), logger.DEBUG) (name=upgrade_class.__name__), logger.DEBUG)
for upgradeSubClass in upgradeClass.__subclasses__(): for upgradeSubClass in upgrade_class.__subclasses__():
_process_upgrade(connection, upgradeSubClass) _process_upgrade(connection, upgradeSubClass)
@ -265,11 +265,11 @@ class SchemaUpgrade(object):
def __init__(self, connection): def __init__(self, connection):
self.connection = connection self.connection = connection
def has_table(self, tableName): def has_table(self, table_name):
return len(self.connection.action("SELECT 1 FROM sqlite_master WHERE name = ?;", (tableName,)).fetchall()) > 0 return len(self.connection.action("SELECT 1 FROM sqlite_master WHERE name = ?;", (table_name,)).fetchall()) > 0
def has_column(self, tableName, column): def has_column(self, table_name, column):
return column in self.connection.table_info(tableName) return column in self.connection.table_info(table_name)
def add_column(self, table, column, type="NUMERIC", default=0): def add_column(self, table, column, type="NUMERIC", default=0):
self.connection.action("ALTER TABLE {0} ADD {1} {2}".format(table, column, type)) self.connection.action("ALTER TABLE {0} ADD {1} {2}".format(table, column, type))

View file

@ -65,8 +65,7 @@ def strip_groups(filename):
return newfile_path return newfile_path
def rename_file(filename, newfilePath): def rename_file(filename, newfile_path):
newfile_path = newfilePath
if os.path.isfile(newfile_path): if os.path.isfile(newfile_path):
newfile_path = os.path.splitext(newfile_path)[0] + ".NTM" + os.path.splitext(newfile_path)[1] newfile_path = os.path.splitext(newfile_path)[0] + ".NTM" + os.path.splitext(newfile_path)[1]
logger.debug("Replacing file name {old} with download name {new}".format logger.debug("Replacing file name {old} with download name {new}".format

View file

@ -9,7 +9,7 @@ from core.nzbToMediaUtil import import_subs, list_media_files, remove_dir
from core.transcoder import transcoder from core.transcoder import transcoder
def external_script(outputDestination, torrentName, torrentLabel, settings): def external_script(output_destination, torrent_name, torrent_label, settings):
final_result = 0 # start at 0. final_result = 0 # start at 0.
num_files = 0 num_files = 0
try: try:
@ -40,14 +40,14 @@ def external_script(outputDestination, torrentName, torrentLabel, settings):
core.USER_SCRIPT_RUNONCE = int(settings.get("user_script_runOnce", 1)) core.USER_SCRIPT_RUNONCE = int(settings.get("user_script_runOnce", 1))
if core.CHECK_MEDIA: if core.CHECK_MEDIA:
for video in list_media_files(outputDestination, media=True, audio=False, meta=False, archives=False): for video in list_media_files(output_destination, media=True, audio=False, meta=False, archives=False):
if transcoder.is_video_good(video, 0): if transcoder.is_video_good(video, 0):
import_subs(video) import_subs(video)
else: else:
logger.info("Corrupt video file found {0}. Deleting.".format(video), "USERSCRIPT") logger.info("Corrupt video file found {0}. Deleting.".format(video), "USERSCRIPT")
os.unlink(video) os.unlink(video)
for dirpath, dirnames, filenames in os.walk(outputDestination): for dirpath, dirnames, filenames in os.walk(output_destination):
for file in filenames: for file in filenames:
file_path = core.os.path.join(dirpath, file) file_path = core.os.path.join(dirpath, file)
@ -66,14 +66,14 @@ def external_script(outputDestination, torrentName, torrentLabel, settings):
command.append('{0}'.format(file_path)) command.append('{0}'.format(file_path))
continue continue
elif param == "TN": elif param == "TN":
command.append('{0}'.format(torrentName)) command.append('{0}'.format(torrent_name))
continue continue
elif param == "TL": elif param == "TL":
command.append('{0}'.format(torrentLabel)) command.append('{0}'.format(torrent_label))
continue continue
elif param == "DN": elif param == "DN":
if core.USER_SCRIPT_RUNONCE == 1: if core.USER_SCRIPT_RUNONCE == 1:
command.append('{0}'.format(outputDestination)) command.append('{0}'.format(output_destination))
else: else:
command.append('{0}'.format(dirpath)) command.append('{0}'.format(dirpath))
continue continue
@ -102,7 +102,7 @@ def external_script(outputDestination, torrentName, torrentLabel, settings):
final_result += result final_result += result
num_files_new = 0 num_files_new = 0
for dirpath, dirnames, filenames in os.walk(outputDestination): for dirpath, dirnames, filenames in os.walk(output_destination):
for file in filenames: for file in filenames:
file_name, file_extension = os.path.splitext(file) file_name, file_extension = os.path.splitext(file)
@ -110,8 +110,8 @@ def external_script(outputDestination, torrentName, torrentLabel, settings):
num_files_new += 1 num_files_new += 1
if core.USER_SCRIPT_CLEAN == int(1) and num_files_new == 0 and final_result == 0: if core.USER_SCRIPT_CLEAN == int(1) and num_files_new == 0 and final_result == 0:
logger.info("All files have been processed. Cleaning outputDirectory {0}".format(outputDestination)) logger.info("All files have been processed. Cleaning outputDirectory {0}".format(output_destination))
remove_dir(outputDestination) remove_dir(output_destination)
elif core.USER_SCRIPT_CLEAN == int(1) and num_files_new != 0: elif core.USER_SCRIPT_CLEAN == int(1) and num_files_new != 0:
logger.info("{0} files were processed, but {1} still remain. outputDirectory will not be cleaned.".format( logger.info("{0} files were processed, but {1} still remain. outputDirectory will not be cleaned.".format(
num_files, num_files_new)) num_files, num_files_new))

View file

@ -11,6 +11,7 @@ import socket
import stat import stat
import struct import struct
import time import time
from functools import partial
import beets import beets
import guessit import guessit
@ -36,12 +37,12 @@ def copyfileobj_fast(fsrc, fdst, length=512 * 1024):
shutil.copyfileobjOrig(fsrc, fdst, length=length) shutil.copyfileobjOrig(fsrc, fdst, length=length)
shutil.copyfileobj = copyfileobj_fast shutil.copyfileobj = copyfileobj_fast
def report_nzb(failure_link, clientAgent): def report_nzb(failure_link, client_agent):
# Contact indexer site # Contact indexer site
logger.info("Sending failure notification to indexer site") logger.info("Sending failure notification to indexer site")
if clientAgent == 'nzbget': if client_agent == 'nzbget':
headers = {'User-Agent': 'NZBGet / nzbToMedia.py'} headers = {'User-Agent': 'NZBGet / nzbToMedia.py'}
elif clientAgent == 'sabnzbd': elif client_agent == 'sabnzbd':
headers = {'User-Agent': 'SABnzbd / nzbToMedia.py'} headers = {'User-Agent': 'SABnzbd / nzbToMedia.py'}
else: else:
return return
@ -105,10 +106,7 @@ def remote_dir(path):
return path return path
def category_search(inputDirectory, inputName, inputCategory, root, categories): def category_search(input_directory, input_name, input_category, root, categories):
input_directory = inputDirectory
input_category = inputCategory
input_name = inputName
tordir = False tordir = False
try: try:
@ -202,85 +200,84 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories):
return input_directory, input_name, input_category, root return input_directory, input_name, input_category, root
def get_dir_size(inputPath): def get_dir_size(input_path):
from functools import partial prepend = partial(os.path.join, input_path)
prepend = partial(os.path.join, inputPath)
return sum([ return sum([
(os.path.getsize(f) if os.path.isfile(f) else get_dir_size(f)) (os.path.getsize(f) if os.path.isfile(f) else get_dir_size(f))
for f in map(prepend, os.listdir(text_type(inputPath))) for f in map(prepend, os.listdir(text_type(input_path)))
]) ])
def is_min_size(inputName, minSize): def is_min_size(input_name, min_size):
file_name, file_ext = os.path.splitext(os.path.basename(inputName)) file_name, file_ext = os.path.splitext(os.path.basename(input_name))
# audio files we need to check directory size not file size # audio files we need to check directory size not file size
input_size = os.path.getsize(inputName) input_size = os.path.getsize(input_name)
if file_ext in core.AUDIOCONTAINER: if file_ext in core.AUDIOCONTAINER:
try: try:
input_size = get_dir_size(os.path.dirname(inputName)) input_size = get_dir_size(os.path.dirname(input_name))
except: except:
logger.error("Failed to get file size for {0}".format(inputName), 'MINSIZE') logger.error("Failed to get file size for {0}".format(input_name), 'MINSIZE')
return True return True
# Ignore files under a certain size # Ignore files under a certain size
if input_size > minSize * 1048576: if input_size > min_size * 1048576:
return True return True
def is_sample(inputName): def is_sample(input_name):
# Ignore 'sample' in files # Ignore 'sample' in files
if re.search('(^|[\W_])sample\d*[\W_]', inputName.lower()): if re.search('(^|[\W_])sample\d*[\W_]', input_name.lower()):
return True return True
def copy_link(src, targetLink, useLink): def copy_link(src, target_link, use_link):
logger.info("MEDIAFILE: [{0}]".format(os.path.basename(targetLink)), 'COPYLINK') logger.info("MEDIAFILE: [{0}]".format(os.path.basename(target_link)), 'COPYLINK')
logger.info("SOURCE FOLDER: [{0}]".format(os.path.dirname(src)), 'COPYLINK') logger.info("SOURCE FOLDER: [{0}]".format(os.path.dirname(src)), 'COPYLINK')
logger.info("TARGET FOLDER: [{0}]".format(os.path.dirname(targetLink)), 'COPYLINK') logger.info("TARGET FOLDER: [{0}]".format(os.path.dirname(target_link)), 'COPYLINK')
if src != targetLink and os.path.exists(targetLink): if src != target_link and os.path.exists(target_link):
logger.info("MEDIAFILE already exists in the TARGET folder, skipping ...", 'COPYLINK') logger.info("MEDIAFILE already exists in the TARGET folder, skipping ...", 'COPYLINK')
return True return True
elif src == targetLink and os.path.isfile(targetLink) and os.path.isfile(src): elif src == target_link and os.path.isfile(target_link) and os.path.isfile(src):
logger.info("SOURCE AND TARGET files are the same, skipping ...", 'COPYLINK') logger.info("SOURCE AND TARGET files are the same, skipping ...", 'COPYLINK')
return True return True
elif src == os.path.dirname(targetLink): elif src == os.path.dirname(target_link):
logger.info("SOURCE AND TARGET folders are the same, skipping ...", 'COPYLINK') logger.info("SOURCE AND TARGET folders are the same, skipping ...", 'COPYLINK')
return True return True
make_dir(os.path.dirname(targetLink)) make_dir(os.path.dirname(target_link))
try: try:
if useLink == 'dir': if use_link == 'dir':
logger.info("Directory linking SOURCE FOLDER -> TARGET FOLDER", 'COPYLINK') logger.info("Directory linking SOURCE FOLDER -> TARGET FOLDER", 'COPYLINK')
linktastic.dirlink(src, targetLink) linktastic.dirlink(src, target_link)
return True return True
if useLink == 'junction': if use_link == 'junction':
logger.info("Directory junction linking SOURCE FOLDER -> TARGET FOLDER", 'COPYLINK') logger.info("Directory junction linking SOURCE FOLDER -> TARGET FOLDER", 'COPYLINK')
linktastic.dirlink(src, targetLink) linktastic.dirlink(src, target_link)
return True return True
elif useLink == "hard": elif use_link == "hard":
logger.info("Hard linking SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK') logger.info("Hard linking SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK')
linktastic.link(src, targetLink) linktastic.link(src, target_link)
return True return True
elif useLink == "sym": elif use_link == "sym":
logger.info("Sym linking SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK') logger.info("Sym linking SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK')
linktastic.symlink(src, targetLink) linktastic.symlink(src, target_link)
return True return True
elif useLink == "move-sym": elif use_link == "move-sym":
logger.info("Sym linking SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK') logger.info("Sym linking SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK')
shutil.move(src, targetLink) shutil.move(src, target_link)
linktastic.symlink(targetLink, src) linktastic.symlink(target_link, src)
return True return True
elif useLink == "move": elif use_link == "move":
logger.info("Moving SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK') logger.info("Moving SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK')
shutil.move(src, targetLink) shutil.move(src, target_link)
return True return True
except Exception as e: except Exception as e:
logger.warning("Error: {0}, copying instead ... ".format(e), 'COPYLINK') logger.warning("Error: {0}, copying instead ... ".format(e), 'COPYLINK')
logger.info("Copying SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK') logger.info("Copying SOURCE MEDIAFILE -> TARGET FOLDER", 'COPYLINK')
shutil.copy(src, targetLink) shutil.copy(src, target_link)
return True return True
@ -309,26 +306,26 @@ def replace_links(link):
linktastic.symlink(target, link) linktastic.symlink(target, link)
def flatten(outputDestination): def flatten(output_destination):
logger.info("FLATTEN: Flattening directory: {0}".format(outputDestination)) logger.info("FLATTEN: Flattening directory: {0}".format(output_destination))
for outputFile in list_media_files(outputDestination): for outputFile in list_media_files(output_destination):
dir_path = os.path.dirname(outputFile) dir_path = os.path.dirname(outputFile)
file_name = os.path.basename(outputFile) file_name = os.path.basename(outputFile)
if dir_path == outputDestination: if dir_path == output_destination:
continue continue
target = os.path.join(outputDestination, file_name) target = os.path.join(output_destination, file_name)
try: try:
shutil.move(outputFile, target) shutil.move(outputFile, target)
except: except:
logger.error("Could not flatten {0}".format(outputFile), 'FLATTEN') logger.error("Could not flatten {0}".format(outputFile), 'FLATTEN')
remove_empty_folders(outputDestination) # Cleanup empty directories remove_empty_folders(output_destination) # Cleanup empty directories
def remove_empty_folders(path, removeRoot=True): def remove_empty_folders(path, remove_root=True):
"""Function to remove empty folders""" """Function to remove empty folders"""
if not os.path.isdir(path): if not os.path.isdir(path):
return return
@ -344,7 +341,7 @@ def remove_empty_folders(path, removeRoot=True):
# if folder empty, delete it # if folder empty, delete it
files = os.listdir(text_type(path)) files = os.listdir(text_type(path))
if len(files) == 0 and removeRoot: if len(files) == 0 and remove_root:
logger.debug("Removing empty folder:{}".format(path)) logger.debug("Removing empty folder:{}".format(path))
os.rmdir(path) os.rmdir(path)
@ -413,8 +410,7 @@ def wake_up():
logger.info("System with mac: {0} has been woken. Continuing with the rest of the script.".format(mac)) logger.info("System with mac: {0} has been woken. Continuing with the rest of the script.".format(mac))
def char_replace(Name): def char_replace(name):
name = Name
# Special character hex range: # Special character hex range:
# CP850: 0x80-0xA5 (fortunately not used in ISO-8859-15) # CP850: 0x80-0xA5 (fortunately not used in ISO-8859-15)
# UTF-8: 1st hex code 0xC2-0xC3 followed by a 2nd hex code 0xA1-0xFF # UTF-8: 1st hex code 0xC2-0xC3 followed by a 2nd hex code 0xA1-0xFF
@ -456,9 +452,7 @@ def char_replace(Name):
return encoded, name return encoded, name
def convert_to_ascii(inputName, dirName): def convert_to_ascii(input_name, dir_name):
input_name = inputName
dir_name = dirName
ascii_convert = int(core.CFG["ASCII"]["convert"]) ascii_convert = int(core.CFG["ASCII"]["convert"])
if ascii_convert == 0 or os.name == 'nt': # just return if we don't want to convert or on windows os and "\" is replaced!. if ascii_convert == 0 or os.name == 'nt': # just return if we don't want to convert or on windows os and "\" is replaced!.
@ -629,7 +623,7 @@ def parse_qbittorrent(args):
return input_directory, input_name, input_category, input_hash, input_id return input_directory, input_name, input_category, input_hash, input_id
def parse_args(clientAgent, args): def parse_args(client_agent, args):
clients = { clients = {
'other': parse_other, 'other': parse_other,
'rtorrent': parse_rtorrent, 'rtorrent': parse_rtorrent,
@ -641,7 +635,7 @@ def parse_args(clientAgent, args):
} }
try: try:
return clients[clientAgent](args) return clients[client_agent](args)
except: except:
return None, None, None, None, None return None, None, None, None, None
@ -771,12 +765,12 @@ def onerror(func, path, exc_info):
raise Exception raise Exception
def remove_dir(dirName): def remove_dir(dir_name):
logger.info("Deleting {0}".format(dirName)) logger.info("Deleting {0}".format(dir_name))
try: try:
shutil.rmtree(text_type(dirName), onerror=onerror) shutil.rmtree(text_type(dir_name), onerror=onerror)
except: except:
logger.error("Unable to delete folder {0}".format(dirName)) logger.error("Unable to delete folder {0}".format(dir_name))
def clean_dir(path, section, subsection): def clean_dir(path, section, subsection):
@ -791,7 +785,7 @@ def clean_dir(path, section, subsection):
min_size = int(cfg.get('minSize', 0)) min_size = int(cfg.get('minSize', 0))
delete_ignored = int(cfg.get('delete_ignored', 0)) delete_ignored = int(cfg.get('delete_ignored', 0))
try: try:
num_files = len(list_media_files(path, minSize=min_size, delete_ignored=delete_ignored)) num_files = len(list_media_files(path, min_size=min_size, delete_ignored=delete_ignored))
except: except:
num_files = 'unknown' num_files = 'unknown'
if num_files > 0: if num_files > 0:
@ -807,39 +801,39 @@ def clean_dir(path, section, subsection):
logger.error("Unable to delete directory {0}".format(path)) logger.error("Unable to delete directory {0}".format(path))
def create_torrent_class(clientAgent): def create_torrent_class(client_agent):
# Hardlink solution for Torrents # Hardlink solution for Torrents
tc = None tc = None
if clientAgent == 'utorrent': if client_agent == 'utorrent':
try: try:
logger.debug("Connecting to {0}: {1}".format(clientAgent, core.UTORRENTWEBUI)) logger.debug("Connecting to {0}: {1}".format(client_agent, core.UTORRENTWEBUI))
tc = UTorrentClient(core.UTORRENTWEBUI, core.UTORRENTUSR, core.UTORRENTPWD) tc = UTorrentClient(core.UTORRENTWEBUI, core.UTORRENTUSR, core.UTORRENTPWD)
except: except:
logger.error("Failed to connect to uTorrent") logger.error("Failed to connect to uTorrent")
if clientAgent == 'transmission': if client_agent == 'transmission':
try: try:
logger.debug("Connecting to {0}: http://{1}:{2}".format( logger.debug("Connecting to {0}: http://{1}:{2}".format(
clientAgent, core.TRANSMISSIONHOST, core.TRANSMISSIONPORT)) client_agent, core.TRANSMISSIONHOST, core.TRANSMISSIONPORT))
tc = TransmissionClient(core.TRANSMISSIONHOST, core.TRANSMISSIONPORT, tc = TransmissionClient(core.TRANSMISSIONHOST, core.TRANSMISSIONPORT,
core.TRANSMISSIONUSR, core.TRANSMISSIONUSR,
core.TRANSMISSIONPWD) core.TRANSMISSIONPWD)
except: except:
logger.error("Failed to connect to Transmission") logger.error("Failed to connect to Transmission")
if clientAgent == 'deluge': if client_agent == 'deluge':
try: try:
logger.debug("Connecting to {0}: http://{1}:{2}".format(clientAgent, core.DELUGEHOST, core.DELUGEPORT)) logger.debug("Connecting to {0}: http://{1}:{2}".format(client_agent, core.DELUGEHOST, core.DELUGEPORT))
tc = DelugeClient() tc = DelugeClient()
tc.connect(host=core.DELUGEHOST, port=core.DELUGEPORT, username=core.DELUGEUSR, tc.connect(host=core.DELUGEHOST, port=core.DELUGEPORT, username=core.DELUGEUSR,
password=core.DELUGEPWD) password=core.DELUGEPWD)
except: except:
logger.error("Failed to connect to Deluge") logger.error("Failed to connect to Deluge")
if clientAgent == 'qbittorrent': if client_agent == 'qbittorrent':
try: try:
logger.debug("Connecting to {0}: http://{1}:{2}".format(clientAgent, core.QBITTORRENTHOST, core.QBITTORRENTPORT)) logger.debug("Connecting to {0}: http://{1}:{2}".format(client_agent, core.QBITTORRENTHOST, core.QBITTORRENTPORT))
tc = qBittorrentClient("http://{0}:{1}/".format(core.QBITTORRENTHOST, core.QBITTORRENTPORT)) tc = qBittorrentClient("http://{0}:{1}/".format(core.QBITTORRENTHOST, core.QBITTORRENTPORT))
tc.login(core.QBITTORRENTUSR, core.QBITTORRENTPWD) tc.login(core.QBITTORRENTUSR, core.QBITTORRENTPWD)
except: except:
@ -848,81 +842,81 @@ def create_torrent_class(clientAgent):
return tc return tc
def pause_torrent(clientAgent, inputHash, inputID, inputName): def pause_torrent(client_agent, input_hash, input_id, input_name):
logger.debug("Stopping torrent {0} in {1} while processing".format(inputName, clientAgent)) logger.debug("Stopping torrent {0} in {1} while processing".format(input_name, client_agent))
try: try:
if clientAgent == 'utorrent' and core.TORRENT_CLASS != "": if client_agent == 'utorrent' and core.TORRENT_CLASS != "":
core.TORRENT_CLASS.stop(inputHash) core.TORRENT_CLASS.stop(input_hash)
if clientAgent == 'transmission' and core.TORRENT_CLASS != "": if client_agent == 'transmission' and core.TORRENT_CLASS != "":
core.TORRENT_CLASS.stop_torrent(inputID) core.TORRENT_CLASS.stop_torrent(input_id)
if clientAgent == 'deluge' and core.TORRENT_CLASS != "": if client_agent == 'deluge' and core.TORRENT_CLASS != "":
core.TORRENT_CLASS.core.pause_torrent([inputID]) core.TORRENT_CLASS.core.pause_torrent([input_id])
if clientAgent == 'qbittorrent' and core.TORRENT_CLASS != "": if client_agent == 'qbittorrent' and core.TORRENT_CLASS != "":
core.TORRENT_CLASS.pause(inputHash) core.TORRENT_CLASS.pause(input_hash)
time.sleep(5) time.sleep(5)
except: except:
logger.warning("Failed to stop torrent {0} in {1}".format(inputName, clientAgent)) logger.warning("Failed to stop torrent {0} in {1}".format(input_name, client_agent))
def resume_torrent(clientAgent, inputHash, inputID, inputName): def resume_torrent(client_agent, input_hash, input_id, input_name):
if not core.TORRENT_RESUME == 1: if not core.TORRENT_RESUME == 1:
return return
logger.debug("Starting torrent {0} in {1}".format(inputName, clientAgent)) logger.debug("Starting torrent {0} in {1}".format(input_name, client_agent))
try: try:
if clientAgent == 'utorrent' and core.TORRENT_CLASS != "": if client_agent == 'utorrent' and core.TORRENT_CLASS != "":
core.TORRENT_CLASS.start(inputHash) core.TORRENT_CLASS.start(input_hash)
if clientAgent == 'transmission' and core.TORRENT_CLASS != "": if client_agent == 'transmission' and core.TORRENT_CLASS != "":
core.TORRENT_CLASS.start_torrent(inputID) core.TORRENT_CLASS.start_torrent(input_id)
if clientAgent == 'deluge' and core.TORRENT_CLASS != "": if client_agent == 'deluge' and core.TORRENT_CLASS != "":
core.TORRENT_CLASS.core.resume_torrent([inputID]) core.TORRENT_CLASS.core.resume_torrent([input_id])
if clientAgent == 'qbittorrent' and core.TORRENT_CLASS != "": if client_agent == 'qbittorrent' and core.TORRENT_CLASS != "":
core.TORRENT_CLASS.resume(inputHash) core.TORRENT_CLASS.resume(input_hash)
time.sleep(5) time.sleep(5)
except: except:
logger.warning("Failed to start torrent {0} in {1}".format(inputName, clientAgent)) logger.warning("Failed to start torrent {0} in {1}".format(input_name, client_agent))
def remove_torrent(client_agent, input_hash, input_id, input_name):
    """Delete a torrent (and its data) from the client, or resume it.

    Deletion happens only when the original download is no longer needed
    (``core.DELETE_ORIGINAL == 1`` or files were moved rather than
    linked); otherwise the torrent is resumed via :func:`resume_torrent`.
    Failures are logged as a warning and swallowed.

    :param client_agent: client name ('utorrent', 'transmission',
        'deluge' or 'qbittorrent')
    :param input_hash: torrent info-hash (used by uTorrent/qBittorrent)
    :param input_id: torrent id (used by Transmission/Deluge)
    :param input_name: torrent display name (logging only)
    """
    if core.DELETE_ORIGINAL == 1 or core.USELINK == 'move':
        logger.debug("Deleting torrent {0} from {1}".format(input_name, client_agent))
        try:
            if client_agent == 'utorrent' and core.TORRENT_CLASS != "":
                core.TORRENT_CLASS.removedata(input_hash)
                core.TORRENT_CLASS.remove(input_hash)
            if client_agent == 'transmission' and core.TORRENT_CLASS != "":
                # second argument True = also delete downloaded data
                core.TORRENT_CLASS.remove_torrent(input_id, True)
            if client_agent == 'deluge' and core.TORRENT_CLASS != "":
                core.TORRENT_CLASS.core.remove_torrent(input_id, True)
            if client_agent == 'qbittorrent' and core.TORRENT_CLASS != "":
                core.TORRENT_CLASS.delete_permanently(input_hash)
            # give the client a moment to act before callers proceed
            time.sleep(5)
        except Exception:  # narrowed from bare except: don't trap SystemExit/KeyboardInterrupt
            logger.warning("Failed to delete torrent {0} in {1}".format(input_name, client_agent))
    else:
        resume_torrent(client_agent, input_hash, input_id, input_name)
def find_download(clientAgent, download_id): def find_download(client_agent, download_id):
logger.debug("Searching for Download on {0} ...".format(clientAgent)) logger.debug("Searching for Download on {0} ...".format(client_agent))
if clientAgent == 'utorrent': if client_agent == 'utorrent':
torrents = core.TORRENT_CLASS.list()[1]['torrents'] torrents = core.TORRENT_CLASS.list()[1]['torrents']
for torrent in torrents: for torrent in torrents:
if download_id in torrent: if download_id in torrent:
return True return True
if clientAgent == 'transmission': if client_agent == 'transmission':
torrents = core.TORRENT_CLASS.get_torrents() torrents = core.TORRENT_CLASS.get_torrents()
for torrent in torrents: for torrent in torrents:
hash = torrent.hashString hash = torrent.hashString
if hash == download_id: if hash == download_id:
return True return True
if clientAgent == 'deluge': if client_agent == 'deluge':
return False return False
if clientAgent == 'qbittorrent': if client_agent == 'qbittorrent':
torrents = core.TORRENT_CLASS.torrents() torrents = core.TORRENT_CLASS.torrents()
for torrent in torrents: for torrent in torrents:
if torrent['hash'] == download_id: if torrent['hash'] == download_id:
return True return True
if clientAgent == 'sabnzbd': if client_agent == 'sabnzbd':
if "http" in core.SABNZBDHOST: if "http" in core.SABNZBDHOST:
base_url = "{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT) base_url = "{0}:{1}/api".format(core.SABNZBDHOST, core.SABNZBDPORT)
else: else:
@ -946,7 +940,7 @@ def find_download(clientAgent, download_id):
return False return False
def get_nzoid(inputName): def get_nzoid(input_name):
nzoid = None nzoid = None
slots = [] slots = []
logger.debug("Searching for nzoid from SAbnzbd ...") logger.debug("Searching for nzoid from SAbnzbd ...")
@ -967,7 +961,7 @@ def get_nzoid(inputName):
return nzoid # failure return nzoid # failure
try: try:
result = r.json() result = r.json()
clean_name = os.path.splitext(os.path.split(inputName)[1])[0] clean_name = os.path.splitext(os.path.split(input_name)[1])[0]
slots.extend([(slot['nzo_id'], slot['filename']) for slot in result['queue']['slots']]) slots.extend([(slot['nzo_id'], slot['filename']) for slot in result['queue']['slots']])
except: except:
logger.warning("Data from SABnzbd queue could not be parsed") logger.warning("Data from SABnzbd queue could not be parsed")
@ -979,13 +973,13 @@ def get_nzoid(inputName):
return nzoid # failure return nzoid # failure
try: try:
result = r.json() result = r.json()
clean_name = os.path.splitext(os.path.split(inputName)[1])[0] clean_name = os.path.splitext(os.path.split(input_name)[1])[0]
slots.extend([(slot['nzo_id'], slot['name']) for slot in result['history']['slots']]) slots.extend([(slot['nzo_id'], slot['name']) for slot in result['history']['slots']])
except: except:
logger.warning("Data from SABnzbd history could not be parsed") logger.warning("Data from SABnzbd history could not be parsed")
try: try:
for nzo_id, name in slots: for nzo_id, name in slots:
if name in [inputName, clean_name]: if name in [input_name, clean_name]:
nzoid = nzo_id nzoid = nzo_id
logger.debug("Found nzoid: {0}".format(nzoid)) logger.debug("Found nzoid: {0}".format(nzoid))
break break
@ -1039,14 +1033,14 @@ def is_media_file(mediafile, media=True, audio=True, meta=True, archives=True, o
return False return False
def list_media_files(path, minSize=0, delete_ignored=0, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]): def list_media_files(path, min_size=0, delete_ignored=0, media=True, audio=True, meta=True, archives=True, other=False, otherext=[]):
files = [] files = []
if not os.path.isdir(path): if not os.path.isdir(path):
if os.path.isfile(path): # Single file downloads. if os.path.isfile(path): # Single file downloads.
cur_file = os.path.split(path)[1] cur_file = os.path.split(path)[1]
if is_media_file(cur_file, media, audio, meta, archives, other, otherext): if is_media_file(cur_file, media, audio, meta, archives, other, otherext):
# Optionally ignore sample files # Optionally ignore sample files
if is_sample(path) or not is_min_size(path, minSize): if is_sample(path) or not is_min_size(path, min_size):
if delete_ignored == 1: if delete_ignored == 1:
try: try:
os.unlink(path) os.unlink(path)
@ -1064,11 +1058,11 @@ def list_media_files(path, minSize=0, delete_ignored=0, media=True, audio=True,
# if it's a folder do it recursively # if it's a folder do it recursively
if os.path.isdir(full_cur_file) and not cur_file.startswith('.'): if os.path.isdir(full_cur_file) and not cur_file.startswith('.'):
files += list_media_files(full_cur_file, minSize, delete_ignored, media, audio, meta, archives, other, otherext) files += list_media_files(full_cur_file, min_size, delete_ignored, media, audio, meta, archives, other, otherext)
elif is_media_file(cur_file, media, audio, meta, archives, other, otherext): elif is_media_file(cur_file, media, audio, meta, archives, other, otherext):
# Optionally ignore sample files # Optionally ignore sample files
if is_sample(full_cur_file) or not is_min_size(full_cur_file, minSize): if is_sample(full_cur_file) or not is_min_size(full_cur_file, min_size):
if delete_ignored == 1: if delete_ignored == 1:
try: try:
os.unlink(full_cur_file) os.unlink(full_cur_file)
@ -1083,20 +1077,20 @@ def list_media_files(path, minSize=0, delete_ignored=0, media=True, audio=True,
return sorted(files, key=len) return sorted(files, key=len)
def find_imdbid(dirName, inputName, omdbApiKey): def find_imdbid(dir_name, input_name, omdb_api_key):
imdbid = None imdbid = None
logger.info('Attemping imdbID lookup for {0}'.format(inputName)) logger.info('Attemping imdbID lookup for {0}'.format(input_name))
# find imdbid in dirName # find imdbid in dirName
logger.info('Searching folder and file names for imdbID ...') logger.info('Searching folder and file names for imdbID ...')
m = re.search('(tt\d{7})', dirName + inputName) m = re.search('(tt\d{7})', dir_name + input_name)
if m: if m:
imdbid = m.group(1) imdbid = m.group(1)
logger.info("Found imdbID [{0}]".format(imdbid)) logger.info("Found imdbID [{0}]".format(imdbid))
return imdbid return imdbid
if os.path.isdir(dirName): if os.path.isdir(dir_name):
for file in os.listdir(text_type(dirName)): for file in os.listdir(text_type(dir_name)):
m = re.search('(tt\d{7})', file) m = re.search('(tt\d{7})', file)
if m: if m:
imdbid = m.group(1) imdbid = m.group(1)
@ -1113,7 +1107,7 @@ def find_imdbid(dirName, inputName, omdbApiKey):
return imdbid return imdbid
logger.info('Searching IMDB for imdbID ...') logger.info('Searching IMDB for imdbID ...')
try: try:
guess = guessit.guessit(inputName) guess = guessit.guessit(input_name)
except: except:
guess = None guess = None
if guess: if guess:
@ -1129,14 +1123,14 @@ def find_imdbid(dirName, inputName, omdbApiKey):
url = "http://www.omdbapi.com" url = "http://www.omdbapi.com"
if not omdbApiKey: if not omdb_api_key:
logger.info("Unable to determine imdbID: No api key provided for ombdapi.com.") logger.info("Unable to determine imdbID: No api key provided for ombdapi.com.")
return return
logger.debug("Opening URL: {0}".format(url)) logger.debug("Opening URL: {0}".format(url))
try: try:
r = requests.get(url, params={'apikey': omdbApiKey, 'y': year, 't': title}, r = requests.get(url, params={'apikey': omdb_api_key, 'y': year, 't': title},
verify=False, timeout=(60, 300)) verify=False, timeout=(60, 300))
except requests.ConnectionError: except requests.ConnectionError:
logger.error("Unable to open URL {0}".format(url)) logger.error("Unable to open URL {0}".format(url))
@ -1156,7 +1150,7 @@ def find_imdbid(dirName, inputName, omdbApiKey):
logger.info("Found imdbID [{0}]".format(imdbid)) logger.info("Found imdbID [{0}]".format(imdbid))
return imdbid return imdbid
logger.warning('Unable to find a imdbID for {0}'.format(inputName)) logger.warning('Unable to find a imdbID for {0}'.format(input_name))
return imdbid return imdbid
@ -1223,14 +1217,14 @@ def import_subs(filename):
logger.error("Failed to download subtitles for {0} due to: {1}".format(filename, e), 'SUBTITLES') logger.error("Failed to download subtitles for {0} due to: {1}".format(filename, e), 'SUBTITLES')
def server_responding(base_url):
    """Probe *base_url* with a GET and report whether the server answered.

    :param base_url: full URL of the server endpoint to probe
    :returns: True when the request completes, False on connection
        failure or timeout.
    """
    logger.debug("Attempting to connect to server at {0}".format(base_url), 'SERVER')
    try:
        requests.get(base_url, timeout=(60, 120), verify=False)
    except (requests.ConnectionError, requests.exceptions.Timeout):
        logger.error("Server failed to respond at {0}".format(base_url), 'SERVER')
        return False
    logger.debug("Server responded at {0}".format(base_url), 'SERVER')
    return True
@ -1287,20 +1281,20 @@ def backup_versioned_file(old_file, version):
return True return True
def update_download_info_status(input_name, status):
    """Record a new status for a tracked download in the local database.

    :param input_name: download name used as the row key
    :param status: new status value to store
    """
    logger.db("Updating status of our download {0} in the DB to {1}".format(input_name, status))
    params = [status, datetime.date.today().toordinal(), text_type(input_name)]
    nzbToMediaDB.DBConnection().action(
        "UPDATE downloads SET status=?, last_update=? WHERE input_name=?", params)
def get_download_info(input_name, status):
    """Look up tracked download rows by name and status.

    :param input_name: download name to match
    :param status: status value to match
    :returns: list of matching rows from the ``downloads`` table
    """
    logger.db("Getting download info for {0} from the DB".format(input_name))
    database = nzbToMediaDB.DBConnection()
    return database.select(
        "SELECT * FROM downloads WHERE input_name=? AND status=?",
        [text_type(input_name), status])

View file

@ -116,7 +116,7 @@ def get_video_details(videofile, img=None, bitbucket=None):
return video_details, result return video_details, result
def build_commands(file, newDir, movieName, bitbucket): def build_commands(file, new_dir, movie_name, bitbucket):
if isinstance(file, string_types): if isinstance(file, string_types):
input_file = file input_file = file
if 'concat:' in file: if 'concat:' in file:
@ -126,12 +126,12 @@ def build_commands(file, newDir, movieName, bitbucket):
name, ext = os.path.splitext(name) name, ext = os.path.splitext(name)
check = re.match("VTS_([0-9][0-9])_[0-9]+", name) check = re.match("VTS_([0-9][0-9])_[0-9]+", name)
if check and core.CONCAT: if check and core.CONCAT:
name = movieName name = movie_name
elif check: elif check:
name = ('{0}.cd{1}'.format(movieName, check.groups()[0])) name = ('{0}.cd{1}'.format(movie_name, check.groups()[0]))
elif core.CONCAT and re.match("(.+)[cC][dD][0-9]", name): elif core.CONCAT and re.match("(.+)[cC][dD][0-9]", name):
name = re.sub("([\ \.\-\_\=\:]+[cC][dD][0-9])", "", name) name = re.sub("([\ \.\-\_\=\:]+[cC][dD][0-9])", "", name)
if ext == core.VEXTENSION and newDir == dir: # we need to change the name to prevent overwriting itself. if ext == core.VEXTENSION and new_dir == dir: # we need to change the name to prevent overwriting itself.
core.VEXTENSION = '-transcoded{ext}'.format(ext=core.VEXTENSION) # adds '-transcoded.ext' core.VEXTENSION = '-transcoded{ext}'.format(ext=core.VEXTENSION) # adds '-transcoded.ext'
else: else:
img, data = next(iteritems(file)) img, data = next(iteritems(file))
@ -140,7 +140,7 @@ def build_commands(file, newDir, movieName, bitbucket):
input_file = '-' input_file = '-'
file = '-' file = '-'
newfile_path = os.path.normpath(os.path.join(newDir, name) + core.VEXTENSION) newfile_path = os.path.normpath(os.path.join(new_dir, name) + core.VEXTENSION)
map_cmd = [] map_cmd = []
video_cmd = [] video_cmd = []
@ -527,7 +527,7 @@ def get_subs(file):
return subfiles return subfiles
def extract_subs(file, newfilePath, bitbucket): def extract_subs(file, newfile_path, bitbucket):
video_details, result = get_video_details(file) video_details, result = get_video_details(file)
if not video_details: if not video_details:
return return
@ -535,8 +535,8 @@ def extract_subs(file, newfilePath, bitbucket):
if core.SUBSDIR: if core.SUBSDIR:
subdir = core.SUBSDIR subdir = core.SUBSDIR
else: else:
subdir = os.path.split(newfilePath)[0] subdir = os.path.split(newfile_path)[0]
name = os.path.splitext(os.path.split(newfilePath)[1])[0] name = os.path.splitext(os.path.split(newfile_path)[1])[0]
try: try:
sub_streams = [item for item in video_details["streams"] if sub_streams = [item for item in video_details["streams"] if
@ -586,17 +586,17 @@ def extract_subs(file, newfilePath, bitbucket):
logger.error("Extracting subtitles has failed") logger.error("Extracting subtitles has failed")
def process_list(List, newDir, bitbucket): def process_list(it, new_dir, bitbucket):
rem_list = [] rem_list = []
new_list = [] new_list = []
combine = [] combine = []
vts_path = None vts_path = None
success = True success = True
for item in List: for item in it:
ext = os.path.splitext(item)[1].lower() ext = os.path.splitext(item)[1].lower()
if ext in ['.iso', '.bin', '.img'] and ext not in core.IGNOREEXTENSIONS: if ext in ['.iso', '.bin', '.img'] and ext not in core.IGNOREEXTENSIONS:
logger.debug("Attempting to rip disk image: {0}".format(item), "TRANSCODER") logger.debug("Attempting to rip disk image: {0}".format(item), "TRANSCODER")
new_list.extend(rip_iso(item, newDir, bitbucket)) new_list.extend(rip_iso(item, new_dir, bitbucket))
rem_list.append(item) rem_list.append(item)
elif re.match(".+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", item) and '.vob' not in core.IGNOREEXTENSIONS: elif re.match(".+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]", item) and '.vob' not in core.IGNOREEXTENSIONS:
logger.debug("Found VIDEO_TS image file: {0}".format(item), "TRANSCODER") logger.debug("Found VIDEO_TS image file: {0}".format(item), "TRANSCODER")
@ -622,18 +622,18 @@ def process_list(List, newDir, bitbucket):
success = False success = False
break break
if success and new_list: if success and new_list:
List.extend(new_list) it.extend(new_list)
for item in rem_list: for item in rem_list:
List.remove(item) it.remove(item)
logger.debug("Successfully extracted .vob file {0} from disk image".format(new_list[0]), "TRANSCODER") logger.debug("Successfully extracted .vob file {0} from disk image".format(new_list[0]), "TRANSCODER")
elif new_list and not success: elif new_list and not success:
new_list = [] new_list = []
rem_list = [] rem_list = []
logger.error("Failed extracting .vob files from disk image. Stopping transcoding.", "TRANSCODER") logger.error("Failed extracting .vob files from disk image. Stopping transcoding.", "TRANSCODER")
return List, rem_list, new_list, success return it, rem_list, new_list, success
def rip_iso(item, newDir, bitbucket): def rip_iso(item, new_dir, bitbucket):
new_files = [] new_files = []
failure_dir = 'failure' failure_dir = 'failure'
# Mount the ISO in your OS and call combineVTS. # Mount the ISO in your OS and call combineVTS.
@ -681,7 +681,7 @@ def rip_iso(item, newDir, bitbucket):
return new_files return new_files
def combine_vts(vtsPath): def combine_vts(vts_path):
new_files = [] new_files = []
combined = '' combined = ''
for n in range(99): for n in range(99):
@ -689,8 +689,8 @@ def combine_vts(vtsPath):
m = 1 m = 1
while True: while True:
vts_name = 'VTS_{0:02d}_{1:d}.VOB'.format(n + 1, m) vts_name = 'VTS_{0:02d}_{1:d}.VOB'.format(n + 1, m)
if os.path.isfile(os.path.join(vtsPath, vts_name)): if os.path.isfile(os.path.join(vts_path, vts_name)):
concat += '{file}|'.format(file=os.path.join(vtsPath, vts_name)) concat += '{file}|'.format(file=os.path.join(vts_path, vts_name))
m += 1 m += 1
else: else:
break break
@ -728,29 +728,29 @@ def print_cmd(command):
logger.debug("calling command:{0}".format(cmd)) logger.debug("calling command:{0}".format(cmd))
def transcode_directory(dirName): def transcode_directory(dir_name):
if not core.FFMPEG: if not core.FFMPEG:
return 1, dirName return 1, dir_name
logger.info("Checking for files to be transcoded") logger.info("Checking for files to be transcoded")
final_result = 0 # initialize as successful final_result = 0 # initialize as successful
if core.OUTPUTVIDEOPATH: if core.OUTPUTVIDEOPATH:
new_dir = core.OUTPUTVIDEOPATH new_dir = core.OUTPUTVIDEOPATH
make_dir(new_dir) make_dir(new_dir)
name = os.path.splitext(os.path.split(dirName)[1])[0] name = os.path.splitext(os.path.split(dir_name)[1])[0]
new_dir = os.path.join(new_dir, name) new_dir = os.path.join(new_dir, name)
make_dir(new_dir) make_dir(new_dir)
else: else:
new_dir = dirName new_dir = dir_name
if platform.system() == 'Windows': if platform.system() == 'Windows':
bitbucket = open('NUL') bitbucket = open('NUL')
else: else:
bitbucket = open('/dev/null') bitbucket = open('/dev/null')
movie_name = os.path.splitext(os.path.split(dirName)[1])[0] movie_name = os.path.splitext(os.path.split(dir_name)[1])[0]
file_list = core.list_media_files(dirName, media=True, audio=False, meta=False, archives=False) file_list = core.list_media_files(dir_name, media=True, audio=False, meta=False, archives=False)
file_list, rem_list, new_list, success = process_list(file_list, new_dir, bitbucket) file_list, rem_list, new_list, success = process_list(file_list, new_dir, bitbucket)
if not success: if not success:
bitbucket.close() bitbucket.close()
return 1, dirName return 1, dir_name
for file in file_list: for file in file_list:
if isinstance(file, string_types) and os.path.splitext(file)[1] in core.IGNOREEXTENSIONS: if isinstance(file, string_types) and os.path.splitext(file)[1] in core.IGNOREEXTENSIONS:
@ -821,8 +821,8 @@ def transcode_directory(dirName):
pass pass
if not os.listdir(text_type(new_dir)): # this is an empty directory and we didn't transcode into it. if not os.listdir(text_type(new_dir)): # this is an empty directory and we didn't transcode into it.
os.rmdir(new_dir) os.rmdir(new_dir)
new_dir = dirName new_dir = dir_name
if not core.PROCESSOUTPUT and core.DUPLICATE: # We postprocess the original files to CP/SB if not core.PROCESSOUTPUT and core.DUPLICATE: # We postprocess the original files to CP/SB
new_dir = dirName new_dir = dir_name
bitbucket.close() bitbucket.close()
return final_result, new_dir return final_result, new_dir

View file

@ -644,20 +644,17 @@ except NameError:
# post-processing # post-processing
def process(inputDirectory, inputName=None, status=0, clientAgent='manual', download_id=None, inputCategory=None, failureLink=None): def process(input_directory, input_name=None, status=0, client_agent='manual', download_id=None, input_category=None, failure_link=None):
input_directory = inputDirectory
input_name = inputName
input_category = inputCategory
if core.SAFE_MODE and input_directory == core.NZB_DEFAULTDIR: if core.SAFE_MODE and input_directory == core.NZB_DEFAULTDIR:
logger.error( logger.error(
'The input directory:[{0}] is the Default Download Directory. Please configure category directories to prevent processing of other media.'.format( 'The input directory:[{0}] is the Default Download Directory. Please configure category directories to prevent processing of other media.'.format(
input_directory)) input_directory))
return [-1, ""] return [-1, ""]
if not download_id and clientAgent == 'sabnzbd': if not download_id and client_agent == 'sabnzbd':
download_id = get_nzoid(input_name) download_id = get_nzoid(input_name)
if clientAgent != 'manual' and not core.DOWNLOADINFO: if client_agent != 'manual' and not core.DOWNLOADINFO:
logger.debug('Adding NZB download info for directory {0} to database'.format(input_directory)) logger.debug('Adding NZB download info for directory {0} to database'.format(input_directory))
my_db = nzbToMediaDB.DBConnection() my_db = nzbToMediaDB.DBConnection()
@ -675,7 +672,7 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down
new_value_dict = {"input_name": text_type(input_name1), new_value_dict = {"input_name": text_type(input_name1),
"input_hash": text_type(download_id), "input_hash": text_type(download_id),
"input_id": text_type(download_id), "input_id": text_type(download_id),
"client_agent": text_type(clientAgent), "client_agent": text_type(client_agent),
"status": 0, "status": 0,
"last_update": datetime.date.today().toordinal() "last_update": datetime.date.today().toordinal()
} }
@ -732,18 +729,18 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down
logger.info("Calling {0}:{1} to post-process:{2}".format(section_name, input_category, input_name)) logger.info("Calling {0}:{1} to post-process:{2}".format(section_name, input_category, input_name))
if section_name in ["CouchPotato", "Radarr"]: if section_name in ["CouchPotato", "Radarr"]:
result = Movie().process(section_name, input_directory, input_name, status, clientAgent, download_id, result = Movie().process(section_name, input_directory, input_name, status, client_agent, download_id,
input_category, failureLink) input_category, failure_link)
elif section_name in ["SickBeard", "NzbDrone", "Sonarr"]: elif section_name in ["SickBeard", "NzbDrone", "Sonarr"]:
result = TV().process_episode(section_name, input_directory, input_name, status, clientAgent, result = TV().process_episode(section_name, input_directory, input_name, status, client_agent,
download_id, input_category, failureLink) download_id, input_category, failure_link)
elif section_name in ["HeadPhones", "Lidarr"]: elif section_name in ["HeadPhones", "Lidarr"]:
result = Music().process(section_name, input_directory, input_name, status, clientAgent, input_category) result = Music().process(section_name, input_directory, input_name, status, client_agent, input_category)
elif section_name == "Mylar": elif section_name == "Mylar":
result = Comic().process_episode(section_name, input_directory, input_name, status, clientAgent, result = Comic().process_episode(section_name, input_directory, input_name, status, client_agent,
input_category) input_category)
elif section_name == "Gamez": elif section_name == "Gamez":
result = Game().process(section_name, input_directory, input_name, status, clientAgent, input_category) result = Game().process(section_name, input_directory, input_name, status, client_agent, input_category)
elif section_name == 'UserScript': elif section_name == 'UserScript':
result = external_script(input_directory, input_name, input_category, section[usercat]) result = external_script(input_directory, input_name, input_category, section[usercat])
else: else:
@ -752,7 +749,7 @@ def process(inputDirectory, inputName=None, status=0, clientAgent='manual', down
plex_update(input_category) plex_update(input_category)
if result[0] == 0: if result[0] == 0:
if clientAgent != 'manual': if client_agent != 'manual':
# update download status in our DB # update download status in our DB
update_download_info_status(input_name, 1) update_download_info_status(input_name, 1)
if section_name not in ['UserScript', 'NzbDrone', 'Sonarr', 'Radarr', 'Lidarr']: if section_name not in ['UserScript', 'NzbDrone', 'Sonarr', 'Radarr', 'Lidarr']:
@ -836,8 +833,8 @@ def main(args, section=None):
# All checks done, now launching the script. # All checks done, now launching the script.
client_agent = 'nzbget' client_agent = 'nzbget'
result = process(os.environ['NZBPP_DIRECTORY'], input_name=os.environ['NZBPP_NZBNAME'], status=status, result = process(os.environ['NZBPP_DIRECTORY'], input_name=os.environ['NZBPP_NZBNAME'], status=status,
clientAgent=client_agent, download_id=download_id, input_category=os.environ['NZBPP_CATEGORY'], client_agent=client_agent, download_id=download_id, input_category=os.environ['NZBPP_CATEGORY'],
failureLink=failure_link) failure_link=failure_link)
# SABnzbd Pre 0.7.17 # SABnzbd Pre 0.7.17
elif len(args) == core.SABNZB_NO_OF_ARGUMENTS: elif len(args) == core.SABNZB_NO_OF_ARGUMENTS:
# SABnzbd argv: # SABnzbd argv:
@ -850,7 +847,7 @@ def main(args, section=None):
# 7 Status of post processing. 0 = OK, 1=failed verification, 2=failed unpack, 3=1+2 # 7 Status of post processing. 0 = OK, 1=failed verification, 2=failed unpack, 3=1+2
client_agent = 'sabnzbd' client_agent = 'sabnzbd'
logger.info("Script triggered from SABnzbd") logger.info("Script triggered from SABnzbd")
result = process(args[1], input_name=args[2], status=args[7], input_category=args[5], clientAgent=client_agent, result = process(args[1], input_name=args[2], status=args[7], input_category=args[5], client_agent=client_agent,
download_id='') download_id='')
# SABnzbd 0.7.17+ # SABnzbd 0.7.17+
elif len(args) >= core.SABNZB_0717_NO_OF_ARGUMENTS: elif len(args) >= core.SABNZB_0717_NO_OF_ARGUMENTS:
@ -865,8 +862,8 @@ def main(args, section=None):
# 8 Failure URL # 8 Failure URL
client_agent = 'sabnzbd' client_agent = 'sabnzbd'
logger.info("Script triggered from SABnzbd 0.7.17+") logger.info("Script triggered from SABnzbd 0.7.17+")
result = process(args[1], input_name=args[2], status=args[7], input_category=args[5], clientAgent=client_agent, result = process(args[1], input_name=args[2], status=args[7], input_category=args[5], client_agent=client_agent,
download_id='', failureLink=''.join(args[8:])) download_id='', failure_link=''.join(args[8:]))
# Generic program # Generic program
elif len(args) > 5 and args[5] == 'generic': elif len(args) > 5 and args[5] == 'generic':
logger.info("Script triggered from generic program") logger.info("Script triggered from generic program")
@ -910,7 +907,7 @@ def main(args, section=None):
except UnicodeError: except UnicodeError:
pass pass
results = process(dir_name, input_name, 0, clientAgent=client_agent, results = process(dir_name, input_name, 0, client_agent=client_agent,
download_id=download_id or None, input_category=subsection) download_id=download_id or None, input_category=subsection)
if results[0] != 0: if results[0] != 0:
logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format logger.error("A problem was reported when trying to perform a manual run for {0}:{1}.".format