Updated the logger code to include the section in each log message; the log formatting has been modified as well.
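
As a rough illustration of the idea only (not the actual logger module from this commit), a section-aware log call could look like the Python sketch below; the names and format are assumptions:

import logging

logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s")
_log = logging.getLogger("nzbtomedia")
_log.setLevel(logging.INFO)

def info(msg, section="MAIN"):
    # Prefix each message with its section so lines read like
    # "CouchPotato: Found release in database" in the combined log.
    _log.info("%s: %s", section, msg)

info("Found release in database", section="CouchPotato")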

Logging of debug messages is now optional via the log_debug option located in autoProcessMedia.cfg
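
A minimal sketch of how such a flag could be read and applied, assuming an ini-style [General] section (only the option name log_debug comes from this commit; everything else is illustrative):

import logging

try:
    from configparser import ConfigParser  # Python 3
except ImportError:
    from ConfigParser import SafeConfigParser as ConfigParser  # Python 2

cfg = ConfigParser()
cfg.read("autoProcessMedia.cfg")

# Default to quiet logging unless log_debug is explicitly enabled.
log_debug = False
if cfg.has_section("General") and cfg.has_option("General", "log_debug"):
    log_debug = cfg.getboolean("General", "log_debug")

logging.basicConfig()
logger = logging.getLogger("nzbtomedia")
logger.setLevel(logging.DEBUG if log_debug else logging.INFO)
logger.debug("only emitted when log_debug is enabled")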

A lot of code cleanup has been performed, including cleaning up log messages and correcting spelling errors.

Improved the release lookup code for autoProcessMovie: search results are now narrowed down by making API calls to the download clients and comparing the results against CouchPotato's database.
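
Conceptually, the lookup iterates over the releases returned by CouchPotato and keeps only the one the download client actually knows about, using the find_download() helper changed in the diff below. A hedged sketch (the release dictionary layout is an assumption):

def find_release(releases, clientAgent):
    # Ask the download client about each candidate release; the release whose
    # download_id the client recognises is the one that was just processed.
    for release in releases:
        download_id = release.get('download_info', {}).get('id')
        if download_id and find_download(clientAgent, download_id):
            return release
    return None
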
This commit is contained in:
echel0n 2014-04-18 13:16:18 -07:00
commit eb7822b60b
16 changed files with 420 additions and 441 deletions


@@ -4,7 +4,6 @@ import socket
import stat
import struct
import shutil
import sys
import time
from lib import requests
import nzbtomedia
@@ -15,9 +14,10 @@ from nzbtomedia.synchronousdeluge.client import DelugeClient
from nzbtomedia.utorrent.client import UTorrentClient
from nzbtomedia.transmissionrpc.client import Client as TransmissionClient
def safeName(name):
safename = re.sub(r"[\/\\\:\*\?\"\<\>\|]", "", name) #make this name safe for use in directories for windows etc.
return safename
return re.sub(r"[\/\\\:\*\?\"\<\>\|]", "", name) #make this name safe for use in directories for windows etc.
def makeDir(path):
if not os.path.isdir(path):
@@ -27,6 +27,7 @@ def makeDir(path):
return False
return True
def category_search(inputDirectory, inputName, inputCategory, root, categories):
single = False
tordir = False
@@ -49,16 +50,18 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories):
return inputDirectory, inputName, inputCategory, root, single
if inputCategory and os.path.isdir(os.path.join(inputDirectory, inputCategory)):
logger.info("SEARCH: Found category directory %s in input directory directory %s", inputCategory, inputDirectory)
logger.info(
"SEARCH: Found category directory %s in input directory directory %s" % (inputCategory, inputDirectory))
inputDirectory = os.path.join(inputDirectory, inputCategory)
logger.info("SEARCH: Setting inputDirectory to %s", inputDirectory)
if inputName and os.path.isdir(os.path.join(inputDirectory, inputName)):
logger.info("SEARCH: Found torrent directory %s in input directory directory %s", inputName, inputDirectory)
logger.info("SEARCH: Found torrent directory %s in input directory directory %s" % (inputName, inputDirectory))
inputDirectory = os.path.join(inputDirectory, inputName)
logger.info("SEARCH: Setting inputDirectory to %s", inputDirectory)
tordir = True
if inputName and os.path.isdir(os.path.join(inputDirectory, safeName(inputName))):
logger.info("SEARCH: Found torrent directory %s in input directory directory %s", safeName(inputName), inputDirectory)
logger.info("SEARCH: Found torrent directory %s in input directory directory %s" % (
safeName(inputName), inputDirectory))
inputDirectory = os.path.join(inputDirectory, safeName(inputName))
logger.info("SEARCH: Setting inputDirectory to %s", inputDirectory)
tordir = True
@@ -73,8 +76,8 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories):
index = pathlist.index(inputCategory)
if index + 1 < len(pathlist):
tordir = True
logger.info("SEARCH: Found a unique directory %s in the category directory", pathlist[index+1])
if not inputName: inputName = pathlist[index+1]
logger.info("SEARCH: Found a unique directory %s in the category directory", pathlist[index + 1])
if not inputName: inputName = pathlist[index + 1]
except ValueError:
pass
@@ -93,6 +96,7 @@ def category_search(inputDirectory, inputName, inputCategory, root, categories):
return inputDirectory, inputName, inputCategory, root, single
def is_sample(filePath, inputName, minSampleSize, SampleIDs):
# 200 MB in bytes
SIZE_CUTOFF = minSampleSize * 1024 * 1024
@@ -101,7 +105,7 @@ def is_sample(filePath, inputName, minSampleSize, SampleIDs):
return True
# Ignore 'sample' in files unless 'sample' in Torrent Name
for ident in SampleIDs:
if ident.lower() in filePath.lower() and not ident.lower() in inputName.lower():
if ident.lower() in filePath.lower() and not ident.lower() in inputName.lower():
return True
# Return False if none of these were met.
return False
@@ -115,41 +119,45 @@ def copy_link(filePath, targetDirectory, useLink, outputDestination):
makeDir(outputDestination)
if useLink == "hard":
try:
logger.info("COPYLINK: Hard linking %s to %s", filePath, targetDirectory)
logger.info("COPYLINK: Hard linking %s to %s" % (filePath, targetDirectory))
linktastic.link(filePath, targetDirectory)
except:
logger.error("COPYLINK")
if os.path.isfile(targetDirectory):
logger.warning("COPYLINK: Something went wrong in linktastic.link, but the destination file was created")
logger.warning(
"COPYLINK: Something went wrong in linktastic.link, but the destination file was created")
else:
logger.warning("COPYLINK: Something went wrong in linktastic.link, copying instead")
logger.debug("COPYLINK: Copying %s to %s", filePath, targetDirectory)
logger.debug("COPYLINK: Copying %s to %s" % (filePath, targetDirectory))
shutil.copy(filePath, targetDirectory)
elif useLink == "sym":
try:
logger.info("COPYLINK: Moving %s to %s before sym linking", filePath, targetDirectory)
logger.info("COPYLINK: Moving %s to %s before sym linking" % (filePath, targetDirectory))
shutil.move(filePath, targetDirectory)
logger.info("COPYLINK: Sym linking %s to %s", targetDirectory, filePath)
logger.info("COPYLINK: Sym linking %s to %s" % (targetDirectory, filePath))
linktastic.symlink(targetDirectory, filePath)
except:
logger.error("COPYLINK")
if os.path.isfile(targetDirectory):
logger.warning("COPYLINK: Something went wrong in linktastic.link, but the destination file was created")
logger.warning(
"COPYLINK: Something went wrong in linktastic.link, but the destination file was created")
else:
logger.info("COPYLINK: Something went wrong in linktastic.link, copying instead")
logger.debug("COPYLINK: Copying %s to %s", filePath, targetDirectory)
logger.debug("COPYLINK: Copying %s to %s" % (filePath, targetDirectory))
shutil.copy(filePath, targetDirectory)
elif useLink == "move":
logger.debug("Moving %s to %s", filePath, targetDirectory)
logger.debug("Moving %s to %s" % (filePath, targetDirectory))
shutil.move(filePath, targetDirectory)
else:
logger.debug("Copying %s to %s", filePath, targetDirectory)
logger.debug("Copying %s to %s" % (filePath, targetDirectory))
shutil.copy(filePath, targetDirectory)
return True
def flatten(outputDestination):
logger.info("FLATTEN: Flattening directory: %s", outputDestination)
for dirpath, dirnames, filenames in os.walk(outputDestination): # Flatten out the directory to make postprocessing easier
for dirpath, dirnames, filenames in os.walk(
outputDestination): # Flatten out the directory to make postprocessing easier
if dirpath == outputDestination:
continue # No need to try and move files in the root destination directory
for filename in filenames:
@@ -161,6 +169,7 @@ def flatten(outputDestination):
logger.error("FLATTEN: Could not flatten %s", source)
removeEmptyFolders(outputDestination) # Cleanup empty directories
def removeEmptyFolders(path):
logger.info("REMOVER: Removing empty folders in: %s", path)
if not os.path.isdir(path):
@@ -180,6 +189,7 @@ def removeEmptyFolders(path):
logger.debug("REMOVER: Removing empty folder: %s", path)
os.rmdir(path)
def remove_read_only(path):
if not os.path.isdir(path):
return
@@ -188,16 +198,16 @@ def remove_read_only(path):
logger.debug("Removing Read Only Flag for: %s", filename)
os.chmod(os.path.join(dirpath, filename), stat.S_IWRITE)
#Wake function
def WakeOnLan(ethernet_address):
addr_byte = ethernet_address.split(':')
hw_addr = struct.pack('BBBBBB', int(addr_byte[0], 16),
int(addr_byte[1], 16),
int(addr_byte[2], 16),
int(addr_byte[3], 16),
int(addr_byte[4], 16),
int(addr_byte[5], 16))
int(addr_byte[1], 16),
int(addr_byte[2], 16),
int(addr_byte[3], 16),
int(addr_byte[4], 16),
int(addr_byte[5], 16))
# Build the Wake-On-LAN "Magic Packet"...
@@ -210,6 +220,7 @@ def WakeOnLan(ethernet_address):
ss.sendto(msg, ('<broadcast>', 9))
ss.close()
#Test Connection function
def TestCon(host, port):
try:
@@ -218,50 +229,55 @@ def TestCon(host, port):
except:
return "Down"
def WakeUp():
wake = int(nzbtomedia.CFG["WakeOnLan"]["wake"])
if wake == 0: # just return if we don't need to wake anything.
if wake == 0: # just return if we don't need to wake anything.
return
logger.info(("Loading WakeOnLan config from %s", nzbtomedia.CONFIG_FILE))
host = nzbtomedia.CFG["WakeOnLan"]["host"]
port = int(nzbtomedia.CFG["WakeOnLan"]["port"])
mac = nzbtomedia.CFG["WakeOnLan"]["mac"]
i=1
i = 1
while TestCon(host, port) == "Down" and i < 4:
logger.info(("Sending WakeOnLan Magic Packet for mac: %s", mac))
WakeOnLan(mac)
time.sleep(20)
i=i+1
i = i + 1
if TestCon(host,port) == "Down": # final check.
logger.warning("System with mac: %s has not woken after 3 attempts. Continuing with the rest of the script.", mac)
if TestCon(host, port) == "Down": # final check.
logger.warning("System with mac: %s has not woken after 3 attempts. Continuing with the rest of the script.",
mac)
else:
logger.info("System with mac: %s has been woken. Continuing with the rest of the script.", mac)
def convert_to_ascii(nzbName, dirName):
ascii_convert = int(nzbtomedia.CFG["ASCII"]["convert"])
if ascii_convert == 0 or os.name == 'nt': # just return if we don't want to convert or on windows os and "\" is replaced!.
if ascii_convert == 0 or os.name == 'nt': # just return if we don't want to convert or on windows os and "\" is replaced!.
return nzbName, dirName
nzbName2 = str(nzbName.decode('ascii', 'replace').replace(u'\ufffd', '_'))
dirName2 = str(dirName.decode('ascii', 'replace').replace(u'\ufffd', '_'))
if dirName != dirName2:
logger.info("Renaming directory:%s to: %s.", dirName, dirName2)
logger.info("Renaming directory:%s to: %s." % (dirName, dirName2))
shutil.move(dirName, dirName2)
for dirpath, dirnames, filesnames in os.walk(dirName2):
for filename in filesnames:
filename2 = str(filename.decode('ascii', 'replace').replace(u'\ufffd', '_'))
if filename != filename2:
logger.info("Renaming file:%s to: %s.", filename, filename2)
logger.info("Renaming file:%s to: %s." % (filename, filename2))
shutil.move(filename, filename2)
nzbName = nzbName2
dirName = dirName2
return nzbName, dirName
def parse_other(args):
return os.path.normpath(args[1]), '', '', '', ''
def parse_rtorrent(args):
# rtorrent usage: system.method.set_key = event.download.finished,TorrentToMedia,
# "execute={/path/to/nzbToMedia/TorrentToMedia.py,\"$d.get_base_path=\",\"$d.get_name=\",\"$d.get_custom1=\",\"$d.get_hash=\"}"
@@ -285,6 +301,7 @@ def parse_rtorrent(args):
return inputDirectory, inputName, inputCategory, inputHash, inputID
def parse_utorrent(args):
# uTorrent usage: call TorrentToMedia.py "%D" "%N" "%L" "%I"
inputDirectory = os.path.normpath(args[1])
@@ -324,21 +341,23 @@ def parse_transmission(args):
inputID = os.getenv('TR_TORRENT_ID')
return inputDirectory, inputName, inputCategory, inputHash, inputID
def parse_args(clientAgent, args):
clients = {
'other': parse_other,
'rtorrent': parse_rtorrent,
'utorrent': parse_utorrent,
'deluge': parse_deluge,
'transmission': parse_transmission,
'other': parse_other,
'rtorrent': parse_rtorrent,
'utorrent': parse_utorrent,
'deluge': parse_deluge,
'transmission': parse_transmission,
}
try:
return clients[clientAgent](args)
except:return None, None, None, None, None
except:
return None, None, None, None, None
def get_dirnames(section, subsections=None):
dirNames = []
if subsections is None:
@@ -386,13 +405,14 @@ def get_dirnames(section, subsections=None):
shutil.move(mediafile, p)
dirNames.extend([os.path.join(outputDirectory, o) for o in os.listdir(outputDirectory) if
os.path.isdir(os.path.join(outputDirectory, o))])
os.path.isdir(os.path.join(outputDirectory, o))])
if not dirNames:
logger.warning("%s:%s has no directories identified for post-processing", section, subsection)
logger.warning("%s:%s has no directories identified for post-processing" % (section, subsection))
return list(set(dirNames))
def delete(dirName):
logger.info("Deleting %s", dirName)
try:
@@ -400,6 +420,7 @@ def delete(dirName):
except:
logger.error("Unable to delete folder %s", dirName)
def cleanup_directories(inputCategory, processCategories, result, directory):
if inputCategory in processCategories and result == 0 and os.path.isdir(directory):
num_files_new = int(0)
@@ -415,90 +436,97 @@ def cleanup_directories(inputCategory, processCategories, result, directory):
logger.info("All files have been processed. Cleaning directory %s", directory)
shutil.rmtree(directory)
else:
logger.info("Directory %s still contains %s media and/or meta files. This directory will not be removed.", directory, num_files_new)
logger.info(
"Directory %s still contains %s media and/or meta files. This directory will not be removed." % (
directory, num_files_new))
for item in file_list:
logger.debug("media/meta file found: %s", item)
def create_torrent_class(clientAgent):
# Hardlink solution for Torrents
TorrentClass = None
if clientAgent == 'utorrent':
try:
logger.debug("Connecting to %s: %s", clientAgent, nzbtomedia.UTORRENTWEBUI)
logger.debug("Connecting to %s: %s" % (clientAgent, nzbtomedia.UTORRENTWEBUI))
TorrentClass = UTorrentClient(nzbtomedia.UTORRENTWEBUI, nzbtomedia.UTORRENTUSR, nzbtomedia.UTORRENTPWD)
except:
logger.error("Failed to connect to uTorrent")
if clientAgent == 'transmission':
try:
logger.debug("Connecting to %s: http://%s:%s", clientAgent, nzbtomedia.TRANSMISSIONHOST,
nzbtomedia.TRANSMISSIONPORT)
TorrentClass = TransmissionClient(nzbtomedia.TRANSMISSIONHOST, nzbtomedia.TRANSMISSIONPORT, nzbtomedia.TRANSMISSIONUSR,
logger.debug("Connecting to %s: http://%s:%s" % (
clientAgent, nzbtomedia.TRANSMISSIONHOST, nzbtomedia.TRANSMISSIONPORT))
TorrentClass = TransmissionClient(nzbtomedia.TRANSMISSIONHOST, nzbtomedia.TRANSMISSIONPORT,
nzbtomedia.TRANSMISSIONUSR,
nzbtomedia.TRANSMISSIONPWD)
except:
logger.error("Failed to connect to Transmission")
if clientAgent == 'deluge':
try:
logger.debug("Connecting to %s: http://%s:%s", clientAgent, nzbtomedia.DELUGEHOST,
nzbtomedia.DELUGEPORT)
logger.debug("Connecting to %s: http://%s:%s" % (clientAgent, nzbtomedia.DELUGEHOST, nzbtomedia.DELUGEPORT))
TorrentClass = DelugeClient()
TorrentClass.connect(host =nzbtomedia.DELUGEHOST, port =nzbtomedia.DELUGEPORT, username =nzbtomedia.DELUGEUSR, password =nzbtomedia.DELUGEPWD)
TorrentClass.connect(host=nzbtomedia.DELUGEHOST, port=nzbtomedia.DELUGEPORT, username=nzbtomedia.DELUGEUSR,
password=nzbtomedia.DELUGEPWD)
except:
logger.error("Failed to connect to Deluge")
return TorrentClass
def pause_torrent(clientAgent, TorrentClass, inputHash, inputID, inputName):
# if we are using links with Torrents it means we need to pause it in order to access the files
logger.debug("Stoping torrent %s in %s while processing", inputName, clientAgent)
logger.debug("Stoping torrent %s in %s while processing" % (inputName, clientAgent))
if clientAgent == 'utorrent' and TorrentClass != "":
TorrentClass.stop(inputHash)
if clientAgent == 'transmission' and TorrentClass !="":
if clientAgent == 'transmission' and TorrentClass != "":
TorrentClass.stop_torrent(inputID)
if clientAgent == 'deluge' and TorrentClass != "":
TorrentClass.core.pause_torrent([inputID])
time.sleep(5) # Give Torrent client some time to catch up with the change
def resume_torrent(clientAgent, TorrentClass, inputHash, inputID, result, inputName):
# Hardlink solution for uTorrent, need to implent support for deluge, transmission
if clientAgent in ['utorrent', 'transmission', 'deluge'] and inputHash:
if clientAgent in ['utorrent', 'transmission', 'deluge'] and inputHash:
# Delete torrent and torrentdata from Torrent client if processing was successful.
if (int(nzbtomedia.CFG["Torrent"]["deleteOriginal"]) is 1 and result != 1) or nzbtomedia.USELINK == 'move': # if we move files, nothing to resume seeding.
logger.debug("Deleting torrent %s from %s", inputName, clientAgent)
if (int(nzbtomedia.CFG["Torrent"][
"deleteOriginal"]) is 1 and result != 1) or nzbtomedia.USELINK == 'move': # if we move files, nothing to resume seeding.
logger.debug("Deleting torrent %s from %s" % (inputName, clientAgent))
if clientAgent == 'utorrent' and TorrentClass != "":
TorrentClass.removedata(inputHash)
TorrentClass.remove(inputHash)
if clientAgent == 'transmission' and TorrentClass !="":
if clientAgent == 'transmission' and TorrentClass != "":
TorrentClass.remove_torrent(inputID, True)
if clientAgent == 'deluge' and TorrentClass != "":
TorrentClass.core.remove_torrent(inputID, True)
# we always want to resume seeding, for now manually find out what is wrong when extraction fails
else:
logger.debug("Starting torrent %s in %s", inputName, clientAgent)
logger.debug("Starting torrent %s in %s" % (inputName, clientAgent))
if clientAgent == 'utorrent' and TorrentClass != "":
TorrentClass.start(inputHash)
if clientAgent == 'transmission' and TorrentClass !="":
if clientAgent == 'transmission' and TorrentClass != "":
TorrentClass.start_torrent(inputID)
if clientAgent == 'deluge' and TorrentClass != "":
TorrentClass.core.resume_torrent([inputID])
time.sleep(5)
def find_download(clientAgent, nzbName, download_id):
def find_download(clientAgent, download_id):
tc = create_torrent_class(clientAgent)
logger.debug("Searching for Download on %s ...", clientAgent)
if clientAgent == 'utorrent':
torrents = tc.list()[1]['torrents']
if torrents:
for torrent in torrents:
if nzbName in torrent and download_id in torrent:
return True
for torrent in torrents:
if download_id in torrent:
return True
if clientAgent == 'transmission':
torrent = tc.get_torrent(download_id)
if torrent:
name = torrent.name
if name == nzbName:
torrents = tc.get_torrents()
for torrent in torrents:
hash = torrent.hashString
if hash == download_id:
return True
if clientAgent == 'deluge':
pass
@@ -507,9 +535,9 @@ def find_download(clientAgent, nzbName, download_id):
url = baseURL
params = {}
params['apikey'] = nzbtomedia.SABNZBDAPIKEY
params['mode'] = "history"
params['mode'] = "get_files"
params['output'] = 'json'
params['value'] = download_id
try:
r = requests.get(url, params=params)
except requests.ConnectionError:
@@ -517,7 +545,8 @@ def find_download(clientAgent, nzbName, download_id):
return 1 # failure
result = r.json()
pass
if result['files']:
return True
def clean_nzbname(nzbname):
@@ -537,6 +566,7 @@ def clean_nzbname(nzbname):
nzbname = re.sub("^\[.*\]", "", nzbname)
return nzbname.strip()
def isMediaFile(filename):
# ignore samples
if re.search('(^|[\W_])(sample\d*)[\W_]', filename, re.I):
@@ -556,6 +586,7 @@ def isMediaFile(filename):
else:
return False
def listMediaFiles(path):
if not dir or not os.path.isdir(path):
return []
@@ -571,4 +602,56 @@ def listMediaFiles(path):
elif isMediaFile(curFile):
files.append(fullCurFile)
return files
return files
def find_imdbid(dirName, nzbName):
imdbid = None
nzbName = clean_nzbname(nzbName)
logger.info('Attemping imdbID lookup for %s' % (nzbName))
# find imdbid in dirName
logger.info('Searching folder name for imdbID ...')
m = re.search('(tt\d{7})', dirName)
if m:
imdbid = m.group(1)
logger.info("Found movie id %s in directory" % imdbid)
return imdbid
# find imdbid in nzbName
logger.info('Searching filename for imdbID ...')
m = re.search('(tt\d{7})', nzbName)
if m:
imdbid = m.group(1)
logger.info("Found imdbid %s in name" % imdbid)
return imdbid
logger.info('Searching IMDB for imdbID ...')
m = re.search("^(.+)(\d{4})\W", nzbName)
if m:
title = m.group(1)
year = m.group(2)
url = "http://www.omdbapi.com"
logger.debug("Opening URL: %s" % url)
try:
r = requests.get(url, params={'y': year, 't': title})
except requests.ConnectionError:
logger.error("Unable to open URL %s" % url)
return
results = r.json()
try:
imdbid = results['imdbID']
except:
pass
if imdbid:
return imdbid
else:
logger.warning('Unable to find a imdbID for %s' % (nzbName))
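
As a usage sketch only (not part of the diff above), autoProcessMovie could seed its CouchPotato lookup with the new find_imdbid() helper; the path and release name below are made up:

# Hypothetical call; real values come from the post-processing arguments.
imdbid = find_imdbid('/downloads/movies/Some.Movie.2014.1080p', 'Some.Movie.2014.1080p')
if imdbid:
    logger.info("Using imdbID %s for the CouchPotato lookup" % imdbid)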