Merge branch 'clinton-hall/dev'

Conflicts:
	autoProcess/nzbToMediaEnv.py
echel0n committed 2014-03-30 18:29:49 -07:00
commit 9969eaa47a
9 changed files with 119 additions and 130 deletions

View file

@@ -21,7 +21,7 @@ import autoProcess.autoProcessMovie as autoProcessMovie
import autoProcess.autoProcessTV as autoProcessTV
from autoProcess.nzbToMediaEnv import *
from autoProcess.nzbToMediaUtil import *
from autoSickBeardFork import autoFork
from autoProcess.autoSickBeardFork import autoFork
from utorrent.client import UTorrentClient
from transmissionrpc.client import Client as TransmissionClient
from synchronousdeluge.client import DelugeClient
@@ -44,6 +44,8 @@ def main(inputDirectory, inputName, inputCategory, inputHash, inputID):
Logger.debug("MAIN: Determined Directory: %s | Name: %s | Category: %s", inputDirectory, inputName, inputCategory)
sbFork, sbParams = autoFork()
if inputCategory in sbCategory and sbFork in SICKBEARD_TORRENT and Torrent_ForceLink != 1:
Logger.info("MAIN: Calling SickBeard's %s branch to post-process: %s",sbFork ,inputName)
result = autoProcessTV.processEpisode(inputDirectory, inputName, int(0))
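The gate added here reduces to the following check (a minimal sketch, not part of the commit; SICKBEARD_TORRENT is defined in autoProcess/nzbToMediaEnv.py, and the fork name below is a placeholder): torrent-aware SickBeard forks post-process the download in place, so TorrentToMedia can skip its own linking and extraction unless Torrent_ForceLink overrides that.

SICKBEARD_TORRENT = ["TPB"]  # placeholder; see nzbToMediaEnv.py for the real list

def hand_off_to_sickbeard(inputCategory, sbCategory, sbFork, Torrent_ForceLink):
    # Hand the download straight to SickBeard when the detected fork can
    # post-process torrents itself and forced linking is not enabled.
    return (inputCategory in sbCategory
            and sbFork in SICKBEARD_TORRENT
            and Torrent_ForceLink != 1)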
@@ -209,7 +211,7 @@ def main(inputDirectory, inputName, inputCategory, inputHash, inputID):
except:
Logger.exception("MAIN: Failed to link file: %s", file)
# find part numbers in the second "extension" from the right, in case we have more than one compressed file in the same directory.
if re.search(r'\d+', os.path.splitext(fileName)[1]) and os.path.dirname(filePath) in extracted_folder and not (os.path.splitext(fileName)[1] in ['.720p','.1080p']):
if re.search(r'\d+', os.path.splitext(fileName)[1]) and os.path.dirname(filePath) in extracted_folder and not any(item in os.path.splitext(fileName)[1] for item in ['.720p','.1080p','.x264']):
part = int(re.search(r'\d+', os.path.splitext(fileName)[1]).group())
if part == 1: # we only want to extract the primary part.
Logger.debug("MAIN: Found primary part of a multi-part archive %s. Extracting", file)
@@ -315,21 +317,18 @@ def main(inputDirectory, inputName, inputCategory, inputHash, inputID):
except:
Logger.exception("MAIN: Failed to move file: %s", file)
continue
shutil.rmtree(outputDestination)
# Hardlink solution for uTorrent, need to implement support for deluge, transmission
if clientAgent in ['utorrent', 'transmission', 'deluge'] and inputHash:
# Delete torrent and torrentdata from Torrent client if processing was successful.
if deleteOriginal == 1 and result != 1:
if (deleteOriginal == 1 and result != 1) or useLink == 'move': # added useLink == 'move': if we move files, there is nothing left to seed.
Logger.debug("MAIN: Deleting torrent %s from %s", inputName, clientAgent)
if clientAgent == 'utorrent' and utorrentClass != "":
utorrentClass.removedata(inputHash)
if not inputCategory in hpCategory:
utorrentClass.remove(inputHash)
utorrentClass.remove(inputHash)
if clientAgent == 'transmission' and TransmissionClass !="":
if inputCategory in hpCategory: #don't delete actual files for hp category, just remove torrent.
TransmissionClass.remove_torrent(inputID, False)
else:
TransmissionClass.remove_torrent(inputID, True)
TransmissionClass.remove_torrent(inputID, True)
if clientAgent == 'deluge' and delugeClient != "":
delugeClient.core.remove_torrent(inputID, True)
# we always want to resume seeding; for now, manually find out what is wrong when extraction fails
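Condensed, the removal logic above amounts to the following (a sketch; the client objects are assumed to be connected instances of the clients imported at the top of the script):

def should_remove(deleteOriginal, result, useLink):
    # Remove the torrent when deletion is enabled and processing succeeded, or
    # when files were moved rather than linked (a moved download can no longer
    # be seeded, so there is nothing to resume).
    return (deleteOriginal == 1 and result != 1) or useLink == 'move'

def remove_torrent(clientAgent, client, inputHash, inputID):
    # Dispatch to the connected client; the second argument of the
    # Transmission/Deluge calls also deletes the downloaded data.
    if clientAgent == 'utorrent':
        client.removedata(inputHash)
        client.remove(inputHash)
    elif clientAgent == 'transmission':
        client.remove_torrent(inputID, True)
    elif clientAgent == 'deluge':
        client.core.remove_torrent(inputID, True)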
@@ -353,7 +352,7 @@ def main(inputDirectory, inputName, inputCategory, inputHash, inputID):
if fileExtension in mediaContainer or fileExtension in metaContainer:
num_files_new = num_files_new + 1
file_list.append(file)
if num_files_new == int(0):
if num_files_new == int(0) or forceClean == 1:
Logger.info("All files have been processed. Cleaning outputDirectory %s", outputDestination)
shutil.rmtree(outputDestination)
else:
@@ -483,6 +482,7 @@ if __name__ == "__main__":
DelugePWD = config.get("Torrent", "DelugePWD") # mysecretpwr
deleteOriginal = int(config.get("Torrent", "deleteOriginal")) # 0
forceClean = int(config.get("Torrent", "forceClean")) # 0
compressedContainer = (config.get("Extensions", "compressedExtensions")).split(',') # .zip,.rar,.7z
mediaContainer = (config.get("Extensions", "mediaExtensions")).split(',') # .mkv,.avi,.divx
@@ -492,7 +492,6 @@ if __name__ == "__main__":
cpsCategory = (config.get("CouchPotato", "cpsCategory")).split(',') # movie
sbCategory = (config.get("SickBeard", "sbCategory")).split(',') # tv
sbFork, sbParams = autoFork(config.get("SickBeard", "fork")) # default
Torrent_ForceLink = int(config.get("SickBeard", "Torrent_ForceLink")) # 1
hpCategory = (config.get("HeadPhones", "hpCategory")).split(',') # music
mlCategory = (config.get("Mylar", "mlCategory")).split(',') # comics

View file

@@ -45,13 +45,17 @@ def get_imdb(nzbName, dirName):
def get_movie_info(baseURL, imdbid, download_id):
if not imdbid and not download_id:
return "", None, imdbid
movie_id = ""
movie_status = None
release_status = None
if not imdbid and not download_id:
return movie_id, imdbid, download_id, movie_status, release_status
releaselist = []
movieid = []
moviestatus = []
library = []
release = []
offset = int(0)
while True:
url = baseURL + "media.list/?status=active&release_status=snatched&limit_offset=50," + str(offset)
@@ -66,134 +70,99 @@ def get_movie_info(baseURL, imdbid, download_id):
movieid2 = []
library2 = []
release2 = []
moviestatus2 = []
try:
result = json.load(urlObj)
movieid2 = [item["id"] for item in result["movies"]]
library2 = [item["library"]["identifier"] for item in result["movies"]]
movieid2 = [item["_id"] for item in result["movies"]]
for item in result["movies"]:
if "identifier" in item:
library2.append(item["identifier"])
else:
library2.append(item["identifiers"]["imdb"])
release2 = [item["releases"] for item in result["movies"]]
moviestatus2 = [item["status"] for item in result["movies"]]
except:
Logger.exception("Unable to parse json data for movies")
break
movieid.extend(movieid2)
moviestatus.extend(moviestatus2)
library.extend(library2)
release.extend(release2)
if len(movieid2) < int(50): # finished parsing list of movies. Time to break.
break
offset = offset + 50
result = None # reset
for index in range(len(movieid)):
if not imdbid:
url = baseURL + "media.get/?id=" + str(movieid[index])
Logger.debug("Opening URL: %s", url)
try:
urlObj = urllib.urlopen(url)
except:
Logger.exception("Unable to open URL")
return "", None, imdbid
try:
result = json.load(urlObj)
releaselist = [item["info"]["download_id"] for item in result["media"]["releases"] if "download_id" in item["info"] and item["info"]["download_id"].lower() == download_id.lower()]
except:
Logger.exception("Unable to parse json data for releases")
return "", None, imdbid
releaselist1 = [item for item in release[index] if item["status"] == "snatched" and "download_info" in item]
if download_id:
releaselist = [item for item in releaselist1 if item["download_info"]["id"].lower() == download_id.lower()]
else:
releaselist = releaselist1
if len(releaselist) > 0:
movie_id = str(movieid[index])
imdbid = str(library[index])
Logger.info("Found movie id %s and imdb %s in database via download_id %s", movie_id, imdbid, download_id)
break
else:
continue
if library[index] == imdbid:
if imdbid and library[index] == imdbid:
movie_id = str(movieid[index])
Logger.info("Found movie id %s in CPS database for movie %s", movie_id, imdbid)
break
movie_status = str(moviestatus[index])
Logger.info("Found movie id %s with status %s in CPS database for movie %s", movie_id, movie_status, imdbid)
if not download_id and len(releaselist) == 1:
download_id = releaselist[0]["download_info"]["id"]
elif not imdbid and download_id and len(releaselist) > 0:
movie_id = str(movieid[index])
movie_status = str(moviestatus[index])
imdbid = str(library[index])
Logger.info("Found movie id %s and imdb %s with status %s in CPS database via download_id %s", movie_id, imdbid, movie_status, download_id)
else:
continue
if len(releaselist) == 1:
release_status = releaselist[0]["status"]
Logger.debug("Found a single release with download_id: %s. Release status is: %s", download_id, release_status)
break
if not movie_id:
Logger.exception("Could not parse database results to determine imdbid or movie id")
return movie_id, result, imdbid
return movie_id, imdbid, download_id, movie_status, release_status
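The paging pattern the rewritten get_movie_info() relies on can be reduced to this (a minimal Python 2 sketch; it assumes a reachable CouchPotato server and a valid API key already baked into baseURL):

import json
import urllib

def list_snatched(baseURL):
    # Fetch all active movies with snatched releases, 50 per request, until a
    # short page signals the end of the list.
    movies = []
    offset = 0
    while True:
        url = baseURL + "media.list/?status=active&release_status=snatched&limit_offset=50," + str(offset)
        page = json.load(urllib.urlopen(url))["movies"]
        movies.extend(page)
        if len(page) < 50:
            break
        offset += 50
    return movies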
def get_status(baseURL, movie_id, clientAgent, download_id, result=None):
def get_status(baseURL, movie_id, download_id):
movie_status = None
release_status = None
if not movie_id:
return "", clientAgent, "none", "none"
return movie_status, release_status
Logger.debug("Looking for status of movie: %s - with release sent to clientAgent: %s and download_id: %s", movie_id, clientAgent, download_id)
if not result: # we haven't already called media.get
url = baseURL + "media.get/?id=" + str(movie_id)
Logger.debug("Opening URL: %s", url)
Logger.debug("Looking for status of movie: %s", movie_id)
url = baseURL + "media.get/?id=" + str(movie_id)
Logger.debug("Opening URL: %s", url)
try:
urlObj = urllib.urlopen(url)
except:
Logger.exception("Unable to open URL")
return "", clientAgent, "none", "none"
try:
urlObj = urllib.urlopen(url)
except:
Logger.exception("Unable to open URL")
return None, None
try:
result = json.load(urlObj)
try:
movie_status = result["media"]["status"]["identifier"]
movie_status = str(result["media"]["status"])
Logger.debug("This movie is marked as status %s in CouchPotatoServer", movie_status)
except: # index out of range/doesn't exist?
except:
Logger.exception("Could not find a status for this movie")
movie_status = ""
try:
release_status = "none"
if download_id != "" and download_id != "none": # we have the download id from the downloader. Let's see if it's valid.
release_statuslist = [item["status"]["identifier"] for item in result["media"]["releases"] if "download_id" in item["info"] and item["info"]["download_id"].lower() == download_id.lower()]
clientAgentlist = [item["info"]["download_downloader"] for item in result["media"]["releases"] if "download_id" in item["info"] and item["info"]["download_id"].lower() == download_id.lower()]
if len(release_statuslist) == 1: # we have found a release by this id. :)
release_status = release_statuslist[0]
clientAgent = clientAgentlist[0]
Logger.debug("Found a single release with download_id: %s for clientAgent: %s. Release status is: %s", download_id, clientAgent, release_status)
return movie_status, clientAgent, download_id, release_status
elif len(release_statuslist) > 1: # we have found many releases by this id. Check for snatched status
clients = [item for item in clientAgentlist if item.lower() == clientAgent.lower()]
clientAgent = clients[0]
if len(clients) == 1: # ok.. a unique entry for download_id and clientAgent ;)
release_status = [item["status"]["identifier"] for item in result["media"]["releases"] if "download_id" in item["info"] and item["info"]["download_id"].lower() == download_id.lower() and item["info"]["download_downloader"] == clientAgent][0]
Logger.debug("Found a single release for download_id: %s and clientAgent: %s. Release status is: %s", download_id, clientAgent, release_status)
else: # doesn't matter. only really used as secondary confirmation of movie status change. Let's continue.
Logger.debug("Found several releases for download_id: %s and clientAgent: %s. Cannot determine the release status", download_id, clientAgent)
return movie_status, clientAgent, download_id, release_status
else: # clearly the id we were passed doesn't match the database. Reset it and search all snatched releases.... hence the next if (not elif ;) )
download_id = ""
if download_id == "none": # if we couldn't find this initially, there is no need to check next time around.
return movie_status, clientAgent, download_id, release_status
elif download_id == "": # in case we didn't get this from the downloader.
download_idlist = [item["info"]["download_id"] for item in result["media"]["releases"] if item["status"]["identifier"] == "snatched"]
clientAgentlist = [item["info"]["download_downloader"] for item in result["media"]["releases"] if item["status"]["identifier"] == "snatched"]
if len(clientAgentlist) == 1:
if clientAgent == "manual":
clientAgent = clientAgentlist[0]
download_id = download_idlist[0]
release_status = "snatched"
elif clientAgent.lower() == clientAgentlist[0].lower():
download_id = download_idlist[0]
clientAgent = clientAgentlist[0]
release_status = "snatched"
Logger.debug("Found a single download_id: %s and clientAgent: %s. Release status is: %s", download_id, clientAgent, release_status)
elif clientAgent == "manual":
download_id = "none"
release_status = "none"
else:
index = [index for index in range(len(clientAgentlist)) if clientAgentlist[index].lower() == clientAgent.lower()]
if len(index) == 1:
download_id = download_idlist[index[0]]
clientAgent = clientAgentlist[index[0]]
release_status = "snatched"
Logger.debug("Found download_id: %s for clientAgent: %s. Release status is: %s", download_id, clientAgent, release_status)
else:
Logger.info("Found a total of %s releases snatched for clientAgent: %s. Cannot determine download_id. Will perform a renamenr scan to try and process.", len(index), clientAgent)
download_id = "none"
release_status = "none"
else: #something went wrong here.... we should never get to this.
Logger.info("Could not find a download_id in the database for this movie")
release_status = "none"
if len(result["media"]["releases"]) == 1 and result["media"]["releases"][0]["status"] == "done":
release_status = result["media"]["releases"][0]["status"]
else:
release_status_list = [item["status"] for item in result["media"]["releases"] if "download_info" in item and item["download_info"]["id"].lower() == download_id.lower()]
if len(release_status_list) == 1:
release_status = release_status_list[0]
Logger.debug("This release is marked as status %s in CouchPotatoServer", release_status)
except: # index out of range/doesn't exist?
Logger.exception("Could not find a download_id for this movie")
download_id = "none"
return movie_status, clientAgent, download_id, release_status
Logger.exception("Could not find a status for this release")
return movie_status, release_status
def process(dirName, nzbName=None, status=0, clientAgent = "manual", download_id = "", inputCategory=None):
@@ -254,9 +223,7 @@ def process(dirName, nzbName=None, status=0, clientAgent = "manual", download_id
baseURL = protocol + host + ":" + port + web_root + "/api/" + apikey + "/"
movie_id, result, imdbid = get_movie_info(baseURL, imdbid, download_id) # get the CPS database movie id for this movie.
initial_status, clientAgent, download_id, initial_release_status = get_status(baseURL, movie_id, clientAgent, download_id, result)
movie_id, imdbid, download_id, initial_status, initial_release_status = get_movie_info(baseURL, imdbid, download_id) # get the CPS database movie id for this movie.
process_all_exceptions(nzbName.lower(), dirName)
nzbName, dirName = converto_to_ascii(nzbName, dirName)
@@ -276,7 +243,7 @@ def process(dirName, nzbName=None, status=0, clientAgent = "manual", download_id
command = "manage.update"
else:
command = "renamer.scan"
if clientAgent != "manual" and download_id != "none":
if clientAgent != "manual" and download_id != None:
if remoteCPS == 1:
command = command + "/?downloader=" + clientAgent + "&download_id=" + download_id
else:
@@ -314,7 +281,7 @@ def process(dirName, nzbName=None, status=0, clientAgent = "manual", download_id
Logger.error("Exiting autoProcessMovie script")
return 1 # failure
url = baseURL + "movie.searcher.try_next/?id=" + movie_id
url = baseURL + "movie.searcher.try_next/?media_id=" + movie_id
Logger.debug("Opening URL: %s", url)
@@ -337,8 +304,10 @@ def process(dirName, nzbName=None, status=0, clientAgent = "manual", download_id
Logger.exception("Unable to delete folder %s", dirName)
return 0 # success
if nzbName == "Manual Run" or download_id == "none":
if nzbName == "Manual Run":
return 0 # success
if not download_id:
return 1 # just to be sure TorrentToMedia doesn't start deleting files, as we haven't verified a changed status.
# we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing.
socket.setdefaulttimeout(int(TimeOut)) #initialize socket timeout.
@@ -346,13 +315,13 @@ def process(dirName, nzbName=None, status=0, clientAgent = "manual", download_id
start = datetime.datetime.now() # set time for timeout
pause_for = int(wait_for) * 10 # keep this so we only ever have 6 complete loops. This may not be necessary now?
while (datetime.datetime.now() - start) < datetime.timedelta(minutes=wait_for): # only wait 2 (default) minutes, then return.
movie_status, clientAgent, download_id, release_status = get_status(baseURL, movie_id, clientAgent, download_id) # get the current status of this movie.
if movie_status != initial_status: # Something has changed. CPS must have processed this movie.
movie_status, release_status = get_status(baseURL, movie_id, download_id) # get the current status of this movie.
if movie_status and initial_status and movie_status != initial_status: # Something has changed. CPS must have processed this movie.
Logger.info("SUCCESS: This movie is now marked as status %s in CouchPotatoServer", movie_status)
return 0 # success
time.sleep(pause_for) # Just stop this looping infinitely and hogging resources for 2 minutes ;)
else:
if release_status != initial_release_status and release_status != "none": # Something has changed. CPS must have processed this movie.
if release_status and initial_release_status and release_status != initial_release_status: # Something has changed. CPS must have processed this movie.
Logger.info("SUCCESS: This release is now marked as status %s in CouchPotatoServer", release_status)
return 0 # success
else: # The status hasn't changed. We have waited 2 minutes, which is more than enough; uTorrent can resume seeding now.

View file

@@ -130,12 +130,11 @@ def processEpisode(dirName, nzbName=None, failed=False, clientAgent=None, inputC
# auto-detect fork type
fork, params = autoFork()
if (not fork in SICKBEARD_TORRENT) or (clientAgent in ['nzbget','sabnzbd'] and not nzbExtractionBy == "Destination"):
if nzbName != "Manual Run" and (not fork in SICKBEARD_TORRENT or (clientAgent in ['nzbget','sabnzbd'] and not nzbExtractionBy == "Destination")):
process_all_exceptions(nzbName.lower(), dirName)
nzbName, dirName = converto_to_ascii(nzbName, dirName)
if nzbName != "Manual Run" and not fork in SICKBEARD_TORRENT:
# Now check if movie files exist in destination:
# Now check if movie files exist in destination. Eventually extraction may be done here if nzbExtractionBy == TorrentToMedia
video = int(0)
for dirpath, dirnames, filenames in os.walk(dirName):
for file in filenames:
@@ -176,6 +175,12 @@ def processEpisode(dirName, nzbName=None, failed=False, clientAgent=None, inputC
else:
del params[param]
if param == "process":
params["process"] = None
if param == "process_method":
params["process_method"] = None
if nzbName != None:
params['nzbName'] = nzbName
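After this pruning, the surviving parameters are urlencoded onto SickBeard's post-processing URL, roughly as below (a hedged sketch: host, port and values are placeholders, and the endpoint path is assumed from the rest of the script):

import urllib
import urllib2

params = {'nzbName': 'Some.Show.S01E01.720p', 'dir': '/downloads/tv', 'quiet': 1}
url = "http://localhost:8081/home/postprocess/processEpisode?" + urllib.urlencode(params)
response = urllib2.urlopen(url).read()  # SickBeard streams its processing log back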

View file

@@ -328,7 +328,7 @@ def addnzbget():
envKeys = ['CATEGORY', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'SSL', 'WEB_ROOT']
cfgKeys = ['mlCategory', 'host', 'port', 'username', 'password', 'ssl', 'web_root']
for index in range(len(envKeys)):
key = 'NZBPO_ML' + envKeys[index]
key = 'NZBPO_MY' + envKeys[index]
if os.environ.has_key(key):
option = cfgKeys[index]
value = os.environ[key]
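The one-character fix matters because NZBGet hands script options to post-processing scripts as NZBPO_<NAME> environment variables, and Mylar's options are prefixed MY, not ML. A minimal sketch of the mapping:

import os

envKeys = ['CATEGORY', 'HOST', 'PORT']
cfgKeys = ['mlCategory', 'host', 'port']
for envKey, option in zip(envKeys, cfgKeys):
    key = 'NZBPO_MY' + envKey  # e.g. NZBPO_MYHOST
    if key in os.environ:
        print("[Mylar] %s = %s" % (option, os.environ[key]))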

View file

@@ -1,7 +1,7 @@
# Make things easy and less error prone by centralising all common values
# Global Constants
VERSION = 'V9.2'
VERSION = 'V9.3'
TimeOut = 60
# Constants pertinent to SabNzb

View file

@@ -105,6 +105,7 @@ DelugeUSR = your username
DelugePWD = your password
###### ADVANCED USE - ONLY EDIT IF YOU KNOW WHAT YOU'RE DOING ######
deleteOriginal = 0
forceClean = 0
[Extensions]
compressedExtensions = .zip,.rar,.7z,.gz,.bz,.tar,.arj,.1,.01,.001

View file

@@ -1,5 +1,20 @@
Change_LOG / History
V9.3 XX/XX/2014
Impacts Torrents
Allow Headphones to remove torrents and data after processing.
Delete torrent if useLink = move.
Added forceClean for outputDir. Works if file permissions prevent CP/SB from moving files.
Ignore .x264 in archive "part" checks.
Impacts NZBs
Fix setting of Mylar config from NZBGet.
Impacts All
Changes to CouchPotato API for [nosql] added. Keeps aligned with the current CouchPotato develop branch.
Added auto-detection of SickBeard fork.
V9.2 05/03/2014
Impacts All

View file

@@ -109,7 +109,7 @@
# SickBeard fork.
#
# set to default or auto to auto-detect the custom failed fork type".
# set to default or auto to auto-detect the custom fork type.
#sbfork=auto
# SickBeard Delete Failed Downloads (0, 1)

View file

@@ -58,8 +58,8 @@
# SickBeard fork.
#
# set to default or TPB or failed if using the custom "TPB" or "failed fork".
#sbfork=default
# set to default or auto to auto-detect the custom fork type.
#sbfork=auto
# SickBeard Delete Failed Downloads (0, 1).
#