Merge branch 'clinton-hall/dev'

Conflicts:
	autoProcess/nzbToMediaEnv.py
This commit is contained in:
echel0n 2014-03-30 18:29:49 -07:00
commit 9969eaa47a
9 changed files with 119 additions and 130 deletions

View file

@ -21,7 +21,7 @@ import autoProcess.autoProcessMovie as autoProcessMovie
import autoProcess.autoProcessTV as autoProcessTV import autoProcess.autoProcessTV as autoProcessTV
from autoProcess.nzbToMediaEnv import * from autoProcess.nzbToMediaEnv import *
from autoProcess.nzbToMediaUtil import * from autoProcess.nzbToMediaUtil import *
from autoSickBeardFork import autoFork from autoProcess.autoSickBeardFork import autoFork
from utorrent.client import UTorrentClient from utorrent.client import UTorrentClient
from transmissionrpc.client import Client as TransmissionClient from transmissionrpc.client import Client as TransmissionClient
from synchronousdeluge.client import DelugeClient from synchronousdeluge.client import DelugeClient
@ -44,6 +44,8 @@ def main(inputDirectory, inputName, inputCategory, inputHash, inputID):
Logger.debug("MAIN: Determined Directory: %s | Name: %s | Category: %s", inputDirectory, inputName, inputCategory) Logger.debug("MAIN: Determined Directory: %s | Name: %s | Category: %s", inputDirectory, inputName, inputCategory)
sbFork, sbParams = autoFork()
if inputCategory in sbCategory and sbFork in SICKBEARD_TORRENT and Torrent_ForceLink != 1: if inputCategory in sbCategory and sbFork in SICKBEARD_TORRENT and Torrent_ForceLink != 1:
Logger.info("MAIN: Calling SickBeard's %s branch to post-process: %s",sbFork ,inputName) Logger.info("MAIN: Calling SickBeard's %s branch to post-process: %s",sbFork ,inputName)
result = autoProcessTV.processEpisode(inputDirectory, inputName, int(0)) result = autoProcessTV.processEpisode(inputDirectory, inputName, int(0))
@ -209,7 +211,7 @@ def main(inputDirectory, inputName, inputCategory, inputHash, inputID):
except: except:
Logger.exception("MAIN: Failed to link file: %s", file) Logger.exception("MAIN: Failed to link file: %s", file)
# find part numbers in second "extension" from right, if we have more than 1 compressed file in the same directory. # find part numbers in second "extension" from right, if we have more than 1 compressed file in the same directory.
if re.search(r'\d+', os.path.splitext(fileName)[1]) and os.path.dirname(filePath) in extracted_folder and not (os.path.splitext(fileName)[1] in ['.720p','.1080p']): if re.search(r'\d+', os.path.splitext(fileName)[1]) and os.path.dirname(filePath) in extracted_folder and not any(item in os.path.splitext(fileName)[1] for item in ['.720p','.1080p','.x264']):
part = int(re.search(r'\d+', os.path.splitext(fileName)[1]).group()) part = int(re.search(r'\d+', os.path.splitext(fileName)[1]).group())
if part == 1: # we only want to extract the primary part. if part == 1: # we only want to extract the primary part.
Logger.debug("MAIN: Found primary part of a multi-part archive %s. Extracting", file) Logger.debug("MAIN: Found primary part of a multi-part archive %s. Extracting", file)
@ -315,21 +317,18 @@ def main(inputDirectory, inputName, inputCategory, inputHash, inputID):
except: except:
Logger.exception("MAIN: Failed to move file: %s", file) Logger.exception("MAIN: Failed to move file: %s", file)
continue continue
shutil.rmtree(outputDestination)
# Hardlink solution for uTorrent, need to implement support for deluge, transmission # Hardlink solution for uTorrent, need to implement support for deluge, transmission
if clientAgent in ['utorrent', 'transmission', 'deluge'] and inputHash: if clientAgent in ['utorrent', 'transmission', 'deluge'] and inputHash:
# Delete torrent and torrentdata from Torrent client if processing was successful. # Delete torrent and torrentdata from Torrent client if processing was successful.
if deleteOriginal == 1 and result != 1: if (deleteOriginal == 1 and result != 1) or useLink == 'move': # added uselink = move, if we move files, nothing to resume seeding.
Logger.debug("MAIN: Deleting torrent %s from %s", inputName, clientAgent) Logger.debug("MAIN: Deleting torrent %s from %s", inputName, clientAgent)
if clientAgent == 'utorrent' and utorrentClass != "": if clientAgent == 'utorrent' and utorrentClass != "":
utorrentClass.removedata(inputHash) utorrentClass.removedata(inputHash)
if not inputCategory in hpCategory: utorrentClass.remove(inputHash)
utorrentClass.remove(inputHash)
if clientAgent == 'transmission' and TransmissionClass !="": if clientAgent == 'transmission' and TransmissionClass !="":
if inputCategory in hpCategory: #don't delete actual files for hp category, just remove torrent. TransmissionClass.remove_torrent(inputID, True)
TransmissionClass.remove_torrent(inputID, False)
else:
TransmissionClass.remove_torrent(inputID, True)
if clientAgent == 'deluge' and delugeClient != "": if clientAgent == 'deluge' and delugeClient != "":
delugeClient.core.remove_torrent(inputID, True) delugeClient.core.remove_torrent(inputID, True)
# we always want to resume seeding, for now manually find out what is wrong when extraction fails # we always want to resume seeding, for now manually find out what is wrong when extraction fails
@ -353,7 +352,7 @@ def main(inputDirectory, inputName, inputCategory, inputHash, inputID):
if fileExtension in mediaContainer or fileExtension in metaContainer: if fileExtension in mediaContainer or fileExtension in metaContainer:
num_files_new = num_files_new + 1 num_files_new = num_files_new + 1
file_list.append(file) file_list.append(file)
if num_files_new == int(0): if num_files_new == int(0) or forceClean == 1:
Logger.info("All files have been processed. Cleaning outputDirectory %s", outputDestination) Logger.info("All files have been processed. Cleaning outputDirectory %s", outputDestination)
shutil.rmtree(outputDestination) shutil.rmtree(outputDestination)
else: else:
@ -483,6 +482,7 @@ if __name__ == "__main__":
DelugePWD = config.get("Torrent", "DelugePWD") # mysecretpwr DelugePWD = config.get("Torrent", "DelugePWD") # mysecretpwr
deleteOriginal = int(config.get("Torrent", "deleteOriginal")) # 0 deleteOriginal = int(config.get("Torrent", "deleteOriginal")) # 0
forceClean = int(config.get("Torrent", "forceClean")) # 0
compressedContainer = (config.get("Extensions", "compressedExtensions")).split(',') # .zip,.rar,.7z compressedContainer = (config.get("Extensions", "compressedExtensions")).split(',') # .zip,.rar,.7z
mediaContainer = (config.get("Extensions", "mediaExtensions")).split(',') # .mkv,.avi,.divx mediaContainer = (config.get("Extensions", "mediaExtensions")).split(',') # .mkv,.avi,.divx
@ -492,7 +492,6 @@ if __name__ == "__main__":
cpsCategory = (config.get("CouchPotato", "cpsCategory")).split(',') # movie cpsCategory = (config.get("CouchPotato", "cpsCategory")).split(',') # movie
sbCategory = (config.get("SickBeard", "sbCategory")).split(',') # tv sbCategory = (config.get("SickBeard", "sbCategory")).split(',') # tv
sbFork, sbParams = autoFork(config.get("SickBeard", "fork")) # default
Torrent_ForceLink = int(config.get("SickBeard", "Torrent_ForceLink")) # 1 Torrent_ForceLink = int(config.get("SickBeard", "Torrent_ForceLink")) # 1
hpCategory = (config.get("HeadPhones", "hpCategory")).split(',') # music hpCategory = (config.get("HeadPhones", "hpCategory")).split(',') # music
mlCategory = (config.get("Mylar", "mlCategory")).split(',') # comics mlCategory = (config.get("Mylar", "mlCategory")).split(',') # comics

View file

@ -44,14 +44,18 @@ def get_imdb(nzbName, dirName):
return "" return ""
def get_movie_info(baseURL, imdbid, download_id): def get_movie_info(baseURL, imdbid, download_id):
if not imdbid and not download_id:
return "", None, imdbid
movie_id = "" movie_id = ""
movie_status = None
release_status = None
if not imdbid and not download_id:
return movie_id, imdbid, download_id, movie_status, release_status
releaselist = [] releaselist = []
movieid = [] movieid = []
moviestatus = []
library = [] library = []
release = []
offset = int(0) offset = int(0)
while True: while True:
url = baseURL + "media.list/?status=active&release_status=snatched&limit_offset=50," + str(offset) url = baseURL + "media.list/?status=active&release_status=snatched&limit_offset=50," + str(offset)
@ -66,134 +70,99 @@ def get_movie_info(baseURL, imdbid, download_id):
movieid2 = [] movieid2 = []
library2 = [] library2 = []
release2 = []
moviestatus2 = []
try: try:
result = json.load(urlObj) result = json.load(urlObj)
movieid2 = [item["id"] for item in result["movies"]] movieid2 = [item["_id"] for item in result["movies"]]
library2 = [item["library"]["identifier"] for item in result["movies"]] for item in result["movies"]:
if "identifier" in item:
library2.append(item["identifier"])
else:
library2.append(item["identifiers"]["imdb"])
release2 = [item["releases"] for item in result["movies"]]
moviestatus2 = [item["status"] for item in result["movies"]]
except: except:
Logger.exception("Unable to parse json data for movies") Logger.exception("Unable to parse json data for movies")
break break
movieid.extend(movieid2) movieid.extend(movieid2)
moviestatus.extend(moviestatus2)
library.extend(library2) library.extend(library2)
release.extend(release2)
if len(movieid2) < int(50): # finished parsing list of movies. Time to break. if len(movieid2) < int(50): # finished parsing list of movies. Time to break.
break break
offset = offset + 50 offset = offset + 50
result = None # reset result = None # reset
for index in range(len(movieid)): for index in range(len(movieid)):
if not imdbid: releaselist1 = [item for item in release[index] if item["status"] == "snatched" and "download_info" in item]
url = baseURL + "media.get/?id=" + str(movieid[index]) if download_id:
Logger.debug("Opening URL: %s", url) releaselist = [item for item in releaselist1 if item["download_info"]["id"].lower() == download_id.lower()]
try: else:
urlObj = urllib.urlopen(url) releaselist = releaselist1
except:
Logger.exception("Unable to open URL")
return "", None, imdbid
try:
result = json.load(urlObj)
releaselist = [item["info"]["download_id"] for item in result["media"]["releases"] if "download_id" in item["info"] and item["info"]["download_id"].lower() == download_id.lower()]
except:
Logger.exception("Unable to parse json data for releases")
return "", None, imdbid
if len(releaselist) > 0: if imdbid and library[index] == imdbid:
movie_id = str(movieid[index])
imdbid = str(library[index])
Logger.info("Found movie id %s and imdb %s in database via download_id %s", movie_id, imdbid, download_id)
break
else:
continue
if library[index] == imdbid:
movie_id = str(movieid[index]) movie_id = str(movieid[index])
Logger.info("Found movie id %s in CPS database for movie %s", movie_id, imdbid) movie_status = str(moviestatus[index])
break Logger.info("Found movie id %s with status %s in CPS database for movie %s", movie_id, movie_status, imdbid)
if not download_id and len(releaselist) == 1:
download_id = releaselist[0]["download_info"]["id"]
elif not imdbid and download_id and len(releaselist) > 0:
movie_id = str(movieid[index])
movie_status = str(moviestatus[index])
imdbid = str(library[index])
Logger.info("Found movie id %s and imdb %s with status %s in CPS database via download_id %s", movie_id, imdbid, movie_status, download_id)
else:
continue
if len(releaselist) == 1:
release_status = releaselist[0]["status"]
Logger.debug("Found a single release with download_id: %s. Release status is: %s", download_id, release_status)
break
if not movie_id: if not movie_id:
Logger.exception("Could not parse database results to determine imdbid or movie id") Logger.exception("Could not parse database results to determine imdbid or movie id")
return movie_id, result, imdbid return movie_id, imdbid, download_id, movie_status, release_status
def get_status(baseURL, movie_id, clientAgent, download_id, result=None): def get_status(baseURL, movie_id, download_id):
movie_status = None
release_status = None
if not movie_id: if not movie_id:
return "", clientAgent, "none", "none" return movie_status, release_status
Logger.debug("Looking for status of movie: %s - with release sent to clientAgent: %s and download_id: %s", movie_id, clientAgent, download_id) Logger.debug("Looking for status of movie: %s", movie_id)
if not result: # we haven't already called media.get url = baseURL + "media.get/?id=" + str(movie_id)
url = baseURL + "media.get/?id=" + str(movie_id) Logger.debug("Opening URL: %s", url)
Logger.debug("Opening URL: %s", url)
try: try:
urlObj = urllib.urlopen(url) urlObj = urllib.urlopen(url)
except: except:
Logger.exception("Unable to open URL") Logger.exception("Unable to open URL")
return "", clientAgent, "none", "none" return None, None
try:
result = json.load(urlObj) result = json.load(urlObj)
try: movie_status = str(result["media"]["status"])
movie_status = result["media"]["status"]["identifier"]
Logger.debug("This movie is marked as status %s in CouchPotatoServer", movie_status) Logger.debug("This movie is marked as status %s in CouchPotatoServer", movie_status)
except: # index out of range/doesn't exist? except:
Logger.exception("Could not find a status for this movie") Logger.exception("Could not find a status for this movie")
movie_status = ""
try: try:
release_status = "none" if len(result["media"]["releases"]) == 1 and result["media"]["releases"][0]["status"] == "done":
if download_id != "" and download_id != "none": # we have the download id from the downloader. Let's see if it's valid. release_status = result["media"]["releases"][0]["status"]
release_statuslist = [item["status"]["identifier"] for item in result["media"]["releases"] if "download_id" in item["info"] and item["info"]["download_id"].lower() == download_id.lower()] else:
clientAgentlist = [item["info"]["download_downloader"] for item in result["media"]["releases"] if "download_id" in item["info"] and item["info"]["download_id"].lower() == download_id.lower()] release_status_list = [item["status"] for item in result["media"]["releases"] if "download_info" in item and item["download_info"]["id"].lower() == download_id.lower()]
if len(release_statuslist) == 1: # we have found a release by this id. :) if len(release_status_list) == 1:
release_status = release_statuslist[0] release_status = release_status_list[0]
clientAgent = clientAgentlist[0] Logger.debug("This release is marked as status %s in CouchPotatoServer", release_status)
Logger.debug("Found a single release with download_id: %s for clientAgent: %s. Release status is: %s", download_id, clientAgent, release_status)
return movie_status, clientAgent, download_id, release_status
elif len(release_statuslist) > 1: # we have found many releases by this id. Check for snatched status
clients = [item for item in clientAgentlist if item.lower() == clientAgent.lower()]
clientAgent = clients[0]
if len(clients) == 1: # ok.. a unique entry for download_id and clientAgent ;)
release_status = [item["status"]["identifier"] for item in result["media"]["releases"] if "download_id" in item["info"] and item["info"]["download_id"].lower() == download_id.lower() and item["info"]["download_downloader"] == clientAgent][0]
Logger.debug("Found a single release for download_id: %s and clientAgent: %s. Release status is: %s", download_id, clientAgent, release_status)
else: # doesn't matter. only really used as secondary confirmation of movie status change. Let's continue.
Logger.debug("Found several releases for download_id: %s and clientAgent: %s. Cannot determine the release status", download_id, clientAgent)
return movie_status, clientAgent, download_id, release_status
else: # clearly the id we were passed doesn't match the database. Reset it and search all snatched releases.... hence the next if (not elif ;) )
download_id = ""
if download_id == "none": # if we couldn't find this initially, there is no need to check next time around.
return movie_status, clientAgent, download_id, release_status
elif download_id == "": # in case we didn't get this from the downloader.
download_idlist = [item["info"]["download_id"] for item in result["media"]["releases"] if item["status"]["identifier"] == "snatched"]
clientAgentlist = [item["info"]["download_downloader"] for item in result["media"]["releases"] if item["status"]["identifier"] == "snatched"]
if len(clientAgentlist) == 1:
if clientAgent == "manual":
clientAgent = clientAgentlist[0]
download_id = download_idlist[0]
release_status = "snatched"
elif clientAgent.lower() == clientAgentlist[0].lower():
download_id = download_idlist[0]
clientAgent = clientAgentlist[0]
release_status = "snatched"
Logger.debug("Found a single download_id: %s and clientAgent: %s. Release status is: %s", download_id, clientAgent, release_status)
elif clientAgent == "manual":
download_id = "none"
release_status = "none"
else:
index = [index for index in range(len(clientAgentlist)) if clientAgentlist[index].lower() == clientAgent.lower()]
if len(index) == 1:
download_id = download_idlist[index[0]]
clientAgent = clientAgentlist[index[0]]
release_status = "snatched"
Logger.debug("Found download_id: %s for clientAgent: %s. Release status is: %s", download_id, clientAgent, release_status)
else:
Logger.info("Found a total of %s releases snatched for clientAgent: %s. Cannot determine download_id. Will perform a renamenr scan to try and process.", len(index), clientAgent)
download_id = "none"
release_status = "none"
else: #something went wrong here.... we should never get to this.
Logger.info("Could not find a download_id in the database for this movie")
release_status = "none"
except: # index out of range/doesn't exist? except: # index out of range/doesn't exist?
Logger.exception("Could not find a download_id for this movie") Logger.exception("Could not find a status for this release")
download_id = "none"
return movie_status, clientAgent, download_id, release_status return movie_status, release_status
def process(dirName, nzbName=None, status=0, clientAgent = "manual", download_id = "", inputCategory=None): def process(dirName, nzbName=None, status=0, clientAgent = "manual", download_id = "", inputCategory=None):
@ -254,9 +223,7 @@ def process(dirName, nzbName=None, status=0, clientAgent = "manual", download_id
baseURL = protocol + host + ":" + port + web_root + "/api/" + apikey + "/" baseURL = protocol + host + ":" + port + web_root + "/api/" + apikey + "/"
movie_id, result, imdbid = get_movie_info(baseURL, imdbid, download_id) # get the CPS database movie id for this movie. movie_id, imdbid, download_id, initial_status, initial_release_status = get_movie_info(baseURL, imdbid, download_id) # get the CPS database movie id for this movie.
initial_status, clientAgent, download_id, initial_release_status = get_status(baseURL, movie_id, clientAgent, download_id, result)
process_all_exceptions(nzbName.lower(), dirName) process_all_exceptions(nzbName.lower(), dirName)
nzbName, dirName = converto_to_ascii(nzbName, dirName) nzbName, dirName = converto_to_ascii(nzbName, dirName)
@ -276,7 +243,7 @@ def process(dirName, nzbName=None, status=0, clientAgent = "manual", download_id
command = "manage.update" command = "manage.update"
else: else:
command = "renamer.scan" command = "renamer.scan"
if clientAgent != "manual" and download_id != "none": if clientAgent != "manual" and download_id != None:
if remoteCPS == 1: if remoteCPS == 1:
command = command + "/?downloader=" + clientAgent + "&download_id=" + download_id command = command + "/?downloader=" + clientAgent + "&download_id=" + download_id
else: else:
@ -314,7 +281,7 @@ def process(dirName, nzbName=None, status=0, clientAgent = "manual", download_id
Logger.error("Exiting autoProcessMovie script") Logger.error("Exiting autoProcessMovie script")
return 1 # failure return 1 # failure
url = baseURL + "movie.searcher.try_next/?id=" + movie_id url = baseURL + "movie.searcher.try_next/?media_id=" + movie_id
Logger.debug("Opening URL: %s", url) Logger.debug("Opening URL: %s", url)
@ -337,8 +304,10 @@ def process(dirName, nzbName=None, status=0, clientAgent = "manual", download_id
Logger.exception("Unable to delete folder %s", dirName) Logger.exception("Unable to delete folder %s", dirName)
return 0 # success return 0 # success
if nzbName == "Manual Run" or download_id == "none": if nzbName == "Manual Run":
return 0 # success return 0 # success
if not download_id:
return 1 # just to be sure TorrentToMedia doesn't start deleting files as we havent verified changed status.
# we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing. # we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing.
socket.setdefaulttimeout(int(TimeOut)) #initialize socket timeout. socket.setdefaulttimeout(int(TimeOut)) #initialize socket timeout.
@ -346,13 +315,13 @@ def process(dirName, nzbName=None, status=0, clientAgent = "manual", download_id
start = datetime.datetime.now() # set time for timeout start = datetime.datetime.now() # set time for timeout
pause_for = int(wait_for) * 10 # keep this so we only ever have 6 complete loops. This may not be necessary now? pause_for = int(wait_for) * 10 # keep this so we only ever have 6 complete loops. This may not be necessary now?
while (datetime.datetime.now() - start) < datetime.timedelta(minutes=wait_for): # only wait 2 (default) minutes, then return. while (datetime.datetime.now() - start) < datetime.timedelta(minutes=wait_for): # only wait 2 (default) minutes, then return.
movie_status, clientAgent, download_id, release_status = get_status(baseURL, movie_id, clientAgent, download_id) # get the current status of this movie. movie_status, release_status = get_status(baseURL, movie_id, download_id) # get the current status of this movie.
if movie_status != initial_status: # Something has changed. CPS must have processed this movie. if movie_status and initial_status and movie_status != initial_status: # Something has changed. CPS must have processed this movie.
Logger.info("SUCCESS: This movie is now marked as status %s in CouchPotatoServer", movie_status) Logger.info("SUCCESS: This movie is now marked as status %s in CouchPotatoServer", movie_status)
return 0 # success return 0 # success
time.sleep(pause_for) # Just stop this looping infinitely and hogging resources for 2 minutes ;) time.sleep(pause_for) # Just stop this looping infinitely and hogging resources for 2 minutes ;)
else: else:
if release_status != initial_release_status and release_status != "none": # Something has changed. CPS must have processed this movie. if release_status and initial_release_status and release_status != initial_release_status: # Something has changed. CPS must have processed this movie.
Logger.info("SUCCESS: This release is now marked as status %s in CouchPotatoServer", release_status) Logger.info("SUCCESS: This release is now marked as status %s in CouchPotatoServer", release_status)
return 0 # success return 0 # success
else: # The status hasn't changed. we have waited 2 minutes which is more than enough. uTorrent can resume seeding now. else: # The status hasn't changed. we have waited 2 minutes which is more than enough. uTorrent can resume seeding now.

View file

@ -130,12 +130,11 @@ def processEpisode(dirName, nzbName=None, failed=False, clientAgent=None, inputC
# auto-detect fork type # auto-detect fork type
fork, params = autoFork() fork, params = autoFork()
if (not fork in SICKBEARD_TORRENT) or (clientAgent in ['nzbget','sabnzbd'] and not nzbExtractionBy == "Destination"): if nzbName != "Manual Run" and (not fork in SICKBEARD_TORRENT or (clientAgent in ['nzbget','sabnzbd'] and not nzbExtractionBy == "Destination")):
process_all_exceptions(nzbName.lower(), dirName) process_all_exceptions(nzbName.lower(), dirName)
nzbName, dirName = converto_to_ascii(nzbName, dirName) nzbName, dirName = converto_to_ascii(nzbName, dirName)
if nzbName != "Manual Run" and not fork in SICKBEARD_TORRENT: # Now check if movie files exist in destination. Eventually extraction may be done here if nzbExtractionBy == TorrentToMedia
# Now check if movie files exist in destination:
video = int(0) video = int(0)
for dirpath, dirnames, filenames in os.walk(dirName): for dirpath, dirnames, filenames in os.walk(dirName):
for file in filenames: for file in filenames:
@ -176,6 +175,12 @@ def processEpisode(dirName, nzbName=None, failed=False, clientAgent=None, inputC
else: else:
del params[param] del params[param]
if param is "process":
params["process"] = None
if param is "process_method":
params["process_method"] = None
if nzbName != None: if nzbName != None:
params['nzbName'] = nzbName params['nzbName'] = nzbName

View file

@ -328,7 +328,7 @@ def addnzbget():
envKeys = ['CATEGORY', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'SSL', 'WEB_ROOT'] envKeys = ['CATEGORY', 'HOST', 'PORT', 'USERNAME', 'PASSWORD', 'SSL', 'WEB_ROOT']
cfgKeys = ['mlCategory', 'host', 'port', 'username', 'password', 'ssl', 'web_root'] cfgKeys = ['mlCategory', 'host', 'port', 'username', 'password', 'ssl', 'web_root']
for index in range(len(envKeys)): for index in range(len(envKeys)):
key = 'NZBPO_ML' + envKeys[index] key = 'NZBPO_MY' + envKeys[index]
if os.environ.has_key(key): if os.environ.has_key(key):
option = cfgKeys[index] option = cfgKeys[index]
value = os.environ[key] value = os.environ[key]

View file

@ -1,7 +1,7 @@
# Make things easy and less error prone by centralising all common values # Make things easy and less error prone by centralising all common values
# Global Constants # Global Constants
VERSION = 'V9.2' VERSION = 'V9.3'
TimeOut = 60 TimeOut = 60
# Constants pertinant to SabNzb # Constants pertinant to SabNzb

View file

@ -105,6 +105,7 @@ DelugeUSR = your username
DelugePWD = your password DelugePWD = your password
###### ADVANCED USE - ONLY EDIT IF YOU KNOW WHAT YOU'RE DOING ###### ###### ADVANCED USE - ONLY EDIT IF YOU KNOW WHAT YOU'RE DOING ######
deleteOriginal = 0 deleteOriginal = 0
forceClean = 0
[Extensions] [Extensions]
compressedExtensions = .zip,.rar,.7z,.gz,.bz,.tar,.arj,.1,.01,.001 compressedExtensions = .zip,.rar,.7z,.gz,.bz,.tar,.arj,.1,.01,.001

View file

@ -1,5 +1,20 @@
Change_LOG / History Change_LOG / History
V9.3 XX/XX/2014
Impacts Torrents
Allow Headphones to remove torrents and data after processing.
Delete torrent if uselink = move
Added forceClean for outputDir. Works if file permissions prevent CP/SB from moving files.
Ignore .x264 from archive "part" checks.
Impacts NZBs
Fix setting of Mylar config from NZBGet.
Impacts All
Changes to Couchpotato API for [nosql] added. Keeps aligned with current CouchPotato develop branch.
Add Auto Detection of SickBeard Fork.
V9.2 05/03/2014 V9.2 05/03/2014
Impacts All Impacts All

View file

@ -109,7 +109,7 @@
# SickBeard fork. # SickBeard fork.
# #
# set to default or auto to auto-detect the custom failed fork type". # set to default or auto to auto-detect the custom fork type.
#sbfork=auto #sbfork=auto
# SickBeard Delete Failed Downloads (0, 1) # SickBeard Delete Failed Downloads (0, 1)

View file

@ -58,8 +58,8 @@
# SickBeard fork. # SickBeard fork.
# #
# set to default or TPB or failed if using the custom "TPB" or "failed fork". # set to default or auto to auto-detect the custom fork type.
#sbfork=default #sbfork=auto
# SickBeard Delete Failed Downloads (0, 1). # SickBeard Delete Failed Downloads (0, 1).
# #