Added a remote_path option to the CouchPotato section. It sets the base path where CouchPotato looks for its video files on the server it is installed on.
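
As a rough illustration of the path mapping this option enables (the helper name and example paths below are illustrative, not part of the commit; the join/replace expression is the one used in the diff):

    import os

    def map_to_remote(dirName, remote_path):
        # Substitute the configured CouchPotato base path for the local parent
        # directory, normalising to forward slashes for the API call.
        if not remote_path:
            return dirName
        return os.path.join(remote_path, os.path.basename(dirName)).replace("\\", "/")

    # map_to_remote("/home/user/downloads/Movie.2014.1080p", "/volume1/complete")
    # -> "/volume1/complete/Movie.2014.1080p"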

Re-coded autoProcessMovie: it now attempts to get the imdbID from dirName and nzbName, and if both fail it uses a regex to extract the movie title and performs a search on CouchPotato with that title.
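
The lookup order can be sketched roughly as follows (the function name is hypothetical; the '.cp(ttXXXXXXX)' tag handling and the title regex are taken from the diff below):

    import os
    import re

    def find_imdbid_or_title(dirName, nzbName):
        # 1) Look for a CouchPotato tag such as "Movie.Name.2014.cp(tt1234567)".
        for name in (nzbName, dirName):
            match = re.search(r'\.cp\((tt\d+)\)', name or '')
            if match:
                return match.group(1), None
        # 2) Fall back to a rough title guess taken from the folder or nzb name.
        title_regex = '(.*?)[ ._-]*([0-9]+)[ ._-](.*?)[ ._-]([^.]+)$'
        for name in (os.path.basename(dirName or ''), nzbName or ''):
            match = re.search(title_regex, name)
            if match:
                return None, match.group(1)  # the title is then used to search CouchPotato
        return None, None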

We also now get the release_id and use that instead of the movie id.
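
In practice this means asking CouchPotato for its known releases and matching on the download client's id; a minimal sketch, assuming the endpoint and field names shown in the diff (the function name is hypothetical, and the repo actually uses the bundled lib.requests):

    import requests

    def find_release(baseURL, download_id):
        # List finished media with snatched/downloaded releases, then match the
        # download client's id to recover the release_id and media_id.
        url = baseURL + "/media.list/?status=done&release_status=snatched,downloaded"
        try:
            result = requests.get(url).json()
        except requests.ConnectionError:
            return None, None, None
        if not result.get('success') or result.get('empty'):
            return None, None, None
        for movie in result.get('movies', []):
            for release in movie.get('releases', []):
                if release.get('status') not in ('snatched', 'downloaded'):
                    continue
                if download_id and release.get('info', {}).get('download_id') != download_id:
                    continue
                return release.get('_id'), release.get('media_id'), release.get('status')
        return None, None, None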

Fixed a bug that was preventing failed downloads from being handled properly for CouchPotato.
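
For reference, the failed path now boils down to asking CouchPotato to snatch the next best release for the media item and optionally removing the failed folder. A hedged sketch (the helper name is illustrative; the endpoint is the one used in the diff):

    import shutil
    import requests

    def handle_failed(baseURL, media_id, dirName, delete_failed=False):
        # Ask CouchPotato to try the next best release for this media item.
        url = baseURL + "/movie.searcher.try_next/?media_id=" + media_id
        try:
            requests.get(url)
        except requests.ConnectionError:
            return 1  # failure
        if delete_failed and dirName not in ('/', ''):
            shutil.rmtree(dirName, ignore_errors=True)  # drop the failed download folder
        return 0  # success
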
echel0n 2014-04-13 10:32:10 -07:00
commit 77e07be6c8
5 changed files with 134 additions and 214 deletions

View file

@ -20,12 +20,11 @@
 ssl = 0
 web_root =
 delay = 65
-TimePerGiB = 60
 method = renamer
 delete_failed = 0
 wait_for = 2
-#### Set to 1 if CouchPotatoServer is running on a different server to your NZB client
-remoteCPS = 0
+#### Set the remote path for CouchPotatoServer if its running on a different server then your download client
+remote_path =
 watch_dir =

 [SickBeard]

View file

@ -353,6 +353,7 @@ def main():
     # Post-Processing Result
     result = 0
+    status = 0

     # NZBGet V11+
     # Check if the script is called from nzbget 11.0 or later
@ -401,7 +402,7 @@ def main():
             download_id = os.environ['NZBPR_COUCHPOTATO']

         # All checks done, now launching the script.
-        result = process(os.environ['NZBPP_DIRECTORY'], inputName=os.environ['NZBPP_NZBFILENAME'], clientAgent = "nzbget", download_id=download_id, inputCategory=os.environ['NZBPP_CATEGORY'])
+        result = process(os.environ['NZBPP_DIRECTORY'], inputName=os.environ['NZBPP_NZBFILENAME'], status=status, clientAgent = "nzbget", download_id=download_id, inputCategory=os.environ['NZBPP_CATEGORY'])

     # SABnzbd Pre 0.7.17
     elif len(sys.argv) == nzbtomedia.SABNZB_NO_OF_ARGUMENTS:
         # SABnzbd argv:

View file

@ -1,137 +1,109 @@
+import os
+import re
 import time
 import datetime
-import socket
 import urllib
 import shutil
-import sys
-import platform
 import nzbtomedia
 from lib import requests
 from nzbtomedia.Transcoder import Transcoder
 from nzbtomedia.nzbToMediaSceneExceptions import process_all_exceptions
-from nzbtomedia.nzbToMediaUtil import getDirectorySize, convert_to_ascii
+from nzbtomedia.nzbToMediaUtil import convert_to_ascii
 from nzbtomedia import logger

 class autoProcessMovie:
-    def get_imdb(self, nzbName, dirName):
-        imdbid = ""
-
-        a = nzbName.find('.cp(') + 4 #search for .cptt( in nzbName
-        b = nzbName[a:].find(')') + a
-        if a > 3: # a == 3 if not exist
-            imdbid = nzbName[a:b]
-        if imdbid:
-            logger.postprocess("Found movie id %s in name", imdbid)
-            return imdbid
-
-        a = dirName.find('.cp(') + 4 #search for .cptt( in dirname
-        b = dirName[a:].find(')') + a
-        if a > 3: # a == 3 if not exist
-            imdbid = dirName[a:b]
-        if imdbid:
-            logger.postprocess("Found movie id %s in directory", imdbid)
-            return imdbid
-        else:
-            logger.debug("Could not find an imdb id in directory or name")
-            return ""
-
-    def get_movie_postprocess(self, baseURL, imdbid, download_id):
-        movie_id = None
-        movie_status = None
-        movieid = []
-        moviestatus = []
-        library = []
-        release = []
-        offset = int(0)
+    def find_media_id(self, baseURL, download_id, dirName, nzbName):
+        imdbid = None
+        movie_title = None
+        release_id = None
+        media_id = None
         release_status = None
+        movies = {}
+        releases_found = {}

-        if not imdbid and not download_id:
-            return movie_id, imdbid, download_id, movie_status, release_status
-
-        while True:
-            url = baseURL + "media.list/?limit_offset=50," + str(offset)
-
-            logger.debug("Opening URL: %s", url)
-
-            try:
-                r = requests.get(url)
-            except requests.ConnectionError:
-                logger.error("Unable to open URL")
-                break
-
-            library2 = []
-            try:
-                result = r.json()
-                movieid2 = [item["_id"] for item in result["movies"]]
-                for item in result["movies"]:
-                    if "identifier" in item:
-                        library2.append(item["identifier"])
-                    else:
-                        library2.append(item["identifiers"]["imdb"])
-                release2 = [item["releases"] for item in result["movies"]]
-                moviestatus2 = [item["status"] for item in result["movies"]]
-            except Exception, e:
-                logger.error("Unable to parse json data for movies")
-                break
-
-            movieid.extend(movieid2)
-            moviestatus.extend(moviestatus2)
-            library.extend(library2)
-            release.extend(release2)
-
-            if len(movieid2) < int(50): # finished parsing list of movies. Time to break.
-                break
-            offset = offset + 50
-
-        for index in range(len(movieid)):
-            releaselist1 = [item for item in release[index] if item["status"] == "snatched" and "download_info" in item]
-            if download_id:
-                releaselist = [item for item in releaselist1 if item["download_info"]["id"].lower() == download_id.lower()]
-            elif len(releaselist1) > 0:
-                releaselist = releaselist1
-            else:
-                releaselist = [item for item in release[index] if item["status"] == "downloaded" and "download_info" in item]
-
-            if imdbid and library[index] == imdbid:
-                movie_id = str(movieid[index])
-                movie_status = str(moviestatus[index])
-                logger.postprocess("Found movie id %s with status %s in CPS database for movie %s", movie_id, movie_status, imdbid)
-                if not download_id and len(releaselist) == 1:
-                    download_id = releaselist[0]["download_info"]["id"]
-            elif not imdbid and download_id and len(releaselist) > 0:
-                movie_id = str(movieid[index])
-                movie_status = str(moviestatus[index])
-                imdbid = str(library[index])
-                logger.postprocess("Found movie id %s and imdb %s with status %s in CPS database via download_id %s", movie_id, imdbid, movie_status, download_id)
-            else:
-                continue
-
-            if len(releaselist) == 1:
-                release_status = releaselist[0]["status"]
-                logger.debug("Found a single release with download_id: %s. Release status is: %s", download_id, release_status)
-
+        while(True):
+            # find imdbid in nzbName
+            a = nzbName.find('.cp(') + 4
+            b = nzbName[a:].find(')') + a
+            if a > 3: # a == 3 if not exist
+                if nzbName[a:b]:
+                    imdbid = nzbName[a:b]
+                    logger.postprocess("Found imdbid %s in name", imdbid)
+                    break
+
+            # find imdbid in dirName
+            a = dirName.find('.cp(') + 4
+            b = dirName[a:].find(')') + a
+            if a > 3: # a == 3 if not exist
+                if dirName[a:b]:
+                    imdbid = dirName[a:b]
+                    logger.postprocess("Found movie id %s in directory", imdbid)
+                    break
+
+            # regex match movie title from dirName or nzbName
+            movie_title_results = []
+            movie_title_regex = '(.*?)[ ._-]*([0-9]+)[ ._-](.*?)[ ._-]([^.]+)$'
+            if dirName:
+                movie_title_results.append(re.search(movie_title_regex, os.path.basename(dirName)))
+            if nzbName:
+                movie_title_results.append(re.search(movie_title_regex, nzbName))
+
+            movie_title = [x.group(1) for x in movie_title_results if x]
             break

-        if not movie_id:
-            logger.error("Could not parse database results to determine imdbid or movie id")
-
-        return movie_id, imdbid, download_id, movie_status, release_status
-
-    def get_status(self, baseURL, movie_id, download_id):
-        result = None
-        movie_status = None
+        if imdbid:
+            section = 'media'
+            url = baseURL + "/media.get/?id=" + imdbid
+        else:
+            section = 'movies'
+            url = baseURL + "/media.list/?status=done&release_status=snatched,downloaded"
+            if movie_title:
+                url = baseURL + "/media.list/?search=" + movie_title[0] + "&status=done&release_status=snatched,downloaded"
+
+        logger.debug("Opening URL: %s", url)
+
+        try:
+            r = requests.get(url)
+        except requests.ConnectionError:
+            logger.error("Unable to open URL")
+            return
+
+        results = r.json()
+        if results['success'] and not results['empty']:
+            for i, movie in enumerate(results[section]):
+                movies[i] = movie
+
+        if len(movies) > 0:
+            try:
+                for i, movie in enumerate(movies.values()):
+                    for release in movie['releases']:
+                        if not release['status'] in ['snatched', 'downloaded']:
+                            continue
+                        if download_id and 'download_id' in release['info'].keys():
+                            if download_id != release['info']['download_id']:
+                                continue
+                        releases_found.update({i:release})
+            except:pass
+
+        if len(releases_found) == 1:
+            try:
+                release_id = releases_found[0]['_id']
+                media_id = releases_found[0]['media_id']
+                release_status = releases_found[0]['status']
+                download_id = releases_found[0]['info']['download_id']
+            except:pass
+
+        return media_id, download_id, release_id, release_status
+
+    def get_status(self, baseURL, release_id):
         release_status = None
-        if not movie_id:
-            return movie_status, release_status

-        logger.debug("Looking for status of movie: %s", movie_id)
-        url = baseURL + "media.get/?id=" + str(movie_id)
+        logger.debug("Attempting to get current status for release:%s", release_id)
+
+        url = baseURL + "/media.get/?id=" + str(release_id)

         logger.debug("Opening URL: %s", url)

         try:
@ -142,23 +114,10 @@ class autoProcessMovie:
         try:
             result = r.json()
-            movie_status = str(result["media"]["status"])
-            logger.debug("This movie is marked as status %s in CouchPotatoServer", movie_status)
-        except:
-            logger.error("Could not find a status for this movie")
-
-        try:
-            if len(result["media"]["releases"]) == 1 and result["media"]["releases"][0]["status"] == "done":
-                release_status = result["media"]["releases"][0]["status"]
-            else:
-                release_status_list = [item["status"] for item in result["media"]["releases"] if "download_info" in item and item["download_info"]["id"].lower() == download_id.lower()]
-                if len(release_status_list) == 1:
-                    release_status = release_status_list[0]
-            logger.debug("This release is marked as status %s in CouchPotatoServer", release_status)
-        except: # index out of range/doesn't exist?
-            logger.error("Could not find a status for this release")
-
-        return movie_status, release_status
+            release_status = result["media"]["status"]
+        except:pass
+
+        return release_status

     def process(self, dirName, nzbName=None, status=0, clientAgent = "manual", download_id = "", inputCategory=None):
         if dirName is None:
@ -181,15 +140,10 @@ class autoProcessMovie:
         host = nzbtomedia.CFG[section][inputCategory]["host"]
         port = nzbtomedia.CFG[section][inputCategory]["port"]
         apikey = nzbtomedia.CFG[section][inputCategory]["apikey"]
-        delay = float(nzbtomedia.CFG[section][inputCategory]["delay"])
         method = nzbtomedia.CFG[section][inputCategory]["method"]
         delete_failed = int(nzbtomedia.CFG[section][inputCategory]["delete_failed"])
         wait_for = int(nzbtomedia.CFG[section][inputCategory]["wait_for"])
-        try:
-            TimePerGiB = int(nzbtomedia.CFG[section][inputCategory]["TimePerGiB"])
-        except:
-            TimePerGiB = 60 # note, if using Network to transfer on 100Mbit LAN, expect ~ 600 MB/minute.

         try:
             ssl = int(nzbtomedia.CFG[section][inputCategory]["ssl"])
         except:
@ -203,14 +157,12 @@ class autoProcessMovie:
         except:
             transcode = 0
         try:
-            remoteCPS = int(nzbtomedia.CFG[section][inputCategory]["remoteCPS"])
+            remote_path = nzbtomedia.CFG[section][inputCategory]["remote_path"]
         except:
-            remoteCPS = 0
+            remote_path = None

         nzbName = str(nzbName) # make sure it is a string
-
-        imdbid = self.get_imdb(nzbName, dirName)

         if ssl:
             protocol = "https://"
         else:
@ -220,14 +172,21 @@ class autoProcessMovie:
         if clientAgent == "manual":
             delay = 0

-        baseURL = protocol + host + ":" + port + web_root + "/api/" + apikey + "/"
+        baseURL = protocol + host + ":" + port + web_root + "/api/" + apikey

-        movie_id, imdbid, download_id, initial_status, initial_release_status = self.get_movie_postprocess(baseURL, imdbid, download_id) # get the CPS database movie id for this movie.
+        media_id, download_id, release_id, release_status = self.find_media_id(baseURL, download_id, dirName, nzbName) # get the CPS database movie id for this movie.

-        if initial_release_status == "downloaded":
-            logger.postprocess("This movie has already been post-processed by CouchPotatoServer, skipping ...")
+        # failed to get a download id
+        if release_status != "snatched":
+            logger.postprocess("%s has is marked with a status of [%s] by CouchPotatoServer, skipping ...", nzbName, release_status.upper())
             return 0

+        if not download_id:
+            logger.warning("Could not find a download ID in CouchPotatoServer database for release %s, skipping", nzbName)
+            logger.warning("Please manually ignore this release and try snatching it again")
+            return 1 # failure
+
         process_all_exceptions(nzbName.lower(), dirName)
         nzbName, dirName = convert_to_ascii(nzbName, dirName)
@ -240,30 +199,25 @@ class autoProcessMovie:
logger.warning("Transcoding failed for files in %s", dirName) logger.warning("Transcoding failed for files in %s", dirName)
if method == "manage": if method == "manage":
command = "manage.update" command = "/manage.update"
else: else:
command = "renamer.scan" command = "/renamer.scan"
if clientAgent != "manual" and download_id != None:
if remoteCPS == 1:
command = command + "/?downloader=" + clientAgent + "&download_id=" + download_id
else:
command = command + "/?media_folder=" + urllib.quote(dirName) + "&downloader=" + clientAgent + "&download_id=" + download_id
dirSize = getDirectorySize(dirName) # get total directory size to calculate needed processing time. params = {}
TIME_OUT2 = int(TimePerGiB) * dirSize # Couchpotato needs to complete all moving and renaming before returning the status. if download_id:
TIME_OUT2 += 60 # Add an extra minute for over-head/processing/metadata. params['downloader'] = clientAgent
socket.setdefaulttimeout(int(TIME_OUT2)) #initialize socket timeout. We may now be able to remove the delays from the wait_for section below? If true, this should exit on first loop. params['download_id'] = download_id
if remote_path:
dirName_new = os.path.join(remote_path, os.path.basename(dirName)).replace("\\", "/")
params['media_folder'] = urllib.quote(dirName_new)
url = baseURL + command url = baseURL + command
logger.postprocess("Waiting for %s seconds to allow CPS to process newly extracted files", str(delay))
time.sleep(delay)
logger.debug("Opening URL: %s", url) logger.debug("Opening URL: %s", url)
try: try:
r = requests.get(url) r = requests.get(url, data=params)
except requests.ConnectionError: except requests.ConnectionError:
logger.error("Unable to open URL") logger.error("Unable to open URL")
return 1 # failure return 1 # failure
@ -280,13 +234,13 @@ class autoProcessMovie:
logger.postprocess("Download of %s has failed.", nzbName) logger.postprocess("Download of %s has failed.", nzbName)
logger.postprocess("Trying to re-cue the next highest ranked release") logger.postprocess("Trying to re-cue the next highest ranked release")
if not movie_id: if not download_id:
logger.warning("Cound not find a movie in the database for release %s", nzbName) logger.warning("Cound not find a movie in the database for release %s", nzbName)
logger.warning("Please manually ignore this release and refresh the wanted movie") logger.warning("Please manually ignore this release and refresh the wanted movie")
logger.error("Exiting autoProcessMovie script") logger.error("Exiting autoProcessMovie script")
return 1 # failure return 1 # failure
url = baseURL + "movie.searcher.try_next/?media_id=" + movie_id url = baseURL + "movie.searcher.try_next/?media_id=" + media_id
logger.debug("Opening URL: %s", url) logger.debug("Opening URL: %s", url)
@ -298,8 +252,9 @@ class autoProcessMovie:
             for line in r.iter_lines():
                 if line: logger.postprocess("%s", line)

-            logger.postprocess("Movie %s set to try the next best release on CouchPotatoServer", movie_id)
-            if delete_failed and not dirName in ['sys.argv[0]','/','']:
+            logger.postprocess("%s FAILED!, Trying the next best release on CouchPotatoServer", nzbName)
+
+            if delete_failed and not dirName in [sys.argv[0],'/','']:
                 logger.postprocess("Deleting failed files and folder %s", dirName)
                 try:
                     shutil.rmtree(dirName)
@ -307,27 +262,21 @@ class autoProcessMovie:
logger.error("Unable to delete folder %s", dirName) logger.error("Unable to delete folder %s", dirName)
return 0 # success return 0 # success
if clientAgent == "manual": if not release_id:
return 0 # success
if not download_id:
if clientAgent in ['utorrent', 'transmission', 'deluge'] : if clientAgent in ['utorrent', 'transmission', 'deluge'] :
return 1 # just to be sure TorrentToMedia doesn't start deleting files as we havent verified changed status. return 1 # just to be sure TorrentToMedia doesn't start deleting files as we havent verified changed status.
else: else:
return 0 # success return 0 # success
# we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing. # we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing.
release_status = None while (True): # only wait 2 (default) minutes, then return.
start = datetime.datetime.now() # set time for timeout current_status = self.get_status(baseURL, release_id)
pause_for = int(wait_for) * 10 # keep this so we only ever have 6 complete loops. This may not be necessary now? if current_status is None:
while (datetime.datetime.now() - start) < datetime.timedelta(minutes=wait_for): # only wait 2 (default) minutes, then return. logger.error("Could not find a current status for %s", nzbName)
movie_status, release_status = self.get_status(baseURL, movie_id, download_id) # get the current status fo this movie. return 1
if movie_status and initial_status and movie_status != initial_status: # Something has changed. CPS must have processed this movie.
logger.postprocess("SUCCESS: This movie is now marked as status %s in CouchPotatoServer", movie_status) if current_status != release_status: # Something has changed. CPS must have processed this movie.
return 0 # success logger.postprocess("SUCCESS: This release is now marked as status [%s] in CouchPotatoServer", current_status.upper())
time.sleep(pause_for) # Just stop this looping infinitely and hogging resources for 2 minutes ;)
else:
if release_status and initial_release_status and release_status != initial_release_status: # Something has changed. CPS must have processed this movie.
logger.postprocess("SUCCESS: This release is now marked as status %s in CouchPotatoServer", release_status)
return 0 # success return 0 # success
else: # The status hasn't changed. we have waited 2 minutes which is more than enough. uTorrent can resule seeding now. else: # The status hasn't changed. we have waited 2 minutes which is more than enough. uTorrent can resule seeding now.
logger.warning("The movie does not appear to have changed status after %s minutes. Please check CouchPotato Logs", wait_for) logger.warning("The movie does not appear to have changed status after %s minutes. Please check CouchPotato Logs", wait_for)

View file

@ -1,10 +1,9 @@
 import time
 import datetime
-import socket
 import urllib
 import nzbtomedia
 from lib import requests
-from nzbtomedia.nzbToMediaUtil import convert_to_ascii, getDirectorySize
+from nzbtomedia.nzbToMediaUtil import convert_to_ascii
 from nzbtomedia import logger

 class autoProcessMusic:
@ -39,10 +38,6 @@ class autoProcessMusic:
             web_root = nzbtomedia.CFG[section][inputCategory]["web_root"]
         except:
             web_root = ""
-        try:
-            TimePerGiB = int(nzbtomedia.CFG[section][inputCategory]["TimePerGiB"])
-        except:
-            TimePerGiB = 60 # note, if using Network to transfer on 100Mbit LAN, expect ~ 600 MB/minute.

         if ssl:
             protocol = "https://"
@ -54,11 +49,6 @@ class autoProcessMusic:
         nzbName, dirName = convert_to_ascii(nzbName, dirName)

-        dirSize = getDirectorySize(dirName) # get total directory size to calculate needed processing time.
-        TIME_OUT = int(TimePerGiB) * dirSize # HeadPhones needs to complete all moving/transcoding and renaming before returning the status.
-        TIME_OUT += 60 # Add an extra minute for over-head/processing/metadata.
-        socket.setdefaulttimeout(int(TIME_OUT)) #initialize socket timeout.
-
         baseURL = protocol + host + ":" + port + web_root + "/api?"

         if status == 0:
@ -68,7 +58,7 @@ class autoProcessMusic:
             params['cmd'] = "forceProcess"
             params['dir'] = dirName

-            url = baseURL + urllib.urlencode(params)
+            url = baseURL

             logger.postprocess("Waiting for %s seconds to allow HeadPhones to process newly extracted files", str(delay))
@ -77,7 +67,7 @@ class autoProcessMusic:
logger.debug("Opening URL: %s", url) logger.debug("Opening URL: %s", url)
try: try:
r = requests.get(url) r = requests.get(url, data=params)
except requests.ConnectionError: except requests.ConnectionError:
logger.error("Unable to open URL") logger.error("Unable to open URL")
return 1 # failure return 1 # failure

View file

@ -1,7 +1,6 @@
 import copy
 import json
 import os
-import socket
 import urllib
 import time
 import nzbtomedia
@ -9,7 +8,7 @@ from lib import requests
 from nzbtomedia.Transcoder import Transcoder
 from nzbtomedia.nzbToMediaAutoFork import autoFork
 from nzbtomedia.nzbToMediaSceneExceptions import process_all_exceptions
-from nzbtomedia.nzbToMediaUtil import convert_to_ascii, is_sample, flatten, getDirectorySize, delete
+from nzbtomedia.nzbToMediaUtil import convert_to_ascii, is_sample, flatten, delete
 from nzbtomedia import logger

 class autoProcessTV:
@ -43,7 +42,6 @@ class autoProcessTV:
             apikey = nzbtomedia.CFG[section][inputCategory]["apikey"]
         except:
             apikey = ""
-
         try:
             ssl = int(nzbtomedia.CFG[section][inputCategory]["ssl"])
         except:
@ -52,10 +50,6 @@ class autoProcessTV:
             web_root = nzbtomedia.CFG[section][inputCategory]["web_root"]
         except:
             web_root = ""
-        try:
-            watch_dir = nzbtomedia.CFG[section][inputCategory]["watch_dir"]
-        except:
-            watch_dir = ""
         try:
             transcode = int(nzbtomedia.CFG["Transcoder"]["transcode"])
         except:
@ -68,10 +62,6 @@ class autoProcessTV:
             delay = float(nzbtomedia.CFG[section][inputCategory]["delay"])
         except:
             delay = 0
-        try:
-            TimePerGiB = int(nzbtomedia.CFG[section][inputCategory]["TimePerGiB"])
-        except:
-            TimePerGiB = 60 # note, if using Network to transfer on 100Mbit LAN, expect ~ 600 MB/minute.
         try:
             SampleIDs = (nzbtomedia.CFG["Extensions"]["SampleIDs"])
         except:
@ -89,10 +79,6 @@ class autoProcessTV:
         except:
             Torrent_NoLink = 0

-        mediaContainer = (nzbtomedia.CFG["Extensions"]["mediaExtensions"])
-        minSampleSize = int(nzbtomedia.CFG["Extensions"]["minSampleSize"])
-
         if not os.path.isdir(dirName) and os.path.isfile(dirName): # If the input directory is a file, assume single file download and split dir/name.
             dirName = os.path.split(os.path.normpath(dirName))[0]
@ -114,8 +100,8 @@ class autoProcessTV:
                 for file in filenames:
                     filePath = os.path.join(dirpath, file)
                     fileExtension = os.path.splitext(file)[1]
-                    if fileExtension in mediaContainer: # If the file is a video file
-                        if is_sample(filePath, nzbName, minSampleSize, SampleIDs):
+                    if fileExtension in nzbtomedia.MEDIACONTAINER: # If the file is a video file
+                        if is_sample(filePath, nzbName, nzbtomedia.MINSAMPLESIZE, SampleIDs):
                             logger.debug("Removing sample file: %s", filePath)
                             os.unlink(filePath) # remove samples
                         else:
@ -130,11 +116,6 @@ class autoProcessTV:
             status = int(1)
             failed = True

-        dirSize = getDirectorySize(dirName) # get total directory size to calculate needed processing time.
-        TIME_OUT = int(TimePerGiB) * dirSize # SickBeard needs to complete all moving and renaming before returning the log sequence via url.
-        TIME_OUT += 60 # Add an extra minute for over-head/processing/metadata.
-        socket.setdefaulttimeout(int(TIME_OUT)) #initialize socket timeout.
-
         # configure SB params to pass
         fork_params['quiet'] = 1
         if nzbName is not None: