Merge branch 'refactor0.7' into dev

Conflicts:
	TorrentToMedia.py
clinton-hall 2013-03-04 18:17:13 -08:00
commit 919ea031a8
11 changed files with 596 additions and 371 deletions

View file

@@ -12,7 +12,6 @@ from subprocess import call
# Custom imports
import linktastic.linktastic as linktastic
import extractor.extractor as extractor
import autoProcessMovie
import autoProcessTV
@@ -20,316 +19,183 @@ from nzbToMediaEnv import *
from nzbToMediaUtil import *
from utorrent.client import UTorrentClient
nzbtomedia_configure_logging(os.path.dirname(sys.argv[0]))
Logger = logging.getLogger(__name__)
def main(inputDirectory, inputName, inputCategory, inputHash):
status = int(1) # 1 = failed | 0 = success
root = int(0)
video = int(0)
video2 = int(0)
def category_search(inputDirectory, inputName, inputCategory, root, categories):
categorySearch = [os.path.normpath(inputDirectory), ""] # initialize
notfound = 0
for x in range(10): # loop up through 10 directories looking for category.
try:
categorySearch2 = os.path.split(os.path.normpath(categorySearch[0]))
except: # this might happen when we can't go higher.
if inputCategory and inputName: # if these exist, we are ok to proceed, but assume we are in a root/common directory.
Logger.info("SEARCH: Could not find a Torrent Name or category in the directory structure")
Logger.info("SEARCH: We assume the directory passed is the root directory for your downloader")
Logger.warn("SEARCH: You should change settings to download torrents to their own directory if possible")
Logger.info("SEARCH: We will try and determine which files to process, individually")
root = 1
break # we are done
elif inputCategory: # if this exists, we are ok to proceed, but assume we are in a root/common directory and we have to check file dates.
Logger.info("SEARCH: Could not find a Torrent Name or Category in the directory structure")
Logger.info("SEARCH: We assume the directory passed is the root directory for your downlaoder")
Logger.warn("SEARCH: You should change settings to download torrents to their own directory if possible")
Logger.info("SEARCH: We will try and determine which files to process, individually")
root = 2
break # we are done
else:
Logger.error("SEARCH: Could not identify Category of Torrent Name in the directory structure. Please check downloader settings. Exiting")
sys.exit(-1)
Logger.debug("MAIN: Received Directory: %s | Name: %s | Category: %s", inputDirectory, inputName, inputCategory)
if categorySearch2[1] in categories:
Logger.debug("SEARCH: Found Category: %s in directory structure", categorySearch2[1])
if not inputCategory:
Logger.info("SEARCH: Determined Category to be: %s", categorySearch2[1])
inputCategory = categorySearch2[1]
if inputName and categorySearch[0] != os.path.normpath(inputDirectory): # if we are not in the root directory and we have inputName we can continue.
if ('.cp(tt' in categorySearch[1]) and (not '.cp(tt' in inputName): # if the directory was created by CouchPotato, and this tag is not in Torrent name, we want to add it.
Logger.info("SEARCH: Changing Torrent Name to %s to preserve imdb id.", categorySearch[1])
inputName = categorySearch[1]
Logger.info("SEARCH: Identified Category: %s and Torrent Name: %s. We are in a unique directory, so we can proceed.", inputCategory, inputName)
break # we are done
elif categorySearch[1] and not inputName: # assume the next directory deep is the torrent name.
Logger.info("SEARCH: Found torrent directory %s in category directory %s", os.path.join(categorySearch[0], categorySearch[1]), categorySearch[0])
inputName = categorySearch[1]
break # we are done
elif ('.cp(tt' in categorySearch[1]) and (not '.cp(tt' in inputName): # if the directory was created by CouchPotato, and this tag is not in Torrent name, we want to add it.
Logger.info("SEARCH: Changing Torrent Name to %s to preserve imdb id.", categorySearch[1])
inputName = categorySearch[1]
break # we are done
elif os.path.isdir(os.path.join(categorySearch[0], inputName)) and inputName: # testing for torrent name in first sub directory
Logger.info("SEARCH: Found torrent directory %s in category directory %s", os.path.join(categorySearch[0], inputName), categorySearch[0])
if categorySearch[0] == os.path.normpath(inputDirectory): # only true on first pass, x =0
inputDirectory = os.path.join(categorySearch[0], inputName) # we only want to search this next dir up.
break # we are done
elif inputName: # if this exists, we are ok to proceed, but we are in a root/common directory.
Logger.info("SEARCH: Could not find a unique torrent folder in the directory structure")
Logger.info("SEARCH: The directory passed is the root directory for category %s", categorySearch2[1])
Logger.warn("SEARCH: You should change settings to download torrents to their own directory if possible")
Logger.info("SEARCH: We will try and determine which files to process, individually")
root = 1
break # we are done
else: # this is a problem! if we don't have Torrent name and are in the root category dir, we can't proceed.
Logger.warn("SEARCH: Could not identify a torrent name and the directory passed is common to all downloads for category %s.", categorySearch[1])
Logger.warn("SEARCH: You should change settings to download torrents to their own directory if possible")
Logger.info("SEARCH: We will try and determine which files to process, individually")
root = 2
break
elif categorySearch2[1] == inputName and inputName: # we have identified a unique directory.
Logger.info("SEARCH: Files appear to be in their own directory")
if inputCategory: # we are ok to proceed.
break # we are done
else:
Logger.debug("SEARCH: Continuing scan to determin category.")
categorySearch = categorySearch2 # ready for next loop
continue # keep going
inputDirectory, inputName, inputCategory, root = category_search(inputDirectory, inputName, inputCategory, root, categories) # Confirm the category by parsing directory structure
for category in categories:
if category == inputCategory:
outputDestination = os.path.normpath(os.path.join(outputDirectory, category, safeName(inputName)))
Logger.info("MAIN: Output directory set to: %s", outputDestination)
break
else:
if x == 9: # This is the last pass in the loop and we didn't find anything.
notfound = 1
break # we are done
else:
categorySearch = categorySearch2 # ready for next loop
continue # keep going
if notfound == 1:
if inputCategory and inputName: # if these exist, we are ok to proceed, but assume we are in a root/common directory.
Logger.info("SEARCH: Could not find a category in the directory structure")
Logger.info("SEARCH: We assume the directory passed is the root directory for your downloader")
Logger.warn("SEARCH: You should change settings to download torrents to their own directory if possible")
Logger.info("SEARCH: We will try and determine which files to process, individually")
root = 1
elif inputCategory: # if this exists, we are ok to proceed, but assume we are in a root/common directory and we have to check file dates.
Logger.info("SEARCH: Could not find a Torrent Name or Category in the directory structure")
Logger.info("SEARCH: We assume the directory passed is the root directory for your downlaoder")
Logger.warn("SEARCH: You should change settings to download torrents to their own directory if possible")
Logger.info("SEARCH: We will try and determine which files to process, individually")
root = 2
if not inputCategory: # we didn't find this after 10 loops. This is a problem.
Logger.error("SEARCH: Could not identify category and torrent name from the directory structure. Please check downloader settings. Exiting")
sys.exit(-1) # Oh yeah.... WE ARE DONE!
return inputDirectory, inputName, inputCategory, root
def is_sample(filePath, inputName, minSampleSize):
# minSampleSize in MB converted to bytes
SIZE_CUTOFF = minSampleSize * 1024 * 1024
# Ignore 'sample' in files unless 'sample' in Torrent Name
return ('sample' in filePath.lower()) and (not 'sample' in inputName) and (os.path.getsize(filePath) < SIZE_CUTOFF)
def copy_link(source, target, useLink, outputDestination):
create_destination(outputDestination)
if useLink:
try:
Logger.info("COPYLINK: Linking %s to %s", source, target)
linktastic.link(source, target)
except:
if os.path.isfile(target):
Logger.info("COPYLINK: Something went wrong in linktastic.link, but the destination file was created")
else:
Logger.info("COPYLINK: Something went wrong in linktastic.link, copying instead")
Logger.debug("COPYLINK: Copying %s to %s", source, target)
shutil.copy(source, target)
else:
Logger.debug("Copying %s to %s", source, target)
shutil.copy(source, target)
return True
def flatten(outputDestination):
Logger.info("FLATTEN: Flattening directory: %s", outputDestination)
for dirpath, dirnames, filenames in os.walk(outputDestination): # Flatten out the directory to make postprocessing easier
if dirpath == outputDestination:
continue # No need to try and move files in the root destination directory
for filename in filenames:
source = os.path.join(dirpath, filename)
target = os.path.join(outputDestination, filename)
try:
shutil.move(source, target)
except OSError:
Logger.error("FLATTEN: Could not flatten %s", source)
removeEmptyFolders(outputDestination) # Cleanup empty directories
def removeEmptyFolders(path):
Logger.info("REMOVER: Removing empty folders in: %s", path)
if not os.path.isdir(path):
return
# Remove empty subfolders
files = os.listdir(path)
if len(files):
for f in files:
fullpath = os.path.join(path, f)
if os.path.isdir(fullpath):
removeEmptyFolders(fullpath)
# If folder empty, delete it
files = os.listdir(path)
if len(files) == 0:
Logger.debug("REMOVER: Removing empty folder: %s", path)
os.rmdir(path)
Logger.info("==========================") # Seperate old from new log
Logger.info("TorrentToMedia %s", VERSION)
config = ConfigParser.ConfigParser()
configFilename = os.path.join(os.path.dirname(sys.argv[0]), "autoProcessMedia.cfg")
### TORRENT TO MEDIA ###
if not os.path.isfile(configFilename):
Logger.error("You need an autoProcessMedia.cfg file - did you rename and edit the .sample?")
sys.exit(-1)
Logger.info("MAIN: Loading config from %s", configFilename)
config.read(configFilename)
clientAgent = config.get("Torrent", "clientAgent")
try:
inputDirectory, inputName, inputCategory, inputHash = parse_args(clientAgent)
except:
Logger.error("MAIN: There was a problem loading variables: Exiting")
sys.exit(-1)
#### Main routine starts here.
Logger.debug("MAIN: Received Directory: %s | Name: %s | Category: %s", inputDirectory, inputName, inputCategory)
# Sick-Beard
tvCategory = config.get("SickBeard", "category")
tvDestination = os.path.normpath(config.get("SickBeard", "outputDirectory"))
# CouchPotatoServer
movieCategory = config.get("CouchPotato", "category")
movieDestination = os.path.normpath(config.get("CouchPotato", "outputDirectory"))
# Torrent specific
useLink = config.get("Torrent", "useLink")
minSampleSize = int(config.get("Torrent", "minSampleSize"))
uTorrentWEBui = config.get("Torrent", "uTorrentWEBui")
uTorrentUSR = config.get("Torrent", "uTorrentUSR")
uTorrentPWD = config.get("Torrent", "uTorrentPWD")
compressedContainer = (config.get("Torrent", "compressedExtentions")).split(',')
mediaContainer = (config.get("Torrent", "mediaExtentions")).split(',')
metaContainer = (config.get("Torrent", "metaExtentions")).split(',')
categories = (config.get("Torrent", "categories")).split(',')
categories.append(movieCategory)
categories.append(tvCategory) # now have a list of all categories in use.
status = int(1) # We start as "failed" until we verify movie file in destination
root = int(0)
video = int(0)
video2 = int(0)
failed_link = int(0)
failed_extract = int(0)
inputDirectory, inputName, inputCategory, root = category_search(inputDirectory, inputName, inputCategory, root, categories) # Confirm the category by parsing directory structure
if inputCategory == movieCategory:
outputDestination = os.path.normpath(os.path.join(movieDestination, inputName))
elif inputCategory == tvCategory:
outputDestination = os.path.normpath(os.path.join(tvDestination, inputName))
else:
Logger.error("MAIN: Category of %s does not match either %s or %s: Exiting", inputCategory, movieCategory, tvCategory)
Logger.debug("MAIN: Future versions of this script might do something for Category: %s. Keep updating ;)", inputCategory)
sys.exit(-1)
Logger.debug("MAIN: Scanning files in directory: %s", inputDirectory)
if root == 1:
Logger.debug("MAIN: Looking for %s in filename", inputName)
elif root == 2:
Logger.debug("MAIN: Looking for files with modified/created dates less than 5 minutes old.")
now = datetime.datetime.now()
for dirpath, dirnames, filenames in os.walk(inputDirectory):
for file in filenames:
if root == 1:
if (inputName in file) or (os.path.splitext(file)[0] in inputName):
pass # This file does match the Torrent name
Logger.debug("Found file %s that matches Torrent Name %s", file, inputName)
else:
continue # This file does not match the Torrent name, skip it
if root == 2:
mtime_lapse = now - datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(dirpath, file)))
ctime_lapse = now - datetime.datetime.fromtimestamp(os.path.getctime(os.path.join(dirpath, file)))
if (mtime_lapse < datetime.timedelta(minutes=5)) or (ctime_lapse < datetime.timedelta(minutes=5)):
pass # This file does match the date time criteria
Logger.debug("Found file %s with date modifed/created less than 5 minutes ago.", file)
else:
continue # This file has not been recently moved or created, skip it
filePath = os.path.join(dirpath, file)
fileExtention = os.path.splitext(file)[1]
if fileExtention in mediaContainer: # If the file is a video file
if is_sample(filePath, inputName, minSampleSize): # Ignore samples
Logger.info("MAIN: Ignoring sample file: %s ", filePath)
continue
else:
video = video + 1
source = filePath
target = os.path.join(outputDestination, file)
Logger.info("MAIN: Found video file %s in %s", fileExtention, filePath)
state = copy_link(source, target, useLink, outputDestination)
if state == False:
Logger.error("MAIN: Failed to link file %s", file)
failed_link = 1
elif fileExtention in metaContainer:
source = filePath
target = os.path.join(outputDestination, file)
Logger.info("MAIN: Found metadata file %s for file %s", fileExtention, filePath)
state = copy_link(source, target, useLink, outputDestination)
if state == False:
Logger.error("MAIN: Failed to link file %s", file)
failed_link = 1
elif fileExtention in compressedContainer:
Logger.info("MAIN: Found compressed archive %s for file %s", fileExtention, filePath)
source = filePath
target = os.path.join(outputDestination, file)
try:
extractor.extract(dirpath, file, outputDestination)
except:
Logger.warn("Extraction failed for %s", file)
else:
Logger.debug("MAIN: Ignoring unknown filetype %s for file %s", fileExtention, filePath)
continue
flatten(outputDestination)
# Now check if movie files exist in destination:
for dirpath, dirnames, filenames in os.walk(outputDestination):
for file in filenames:
filePath = os.path.join(dirpath, file)
fileExtention = os.path.splitext(file)[1]
if fileExtention in mediaContainer: # If the file is a video file
if is_sample(filePath, inputName, minSampleSize):
Logger.debug("Removing sample file: %s", filePath)
os.unlink(filePath) # remove samples
Logger.debug("MAIN: Scanning files in directory: %s", inputDirectory)
now = datetime.datetime.now()
for dirpath, dirnames, filenames in os.walk(inputDirectory):
for file in filenames:
filePath = os.path.join(dirpath, file)
fileExtention = os.path.splitext(file)[1]
targetDirectory = os.path.join(outputDestination, file)
if root == 1:
Logger.debug("MAIN: Looking for %s in filename", inputName)
if (safeName(inputName) in safeName(file)) or (safeName(os.path.splitext(file)[0]) in safeName(inputName)):
pass # This file does match the Torrent name
Logger.debug("Found file %s that matches Torrent Name %s", file, inputName)
else:
continue # This file does not match the Torrent name, skip it
if root == 2:
Logger.debug("MAIN: Looking for files with modified/created dates less than 5 minutes old.")
mtime_lapse = now - datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(dirpath, file)))
ctime_lapse = now - datetime.datetime.fromtimestamp(os.path.getctime(os.path.join(dirpath, file)))
if (mtime_lapse < datetime.timedelta(minutes=5)) or (ctime_lapse < datetime.timedelta(minutes=5)):
pass # This file does match the date time criteria
Logger.debug("Found file %s with date modifed/created less than 5 minutes ago.", file)
else:
continue # This file has not been recently moved or created, skip it
if fileExtention in mediaContainer: # If the file is a video file
if is_sample(filePath, inputName, minSampleSize): # Ignore samples
Logger.info("MAIN: Ignoring sample file: %s ", filePath)
continue
else:
video = video + 1
Logger.info("MAIN: Found video file %s in %s", fileExtention, filePath)
try:
copy_link(filePath, targetDirectory, useLink, outputDestination)
except Exception as e:
Logger.error("MAIN: Failed to link file: %s", file)
Logger.debug(e)
elif fileExtention in metaContainer:
Logger.info("MAIN: Found metadata file %s for file %s", fileExtention, filePath)
try:
copy_link(filePath, targetDirectory, useLink, outputDestination)
except Exception as e:
Logger.error("MAIN: Failed to link file: %s", file)
Logger.debug(e)
elif fileExtention in compressedContainer:
Logger.info("MAIN: Found compressed archive %s for file %s", fileExtention, filePath)
try:
extractor.extract(filePath, outputDestination)
except Exception as e:
Logger.warn("MAIN: Extraction failed for: %s", file)
Logger.debug(e)
else:
Logger.debug("MAIN: Ignoring unknown filetype %s for file %s", fileExtention, filePath)
continue
flatten(outputDestination)
if status == 0: #### Maybe we should move this to a more appropriate place?
Logger.info("MAIN: Successful run")
Logger.debug("MAIN: Calling autoProcess script for successful download.")
elif failed_extract == 1 and failed_link == 0: # failed to extract files only.
Logger.info("MAIN: Failed to extract a compressed archive")
Logger.debug("MAIN: Assume this to be password protected file.")
Logger.debug("MAIN: Calling autoProcess script for failed download.")
else:
Logger.error("MAIN: Something failed! Please check logs. Exiting")
sys.exit(-1)
# Now check if movie files exist in destination:
for dirpath, dirnames, filenames in os.walk(outputDestination):
for file in filenames:
filePath = os.path.join(dirpath, file)
fileExtention = os.path.splitext(file)[1]
if fileExtention in mediaContainer: # If the file is a video file
if is_sample(filePath, inputName, minSampleSize):
Logger.debug("MAIN: Removing sample file: %s", filePath)
os.unlink(filePath) # remove samples
else:
video2 = video2 + 1
if video2 >= video and video2 > 0: # Check that all video files were moved
status = 0
if status == 0: #### Maybe we should move this to a more appropriate place?
Logger.info("MAIN: Successful run")
Logger.debug("MAIN: Calling autoProcess script for successful download.")
else:
Logger.error("MAIN: Something failed! Please check logs. Exiting")
sys.exit(-1)
#### quick 'n dirty hardlink solution for uTorrent, need to implement support for deluge, transmission
if inputHash and useLink and clientAgent == 'utorrent':
try:
Logger.debug("MAIN: Connecting to uTorrent: %s", uTorrentWEBui)
utorrentClass = UTorrentClient(uTorrentWEBui, uTorrentUSR, uTorrentPWD)
except Exception as e:
Logger.error("MAIN: Failed to connect to uTorrent: %s", e)
Logger.debug("MAIN: Stoping torrent %s in uTorrent while processing", inputName)
utorrentClass.stop(inputHash)
time.sleep(5) # Give uTorrent some time to catch up with the change
##### quick 'n dirty hardlink solution for uTorrent, need to implement support for deluge, transmission
# Now we pass off to CouchPotato or Sick-Beard
if inputCategory == cpsCategory:
Logger.info("MAIN: Calling CouchPotatoServer to post-process: %s", inputName)
result = autoProcessMovie.process(outputDestination, inputName, status)
elif inputCategory == sbCategory:
Logger.info("MAIN: Calling Sick-Beard to post-process: %s", inputName)
result = autoProcessTV.processEpisode(outputDestination, inputName, status)
if result == 1:
Logger.info("MAIN: A problem was reported in the autoProcess* script. If torrent was pasued we will resume seeding")
#### quick 'n dirt hardlink solution for uTorrent, need to implent support for deluge, transmission
if inputHash and useLink and clientAgent == 'utorrent' and status == 0: # only resume seeding for successfully extracted files?
Logger.debug("MAIN: Starting torrent %s in uTorrent", inputName)
utorrentClass.start(inputHash)
#### quick 'n dirty hardlink solution for uTorrent, need to implement support for deluge, transmission
Logger.info("MAIN: All done.")
if __name__ == "__main__":
# Logging
nzbtomedia_configure_logging(os.path.dirname(sys.argv[0]))
Logger = logging.getLogger(__name__)
Logger.info("====================") # Seperate old from new log
Logger.info("TorrentToMedia %s", VERSION)
config = ConfigParser.ConfigParser()
configFilename = os.path.join(os.path.dirname(sys.argv[0]), "autoProcessMedia.cfg")
if not os.path.isfile(configFilename):
Logger.error("You need an autoProcessMedia.cfg file - did you rename and edit the .sample?")
sys.exit(-1)
# CONFIG FILE
Logger.info("MAIN: Loading config from %s", configFilename)
config.read(configFilename)
# EXAMPLE VALUES:
clientAgent = config.get("Torrent", "clientAgent") # utorrent | deluge | transmission | other
useLink = config.get("Torrent", "useLink") # true | false
minSampleSize = int(config.get("Torrent", "minSampleSize")) # 200 (in MB)
outputDirectory = config.get("Torrent", "outputDirectory") # /abs/path/to/complete/
categories = (config.get("Torrent", "categories")).split(',') # music,music_videos,pictures,software
uTorrentWEBui = config.get("Torrent", "uTorrentWEBui") # http://localhost:8090/gui/
uTorrentUSR = config.get("Torrent", "uTorrentUSR") # mysecretusr
uTorrentPWD = config.get("Torrent", "uTorrentPWD") # mysecretpwr
compressedContainer = (config.get("Torrent", "compressedExtentions")).split(',') # .zip,.rar,.7z
mediaContainer = (config.get("Torrent", "mediaExtentions")).split(',') # .mkv,.avi,.divx
metaContainer = (config.get("Torrent", "metaExtentions")).split(',') # .nfo,.sub,.srt
cpsCategory = config.get("CouchPotato", "cpsCategory") # movie
sbCategory = config.get("SickBeard", "sbCategory") # tv
categories.append(cpsCategory)
categories.append(sbCategory)
# Hardlink solution with uTorrent
if inputHash and useLink:
try:
inputDirectory, inputName, inputCategory, inputHash = parse_args(clientAgent)
except Exception as e:
Logger.error("MAIN: There was a problem loading variables: %s", e)
sys.exit(-1)
# Hardlink solution with uTorrent
if inputHash and useLink:
Logger.debug("MAIN: Starting torrent %s in uTorrent", inputName)
utorrentClass.start(inputHash)
main(inputDirectory, inputName, inputCategory, inputHash)
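The copy_link helper above tries a hard link when useLink is enabled and quietly falls back to a plain copy if linking fails. A minimal standalone sketch of the same fallback pattern, with os.link standing in for the bundled linktastic module (an assumption for illustration only):

import os
import shutil

def link_or_copy(source, target):
    # Create the destination folder, try a hard link first, and fall back
    # to a full copy if the filesystem (or an existing target) refuses it.
    targetDir = os.path.dirname(target)
    if targetDir and not os.path.isdir(targetDir):
        os.makedirs(targetDir)
    try:
        os.link(source, target)
        return "linked"
    except OSError:
        shutil.copy(source, target)
        return "copied"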

View file

@@ -1,6 +1,6 @@
[CouchPotato]
category = movie
outputDirectory = /abs/path/to/complete/movies
#### cpsCategory - category that gets called for post-processing with CPS
cpsCategory = movie
apikey =
host = localhost
port = 5050
@@ -15,8 +15,8 @@ delete_failed = 0
[SickBeard]
category = tv
outputDirectory = /abs/path/to/complete/tv
#### sbCategory - category that gets called for post-processing with Sick-Beard
sbCategory = tv
host=localhost
port=8081
username=
@@ -27,14 +27,16 @@ ssl=0
watch_dir=
failed_fork=0
[Torrent]
###### Set to whatever torrent client you use.
###### Supported values: utorrent, transmission, deluge, other
###### clientAgent - Supported clients: utorrent, transmission, deluge, other
clientAgent = other
###### useLink - Set to true or false depending on if you want to use hardlinks
useLink = false
###### minSampleSize - Minimum required size to consider a file not a sample file (in MB, e.g. 200)
minSampleSize = 200
###### outputDirectory - Default output directory (categories will be appended as sub directory to outputDirectory)
outputDirectory = /abs/path/to/complete/
###### categories - additional download categories to process (each is appended as a sub directory of outputDirectory)
categories = music,music_videos,pictures,software
###### uTorrent Hardlink solution (You must edit this if you're using TorrentToMedia.py with uTorrent)
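For reference, a minimal sketch of reading this [Torrent] section and splitting the comma-separated lists, using the key names from the sample above (ConfigParser is configparser on Python 3; the boolean conversion here is an illustration, the script itself keeps useLink as a string):

import os
import sys
import ConfigParser  # "import configparser" on Python 3

configFilename = "autoProcessMedia.cfg"
if not os.path.isfile(configFilename):
    sys.exit("autoProcessMedia.cfg not found - rename and edit the .sample first")

config = ConfigParser.ConfigParser()
config.read(configFilename)

clientAgent = config.get("Torrent", "clientAgent")                    # utorrent | deluge | transmission | other
useLink = config.get("Torrent", "useLink").lower() == "true"          # sample stores true/false as text
minSampleSize = int(config.get("Torrent", "minSampleSize"))           # MB
categories = config.get("Torrent", "categories").split(',')
mediaContainer = config.get("Torrent", "mediaExtentions").split(',')  # note the historical key spelling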

View file

@ -3,6 +3,7 @@ import urllib
import os
import shutil
import ConfigParser
import datetime
import time
import json
import logging
@@ -30,6 +31,74 @@ class AuthURLOpener(urllib.FancyURLopener):
self.numTries = 0
return urllib.FancyURLopener.open(self, url)
def get_imdb(nzbName, dirName):
a=nzbName.find('.cp(')+4 #search for .cp(tt in nzbName
b=nzbName[a:].find(')')+a
imdbid=nzbName[a:b]
if imdbid:
Logger.info("Found movie id %s in name", imdbid)
return imdbid
a=dirName.find('.cp(')+4 #search for .cp(tt in dirname
b=dirName[a:].find(')')+a
imdbid=dirName[a:b]
if imdbid:
Logger.info("Found movie id %s in directory", imdbid)
return imdbid
else:
Logger.warning("Could not find an imdb id in directory or name")
Logger.info("Postprocessing will continue, but the movie may not be identified correctly by CouchPotato")
return ""
def get_movie_info(myOpener, baseURL, imdbid):
if not imdbid:
return ""
url = baseURL + "movie.list"
Logger.debug("Opening URL: %s", url)
try:
urlObj = myOpener.openit(url)
except IOError, e:
Logger.error("Unable to open URL: %s", str(e))
return ""
movie_id = ""
result = json.load(urlObj)
movieid = [item["id"] for item in result["movies"]]
library = [item["library"]["identifier"] for item in result["movies"]]
for index in range(len(movieid)):
if library[index] == imdbid:
movie_id = str(movieid[index])
Logger.info("Found movie id %s in CPS database for movie %s", movie_id, imdbid)
break
return movie_id
def get_status(myOpener, baseURL, movie_id):
if not movie_id:
return ""
url = baseURL + "movie.get/?id=" + str(movie_id)
Logger.debug("Opening URL: %s", url)
try:
urlObj = myOpener.openit(url)
except IOError, e:
Logger.error("Unable to open URL: %s", str(e))
return ""
result = json.load(urlObj)
try:
movie_status = result["movie"]["status"]["identifier"]
Logger.debug("This movie is marked as status %s in CouchPotatoServer", movie_status)
return movie_status
except Exception, e: # index out of range/doesn't exist?
Logger.error("Could not find a status for this movie due to: %s", str(e))
return ""
def process(dirName, nzbName=None, status=0):
@@ -40,7 +109,7 @@ def process(dirName, nzbName=None, status=0):
if not os.path.isfile(configFilename):
Logger.error("You need an autoProcessMedia.cfg file - did you rename and edit the .sample?")
sys.exit(-1)
return 1 # failure
config.read(configFilename)
@@ -65,7 +134,9 @@ def process(dirName, nzbName=None, status=0):
myOpener = AuthURLOpener(username, password)
nzbName1 = str(nzbName)
nzbName = str(nzbName) # make sure it is a string
imdbid = get_imdb(nzbName, dirName)
if ssl:
protocol = "https://"
@@ -75,6 +146,12 @@ def process(dirName, nzbName=None, status=0):
if nzbName == "Manual Run":
delay = 0
baseURL = protocol + host + ":" + port + web_root + "/api/" + apikey + "/"
movie_id = get_movie_info(myOpener, baseURL, imdbid) # get the CPS database movie id for this movie.
initial_status = get_status(myOpener, baseURL, movie_id)
process_all_exceptions(nzbName.lower(), dirName)
if status == 0:
@@ -83,9 +160,9 @@ def process(dirName, nzbName=None, status=0):
else:
command = "renamer.scan"
url = protocol + host + ":" + port + web_root + "/api/" + apikey + "/" + command
url = baseURL + command
Logger.info("waiting for %s seconds to allow CPS to process newly extracted files", str(delay))
Logger.info("Waiting for %s seconds to allow CPS to process newly extracted files", str(delay))
time.sleep(delay)
@@ -95,23 +172,27 @@ def process(dirName, nzbName=None, status=0):
urlObj = myOpener.openit(url)
except IOError, e:
Logger.error("Unable to open URL: %s", str(e))
sys.exit(1)
return 1 # failure
result = json.load(urlObj)
Logger.info("CouchPotatoServer returned %s", result)
if result['success']:
Logger.info("%s started on CouchPotatoServer for %s", command, nzbName1)
Logger.info("%s started on CouchPotatoServer for %s", command, nzbName)
else:
Logger.error("%s has NOT started on CouchPotatoServer for %s", command, nzbName1)
Logger.error("%s has NOT started on CouchPotatoServer for %s. Exiting", command, nzbName)
return 1 # failure
else:
Logger.info("download of %s has failed.", nzbName1)
Logger.info("trying to re-cue the next highest ranked release")
a=nzbName1.find('.cp(')+4
b=nzbName1[a:].find(')')+a
imdbid=nzbName1[a:b]
Logger.info("Download of %s has failed.", nzbName)
Logger.info("Trying to re-cue the next highest ranked release")
if not movie_id:
Logger.warning("Cound not find a movie in the database for release %s", nzbName)
Logger.warning("Please manually ignore this release and refresh the wanted movie")
Logger.error("Exiting autoProcessMovie script")
return 1 # failure
url = protocol + host + ":" + port + web_root + "/api/" + apikey + "/movie.list"
url = baseURL + "searcher.try_next/?id=" + movie_id
Logger.debug("Opening URL: %s", url)
@@ -119,41 +200,32 @@ def process(dirName, nzbName=None, status=0):
urlObj = myOpener.openit(url)
except IOError, e:
Logger.error("Unable to open URL: %s", str(e))
sys.exit(1)
n=0
result = json.load(urlObj)
movieid = [item["id"] for item in result["movies"]]
library = [item["library"] for item in result["movies"]]
identifier = [item["identifier"] for item in library]
for index in range(len(movieid)):
if identifier[index] == imdbid:
movid = str(movieid[index])
Logger.info("found movie id %s in database for release %s", movid, nzbName1)
n = n + 1
break
if n == 0:
Logger.warning("cound not find a movie in the database for release %s", nzbName1)
Logger.warning("please manually ignore this release and refresh the wanted movie")
Logger.error("exiting postprocessing script")
sys.exit(1)
url = protocol + host + ":" + port + web_root + "/api/" + apikey + "/searcher.try_next/?id=" + movid
Logger.debug("Opening URL: %s", url)
try:
urlObj = myOpener.openit(url)
except IOError, e:
Logger.error("Unable to open URL: %s", str(e))
sys.exit(1)
return 1 # failure
result = urlObj.readlines()
for line in result:
Logger.info("%s", line)
Logger.info("movie %s set to try the next best release on CouchPotatoServer", movid)
Logger.info("Movie %s set to try the next best release on CouchPotatoServer", movie_id)
if delete_failed:
Logger.info("Deleting failed files and folder %s", dirName)
shutil.rmtree(dirName)
try:
shutil.rmtree(dirName)
except Exception, e:
Logger.error("Unable to delete folder %s due to: %s", dirName, str(e))
return 0 # success
if nzbName == "Manual Run":
return 0 # success
# we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing.
start = datetime.datetime.now() # set time for timeout
while (datetime.datetime.now() - start) < datetime.timedelta(minutes=2): # only wait 2 minutes, then return to TorrentToMedia
movie_status = get_status(myOpener, baseURL, movie_id) # get the current status of this movie.
if movie_status != initial_status: # Something has changed. CPS must have processed this movie.
Logger.info("SUCCESS: This movie is now marked as status %s in CouchPotatoServer", movie_status)
return 0 # success
time.sleep(20) # Just stop this looping infinitely and hogging resources for 2 minutes ;)
else: # The status hasn't changed. We have waited 2 minutes which is more than enough. uTorrent can resume seeding now.
Logger.warning("The movie does not appear to have changed status after 2 minutes. Please check CouchPotato Logs")
return 1 # failure
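The new tail of autoProcessMovie.process polls CouchPotato until the movie status changes or two minutes pass, so TorrentToMedia can resume seeding either way. The same wait-with-timeout pattern in isolation (check_status is a placeholder for get_status, not part of the script):

import datetime
import time

def wait_for_change(check_status, initial_status, timeout_minutes=2, poll_seconds=20):
    # Poll until the reported status differs from the initial one,
    # giving up after the timeout so the caller can carry on.
    start = datetime.datetime.now()
    while (datetime.datetime.now() - start) < datetime.timedelta(minutes=timeout_minutes):
        if check_status() != initial_status:
            return True   # CPS processed the release
        time.sleep(poll_seconds)
    return False          # timed out, status never changed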

View file

@@ -57,7 +57,7 @@ def processEpisode(dirName, nzbName=None, failed=False):
if not os.path.isfile(configFilename):
Logger.error("You need an autoProcessMedia.cfg file - did you rename and edit the .sample?")
sys.exit(-1)
return 1 # failure
try:
fp = open(configFilename, "r")
@@ -65,7 +65,7 @@ def processEpisode(dirName, nzbName=None, failed=False):
fp.close()
except IOError, e:
Logger.error("Could not read configuration file: %s", str(e))
sys.exit(1)
return 1 # failure
watch_dir = ""
host = config.get("SickBeard", "host")
@@ -98,7 +98,7 @@ def processEpisode(dirName, nzbName=None, failed=False):
#allows manual call of postprocess script if we have specified a watch_dir. Check that here.
if nzbName == "Manual Run" and watch_dir == "":
Logger.error("In order to run this script manually you must specify a watch_dir in autoProcessTV.cfg")
sys.exit(-1)
return 1 # failure
#allows us to specify the default watch directory and call the postprocessing on another PC with a different directory structure.
if watch_dir != "":
dirName = watch_dir
@@ -126,7 +126,7 @@ def processEpisode(dirName, nzbName=None, failed=False):
# the standard Master branch of SickBeard cannot process failed downloads. So exit here.
if status:
Logger.info("The download failed. Nothing to process")
sys.exit()
return 0 # Success (as far as this script is concerned)
else:
Logger.info("The download succeeded. Sending process request to SickBeard")
@@ -145,8 +145,9 @@ def processEpisode(dirName, nzbName=None, failed=False):
urlObj = myOpener.openit(url)
except IOError, e:
Logger.error("Unable to open URL: %s", str(e))
sys.exit(1)
return 1 # failure
result = urlObj.readlines()
for line in result:
Logger.info("%s", line)
return 0 # Success
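These hunks swap sys.exit() calls for return codes so the wrapper scripts can react to failures themselves. A small sketch of that pattern (process here is a placeholder, not the real autoProcessTV entry point):

import sys

def process(dirName, nzbName=None, failed=False):
    # Signal problems with a return code instead of killing the interpreter,
    # so a caller can log, resume a torrent, or choose its own exit status.
    if failed:
        return 1  # failure
    return 0      # success

if __name__ == "__main__":
    sys.exit(process("/downloads/example", "example.nzb", failed=False))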

View file

@@ -0,0 +1,56 @@
7-Zip
~~~~~
License for use and distribution
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7-Zip Copyright (C) 1999-2012 Igor Pavlov.
Licenses for files are:
1) 7z.dll: GNU LGPL + unRAR restriction
2) All other files: GNU LGPL
The GNU LGPL + unRAR restriction means that you must follow both
GNU LGPL rules and unRAR restriction rules.
Note:
You can use 7-Zip on any computer, including a computer in a commercial
organization. You don't need to register or pay for 7-Zip.
GNU LGPL information
--------------------
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You can receive a copy of the GNU Lesser General Public License from
http://www.gnu.org/
unRAR restriction
-----------------
The decompression engine for RAR archives was developed using source
code of unRAR program.
All copyrights to original unRAR code are owned by Alexander Roshal.
The license for original unRAR code has the following restriction:
The unRAR sources cannot be used to re-create the RAR compression algorithm,
which is proprietary. Distribution of modified unRAR sources in separate form
or as a part of other software is permitted, provided that it is clearly
stated in the documentation and source comments that the code may
not be used to develop a RAR (WinRAR) compatible archiver.
--
Igor Pavlov

View file

@@ -0,0 +1,56 @@
7-Zip
~~~~~
License for use and distribution
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7-Zip Copyright (C) 1999-2012 Igor Pavlov.
Licenses for files are:
1) 7z.dll: GNU LGPL + unRAR restriction
2) All other files: GNU LGPL
The GNU LGPL + unRAR restriction means that you must follow both
GNU LGPL rules and unRAR restriction rules.
Note:
You can use 7-Zip on any computer, including a computer in a commercial
organization. You don't need to register or pay for 7-Zip.
GNU LGPL information
--------------------
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You can receive a copy of the GNU Lesser General Public License from
http://www.gnu.org/
unRAR restriction
-----------------
The decompression engine for RAR archives was developed using source
code of unRAR program.
All copyrights to original unRAR code are owned by Alexander Roshal.
The license for original unRAR code has the following restriction:
The unRAR sources cannot be used to re-create the RAR compression algorithm,
which is proprietary. Distribution of modified unRAR sources in separate form
or as a part of other software is permitted, provided that it is clearly
stated in the documentation and source comments that the code may
not be used to develop a RAR (WinRAR) compatible archiver.
--
Igor Pavlov

View file

@@ -36,7 +36,7 @@ def which(program):
return None
def extract(dirpath, file, outputDestination):
def extract(filePath, outputDestination):
# Using Windows
if os.name == 'nt':
if os_platform() == 'AMD64':
@@ -89,7 +89,6 @@ def extract(dirpath, file, outputDestination):
Logger.warn("EXTRACTOR: No archive extracting programs found, plugin will be disabled")
ext = os.path.splitext(file)
filePath = os.path.join(dirpath, file)
if ext[1] in (".gz", ".bz2", ".lzma"):
# Check if this is a tar
if os.path.splitext(ext[0])[1] == ".tar":
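extract keeps the two-step splitext test shown above to recognise .tar.gz style double extensions. A standalone sketch of that check:

import os

def is_tarball(filename):
    # "release.tar.gz" splits into ("release.tar", ".gz"); a second splitext
    # on the stem reveals the inner ".tar".
    stem, ext = os.path.splitext(filename)
    return ext in (".gz", ".bz2", ".lzma") and os.path.splitext(stem)[1] == ".tar"

print(is_tarball("release.tar.gz"))   # True
print(is_tarball("release.gz"))       # False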

View file

@@ -10,6 +10,7 @@ from nzbToMediaUtil import *
nzbtomedia_configure_logging(os.path.dirname(sys.argv[0]))
Logger = logging.getLogger(__name__)
Logger.info("====================") # Seperate old from new log
Logger.info("nzbToCouchPotato %s", VERSION)
# SABnzbd
@@ -23,7 +24,7 @@ if len(sys.argv) == 8:
# 6 Group that the NZB was posted in e.g. alt.binaries.x
# 7 Status of post processing. 0 = OK, 1=failed verification, 2=failed unpack, 3=1+2
Logger.info("Script triggered from SABnzbd, starting autoProcessMovie...")
autoProcessMovie.process(sys.argv[1], sys.argv[2], sys.argv[7])
result = autoProcessMovie.process(sys.argv[1], sys.argv[2], sys.argv[7])
# NZBGet
elif len(sys.argv) == 4:
@@ -32,10 +33,9 @@ elif len(sys.argv) == 4:
# 2 The original name of the NZB file
# 3 The status of the download: 0 == successful
Logger.info("Script triggered from NZBGet, starting autoProcessMovie...")
autoProcessMovie.process(sys.argv[1], sys.argv[2], sys.argv[3])
result = autoProcessMovie.process(sys.argv[1], sys.argv[2], sys.argv[3])
else:
Logger.warn("Invalid number of arguments received from client.")
Logger.info("Running autoProcessMovie as a manual run...")
autoProcessMovie.process('Manual Run', 'Manual Run', 0)
result = autoProcessMovie.process('Manual Run', 'Manual Run', 0)

View file

@@ -11,7 +11,7 @@ Logger = logging.getLogger()
def process_all_exceptions(name, dirname):
for group, exception in __customgroups__.items():
if not group in name:
if not (group in name or group in dirname):
continue
process_exception(exception, name, dirname)
@@ -23,7 +23,7 @@ def process_exception(exception, name, dirname):
def process_qoq(filename, dirname):
Logger.debug("Reversing the file name for a QoQ release %s", filename)
head, fileExtention = os.path.splitext(filename)
head, fileExtention = os.path.splitext(os.path.basename(filename))
newname = head[::-1]
newfile = newname + fileExtention
newfilePath = os.path.join(dirname, newfile)
@@ -32,4 +32,4 @@ def process_qoq(filename, dirname):
# dict for custom groups
# we can add more to this list
__customgroups__ = {'[=-< Q o Q >-=]': process_qoq}
__customgroups__ = {'Q o Q': process_qoq}
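process_qoq now reverses only the base name and keeps the extension, instead of reversing the whole path. A standalone sketch of that rename logic (string handling only, no renaming on disk):

import os

def reverse_basename(filename):
    # "eivom.ymmud.mkv" -> "dummy.movie.mkv": reverse the stem, keep the extension.
    head, ext = os.path.splitext(os.path.basename(filename))
    return head[::-1] + ext

print(reverse_basename("eivom.ymmud.mkv"))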

View file

@@ -1,10 +1,17 @@
import logging
import logging.config
import os
import re
import sys
import linktastic.linktastic as linktastic
Logger = logging.getLogger(__name__)
Logger = logging.getLogger()
def safeName(name):
safename = re.sub(r"[\/\\\:\*\?\"\<\>\|]", "", name) #make this name safe for use in directories for windows etc.
return safename
def nzbtomedia_configure_logging(dirname):
@@ -26,6 +33,168 @@ def create_destination(outputDestination):
Logger.error("CREATE DESTINATION: Not possible to create destination folder: %s. Exiting", e)
sys.exit(-1)
def category_search(inputDirectory, inputName, inputCategory, root, categories):
categorySearch = [os.path.normpath(inputDirectory), ""] # initialize
notfound = 0
for x in range(10): # loop up through 10 directories looking for category.
try:
categorySearch2 = os.path.split(os.path.normpath(categorySearch[0]))
except: # this might happen when we can't go higher.
if inputCategory and inputName: # if these exist, we are ok to proceed, but assume we are in a root/common directory.
Logger.info("SEARCH: Could not find a Torrent Name or category in the directory structure")
Logger.info("SEARCH: We assume the directory passed is the root directory for your downloader")
Logger.warn("SEARCH: You should change settings to download torrents to their own directory if possible")
Logger.info("SEARCH: We will try and determine which files to process, individually")
root = 1
break # we are done
elif inputCategory: # if this exists, we are ok to proceed, but assume we are in a root/common directory and we have to check file dates.
Logger.info("SEARCH: Could not find a Torrent Name or Category in the directory structure")
Logger.info("SEARCH: We assume the directory passed is the root directory for your downlaoder")
Logger.warn("SEARCH: You should change settings to download torrents to their own directory if possible")
Logger.info("SEARCH: We will try and determine which files to process, individually")
root = 2
break # we are done
else:
Logger.error("SEARCH: Could not identify Category of Torrent Name in the directory structure. Please check downloader settings. Exiting")
sys.exit(-1)
if categorySearch2[1] in categories:
Logger.debug("SEARCH: Found Category: %s in directory structure", categorySearch2[1])
if not inputCategory:
Logger.info("SEARCH: Determined Category to be: %s", categorySearch2[1])
inputCategory = categorySearch2[1]
if inputName and categorySearch[0] != os.path.normpath(inputDirectory): # if we are not in the root directory and we have inputName we can continue.
if ('.cp(tt' in categorySearch[1]) and (not '.cp(tt' in inputName): # if the directory was created by CouchPotato, and this tag is not in Torrent name, we want to add it.
Logger.info("SEARCH: Changing Torrent Name to %s to preserve imdb id.", categorySearch[1])
inputName = categorySearch[1]
Logger.info("SEARCH: Identified Category: %s and Torrent Name: %s. We are in a unique directory, so we can proceed.", inputCategory, inputName)
break # we are done
elif categorySearch[1] and not inputName: # assume the next directory deep is the torrent name.
Logger.info("SEARCH: Found torrent directory %s in category directory %s", os.path.join(categorySearch[0], categorySearch[1]), categorySearch[0])
inputName = categorySearch[1]
break # we are done
elif ('.cp(tt' in categorySearch[1]) and (not '.cp(tt' in inputName): # if the directory was created by CouchPotato, and this tag is not in Torrent name, we want to add it.
Logger.info("SEARCH: Changing Torrent Name to %s to preserve imdb id.", categorySearch[1])
inputName = categorySearch[1]
break # we are done
elif os.path.isdir(os.path.join(categorySearch[0], inputName)) and inputName: # testing for torrent name in first sub directory
Logger.info("SEARCH: Found torrent directory %s in category directory %s", os.path.join(categorySearch[0], inputName), categorySearch[0])
if categorySearch[0] == os.path.normpath(inputDirectory): # only true on first pass, x =0
inputDirectory = os.path.join(categorySearch[0], inputName) # we only want to search this next dir up.
break # we are done
elif os.path.isdir(os.path.join(categorySearch[0], safeName(inputName))) and inputName: # testing for torrent name in first sub directory
Logger.info("SEARCH: Found torrent directory %s in category directory %s", os.path.join(categorySearch[0], safeName(inputName)), categorySearch[0])
if categorySearch[0] == os.path.normpath(inputDirectory): # only true on first pass, x =0
inputDirectory = os.path.join(categorySearch[0], safeName(inputName)) # we only want to search this next dir up.
break # we are done
elif inputName: # if this exists, we are ok to proceed, but we are in a root/common directory.
Logger.info("SEARCH: Could not find a unique torrent folder in the directory structure")
Logger.info("SEARCH: The directory passed is the root directory for category %s", categorySearch2[1])
Logger.warn("SEARCH: You should change settings to download torrents to their own directory if possible")
Logger.info("SEARCH: We will try and determine which files to process, individually")
root = 1
break # we are done
else: # this is a problem! if we don't have Torrent name and are in the root category dir, we can't proceed.
Logger.warn("SEARCH: Could not identify a torrent name and the directory passed is common to all downloads for category %s.", categorySearch[1])
Logger.warn("SEARCH: You should change settings to download torrents to their own directory if possible")
Logger.info("SEARCH: We will try and determine which files to process, individually")
root = 2
break
elif safeName(categorySearch2[1]) == safeName(inputName) and inputName: # we have identified a unique directory.
Logger.info("SEARCH: Files appear to be in their own directory")
if inputCategory: # we are ok to proceed.
break # we are done
else:
Logger.debug("SEARCH: Continuing scan to determin category.")
categorySearch = categorySearch2 # ready for next loop
continue # keep going
else:
if x == 9: # This is the last pass in the loop and we didn't find anything.
notfound = 1
break # we are done
else:
categorySearch = categorySearch2 # ready for next loop
continue # keep going
if notfound == 1:
if inputCategory and inputName: # if these exist, we are ok to proceed, but assume we are in a root/common directory.
Logger.info("SEARCH: Could not find a category in the directory structure")
Logger.info("SEARCH: We assume the directory passed is the root directory for your downloader")
Logger.warn("SEARCH: You should change settings to download torrents to their own directory if possible")
Logger.info("SEARCH: We will try and determine which files to process, individually")
root = 1
elif inputCategory: # if this exists, we are ok to proceed, but assume we are in a root/common directory and we have to check file dates.
Logger.info("SEARCH: Could not find a Torrent Name or Category in the directory structure")
Logger.info("SEARCH: We assume the directory passed is the root directory for your downlaoder")
Logger.warn("SEARCH: You should change settings to download torrents to their own directory if possible")
Logger.info("SEARCH: We will try and determine which files to process, individually")
root = 2
if not inputCategory: # we didn't find this after 10 loops. This is a problem.
Logger.error("SEARCH: Could not identify category and torrent name from the directory structure. Please check downloader settings. Exiting")
sys.exit(-1) # Oh yeah.... WE ARE DONE!
return inputDirectory, inputName, inputCategory, root
def is_sample(filePath, inputName, minSampleSize):
# minSampleSize in MB converted to bytes
SIZE_CUTOFF = minSampleSize * 1024 * 1024
# Ignore 'sample' in files unless 'sample' in Torrent Name
return ('sample' in filePath.lower()) and (not 'sample' in inputName) and (os.path.getsize(filePath) < SIZE_CUTOFF)
def copy_link(filePath, targetDirectory, useLink, outputDestination):
create_destination(outputDestination)
if useLink:
try:
Logger.info("COPYLINK: Linking %s to %s", filePath, targetDirectory)
linktastic.link(filePath, targetDirectory)
except:
if os.path.isfile(targetDirectory):
Logger.info("COPYLINK: Something went wrong in linktastic.link, but the destination file was created")
else:
Logger.info("COPYLINK: Something went wrong in linktastic.link, copying instead")
Logger.debug("COPYLINK: Copying %s to %s", filePath, targetDirectory)
shutil.copy(filePath, targetDirectory)
else:
Logger.debug("Copying %s to %s", filePath, targetDirectory)
shutil.copy(filePath, targetDirectory)
return True
def flatten(outputDestination):
Logger.info("FLATTEN: Flattening directory: %s", outputDestination)
for dirpath, dirnames, filenames in os.walk(outputDestination): # Flatten out the directory to make postprocessing easier
if dirpath == outputDestination:
continue # No need to try and move files in the root destination directory
for filename in filenames:
source = os.path.join(dirpath, filename)
target = os.path.join(outputDestination, filename)
try:
shutil.move(source, target)
except OSError:
Logger.error("FLATTEN: Could not flatten %s", source)
removeEmptyFolders(outputDestination) # Cleanup empty directories
def removeEmptyFolders(path):
Logger.info("REMOVER: Removing empty folders in: %s", path)
if not os.path.isdir(path):
return
# Remove empty subfolders
files = os.listdir(path)
if len(files):
for f in files:
fullpath = os.path.join(path, f)
if os.path.isdir(fullpath):
removeEmptyFolders(fullpath)
# If folder empty, delete it
files = os.listdir(path)
if len(files) == 0:
Logger.debug("REMOVER: Removing empty folder: %s", path)
os.rmdir(path)
def iterate_media_files(dirname):
mediaContainer = [ '.mkv', '.avi', '.divx', '.xvid', '.mov', '.wmv',
@@ -33,7 +202,7 @@ def iterate_media_files(dirname):
for dirpath, dirnames, filesnames in os.walk(dirname):
for filename in filesnames:
fileExtention = os.path.splitext(filename)[0]
fileExtention = os.path.splitext(filename)[1]
if not (fileExtention in mediaContainer):
continue
yield dirpath, os.path.join(dirpath, filename)
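The last hunk fixes iterate_media_files to compare the extension (splitext index 1) rather than the stem (index 0). A minimal sketch of the corrected filter, with the extension list shortened:

import os

MEDIA_EXTENSIONS = ('.mkv', '.avi', '.divx', '.xvid', '.mov', '.wmv')

def iterate_media_files(dirname):
    # Yield (directory, full path) for every file whose extension is a known media type.
    for dirpath, dirnames, filenames in os.walk(dirname):
        for filename in filenames:
            if os.path.splitext(filename)[1] in MEDIA_EXTENSIONS:
                yield dirpath, os.path.join(dirpath, filename)

for folder, path in iterate_media_files('.'):
    print(path)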

View file

@@ -32,6 +32,7 @@ from nzbToMediaUtil import *
nzbtomedia_configure_logging(os.path.dirname(sys.argv[0]))
Logger = logging.getLogger(__name__)
Logger.info("====================") # Seperate old from new log
Logger.info("nzbToSickBeard %s", VERSION)
# SABnzbd
@@ -45,7 +46,7 @@ if len(sys.argv) == 8:
# 6 Group that the NZB was posted in e.g. alt.binaries.x
# 7 Status of post processing. 0 = OK, 1=failed verification, 2=failed unpack, 3=1+2
Logger.info("Script triggered from SABnzbd, starting autoProcessTV...")
autoProcessTV.processEpisode(sys.argv[1], sys.argv[2], sys.argv[7])
result = autoProcessTV.processEpisode(sys.argv[1], sys.argv[2], sys.argv[7])
# NZBGet
elif len(sys.argv) == 4:
@@ -54,9 +55,9 @@ elif len(sys.argv) == 4:
# 2 The original name of the NZB file
# 3 The status of the download: 0 == successful
Logger.info("Script triggered from NZBGet, starting autoProcessTV...")
autoProcessTV.processEpisode(sys.argv[1], sys.argv[2], sys.argv[3])
result = autoProcessTV.processEpisode(sys.argv[1], sys.argv[2], sys.argv[3])
else:
Logger.debug("Invalid number of arguments received from client.")
Logger.info("Running autoProcessTV as a manual run...")
autoProcessTV.processEpisode('Manual Run', 'Manual Run', 0)
result = autoProcessTV.processEpisode('Manual Run', 'Manual Run', 0)
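Both nzbTo* wrappers pick their arguments by counting sys.argv, as the comments above describe. A standalone sketch of that dispatch, returning the values rather than calling the processors:

import sys

def dispatch(argv):
    # SABnzbd passes 8 arguments, NZBGet passes 4; anything else is a manual run.
    if len(argv) == 8:
        return "sabnzbd", argv[1], argv[2], argv[7]
    elif len(argv) == 4:
        return "nzbget", argv[1], argv[2], argv[3]
    return "manual", "Manual Run", "Manual Run", 0

source, dirName, nzbName, status = dispatch(sys.argv)
print(source, dirName, nzbName, status)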