Re-coded autoProcessMovie to retrieve the imdbID via an API call when we can't find one in the release or directory name, so it can be used to narrow our release search results; the download_id, if present, is used to narrow the results even further.
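
For reference, a condensed sketch of the new lookup order (the standalone helper below is illustrative only; the commit's actual implementation is the find_imdbid() method in the diff further down): check the directory name for a tt-style id, then the release name, then fall back to an OMDb title/year query built from the folder name.

    import os
    import re
    import requests  # the script itself uses its bundled copy via "from lib import requests"

    def lookup_imdbid(dirName, nzbName):
        # a tt-style id embedded in either name wins (e.g. "Some.Movie.2013.tt1234567")
        for name in (dirName, nzbName):
            m = re.search(r'(tt\d{7})', name)
            if m:
                return m.group(1)

        # otherwise guess "<title> <year>" from the folder name and ask OMDb for the id
        m = re.search(r'^(.+)\W(\d{4})', os.path.basename(dirName))
        if not m:
            return None
        title, year = m.group(1), m.group(2)
        try:
            r = requests.get("http://www.omdbapi.com", params={'t': title, 'y': year})
            return r.json().get('imdbID')
        except requests.ConnectionError:
            return None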

The results returned are now a dictionary of releases instead of loose variables, so that after a call to the CP renamer we can call our function again to get the new release results, run a comparison, and pick up the changed key/value pairs to check the status along with any other variables that may have changed.
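
The comparison itself is just a symmetric dictionary diff: any release key whose value differs between the two snapshots, or that exists in only one of them, is returned with the newer value preferred. A minimal sketch of the idea with made-up release data (the commit's real method is releases_diff() in the diff below; the simplified condition here is an assumption that behaves the same way for data like this):

    def releases_diff(dict_a, dict_b):
        # keys that changed or appear on only one side; keep the newer value when present
        return dict(
            (key, dict_b.get(key, dict_a.get(key)))
            for key in set(dict_a) | set(dict_b)
            if dict_a.get(key) != dict_b.get(key)
        )

    # snapshot the releases before and after CouchPotato's renamer runs, then compare
    before = {'release1': {'status': 'snatched', 'media_id': 'm1'}}
    after = {'release1': {'status': 'downloaded', 'media_id': 'm1'}}
    changed = releases_diff(before, after)
    if changed:
        release_status = changed[list(changed)[0]]['status']  # 'downloaded'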

This new code allows manual downloading of movies that normally could not be snatched from CP itself because the provider results do not contain them: simply place the newly downloaded files into the post-process folder and run our scripts manually, and CP will now post-process the release and add the movie to its database properly.
commit 94354bb7d9
echel0n committed 2014-04-17 03:53:15 -07:00
3 changed files with 108 additions and 143 deletions

View file

@@ -1,117 +1,101 @@
 import os
 import re
 import time
-import datetime
 import urllib
-import shutil
-import sys
-import platform
 import nzbtomedia
 from lib import requests
 from nzbtomedia.Transcoder import Transcoder
 from nzbtomedia.nzbToMediaSceneExceptions import process_all_exceptions
 from nzbtomedia.nzbToMediaUtil import convert_to_ascii, delete, create_torrent_class
 from nzbtomedia import logger
-from nzbtomedia.transmissionrpc.client import Client as TransmissionClient
 
 class autoProcessMovie:
-    def find_release_info(self, baseURL, download_id, dirName, nzbName, clientAgent):
-        imdbid = None
-        release_id = None
-        media_id = None
-        release_status = None
-        downloader = None
-
-        while(True):
-            # find imdbid in nzbName
-            m = re.search('(tt\d{7})', nzbName)
-            if m:
-                imdbid = m.group(1)
-                logger.postprocess("Found imdbid %s in name", imdbid)
-                break
-            # find imdbid in dirName
-            m = re.search('(tt\d{7})', dirName)
-            if m:
-                imdbid = m.group(1)
-                logger.postprocess("Found movie id %s in directory", imdbid)
-                break
-            break
-
-        url = baseURL + "/media.list/?release_status=snatched"
+    def find_imdbid(self, dirName, nzbName):
+        # find imdbid in dirName
+        m = re.search('(tt\d{7})', dirName)
+        if m:
+            imdbid = m.group(1)
+            logger.postprocess("Found movie id %s in directory", imdbid)
+            return imdbid
+
+        # find imdbid in nzbName
+        m = re.search('(tt\d{7})', nzbName)
+        if m:
+            imdbid = m.group(1)
+            logger.postprocess("Found imdbid %s in name", imdbid)
+            return imdbid
+
+        m = re.search("^(.+)\W(\d{4})", os.path.basename(dirName))
+        if m:
+            title = m.group(1)
+            year = m.group(2)
+
+        url = "http://www.omdbapi.com"
 
         logger.debug("Opening URL: %s", url)
 
         try:
-            r = requests.get(url)
+            r = requests.get(url, params={'y':year, 't':title})
+        except requests.ConnectionError:
+            logger.error("Unable to open URL")
+            return
+
+        results = r.json()
+
+        if hasattr(results, 'imdbID'):
+            return results['imdbID']
+
+    def get_releases(self, baseURL, download_id, dirName, nzbName):
+        releases = {}
+        params = {}
+
+        imdbid = self.find_imdbid(dirName, nzbName)
+
+        # determin cmd and params to send to CouchPotato to get our results
+        section = 'movies'
+        cmd = "/media.list"
+        params['status'] = 'active'
+
+        if imdbid:
+            section = 'media'
+            cmd = "/media.get"
+            params['id'] = imdbid
+
+        if download_id:
+            params['release_status'] = 'snatched,downloaded'
+
+        url = baseURL + cmd
+
+        logger.debug("Opening URL: %s", url)
+
+        try:
+            r = requests.get(url, params=params)
         except requests.ConnectionError:
             logger.error("Unable to open URL")
             return
 
         results = r.json()
 
-        def search_results(results, clientAgent):
-            last_edit = {}
-            try:
-                for movie in results['movies']:
-                    if imdbid:
-                        if imdbid != movie['identifiers']['imdb']:
-                            continue
-                    for i, release in enumerate(movie['releases']):
-                        if release['status'] != 'snatched':
-                            continue
-                        if download_id:
-                            if release['download_info']['id'] == download_id:
-                                return release
-                        # store releases by datetime just incase we need to use this info
-                        last_edit.update({datetime.datetime.fromtimestamp(release['last_edit']):release})
-            except:pass
-
-            if last_edit:
-                last_edit = sorted(last_edit.items())
-                if clientAgent != 'manual':
-                    for item in last_edit:
-                        release = item[1]
-                        if release['download_info']['downloader'] == clientAgent:
-                            return release
-                release = last_edit[0][1]
-                return release
-
-        matched_release = search_results(results, clientAgent)
-
-        if matched_release:
-            try:
-                release_id = matched_release['_id']
-                media_id = matched_release['media_id']
-                release_status = matched_release['status']
-                download_id = matched_release['download_info']['id']
-                downloader = matched_release['download_info']['downloader']
-            except:pass
-
-        return media_id, download_id, release_id, imdbid, release_status, downloader
-
-    def get_status(self, baseURL, media_id, release_id):
-        logger.debug("Attempting to get current status for movie:%s", media_id)
-
-        url = baseURL + "/media.get"
-
-        logger.debug("Opening URL: %s", url)
-
-        try:
-            r = requests.get(url, params={'id':media_id})
-        except requests.ConnectionError:
-            logger.error("Unable to open URL")
-            return
-
-        try:
-            result = r.json()
-            for release in result["media"]['releases']:
-                if release['_id'] == release_id:
-                    return release["status"]
-        except:pass
+        try:
+            movies = results[section]
+            if not isinstance(movies, list):
+                movies = [movies]
+            for movie in movies:
+                for release in movie['releases']:
+                    if download_id and download_id != release['download_info']['id']:
+                        continue
+                    releases[release['_id']] = release
+        except:pass
+
+        return releases
+
+    def releases_diff(self, dict_a, dict_b):
+        return dict([
+            (key, dict_b.get(key, dict_a.get(key)))
+            for key in set(dict_a.keys() + dict_b.keys())
+            if (
+                (key in dict_a and (not key in dict_b or dict_a[key] != dict_b[key])) or
+                (key in dict_b and (not key in dict_a or dict_a[key] != dict_b[key]))
+            )
+        ])
 
     def process(self, dirName, nzbName=None, status=0, clientAgent = "manual", download_id = "", inputCategory=None):
 
         if dirName is None:
@@ -164,16 +148,10 @@ class autoProcessMovie:
 
         baseURL = protocol + host + ":" + port + web_root + "/api/" + apikey
 
-        media_id, download_id, release_id, imdbid, release_status, downloader = self.find_release_info(baseURL, download_id, dirName, nzbName, clientAgent)
-
-        if release_status:
-            if release_status != "snatched":
-                logger.warning("%s is marked with a status of %s on CouchPotato, skipping ...", nzbName, release_status)
-                return 0
-        elif imdbid and not (download_id or media_id or release_id):
-            logger.error("Could only find a imdbID for %s, sending folder name to be post-processed by CouchPotato ...", nzbName)
-        else:
-            logger.error("Could not find a release status for %s on CouchPotato, skipping ...", nzbName)
+        releases = self.get_releases(baseURL, download_id, dirName, nzbName)
+
+        if not releases:
+            logger.error("Could not find any releases marked as WANTED on CouchPotato to compare changes against %s, skipping ...", nzbName)
             return 1
 
         process_all_exceptions(nzbName.lower(), dirName)
@@ -194,7 +172,7 @@ class autoProcessMovie:
 
             params = {}
             if download_id:
-                params['downloader'] = downloader
+                params['downloader'] = clientAgent
                 params['download_id'] = download_id
 
             params['media_folder'] = urllib.quote(dirName)
@@ -206,6 +184,8 @@ class autoProcessMovie:
 
             logger.debug("Opening URL: %s", url)
 
+            logger.postprocess("Attempting to perform a %s scan on CouchPotato for %s", method, nzbName)
+
            try:
                 r = requests.get(url, params=params)
             except requests.ConnectionError:
@@ -213,13 +193,10 @@
                 return 1 # failure
 
             result = r.json()
-
-            logger.postprocess("CouchPotatoServer returned %s", result)
 
             if result['success']:
-                logger.postprocess("%s scan started on CouchPotatoServer for %s", method, nzbName)
+                logger.postprocess("SUCCESS: %s scan started on CouchPotatoServer for %s", method, nzbName)
             else:
-                logger.error("%s scan has NOT started on CouchPotatoServer for %s. Exiting", method, nzbName)
+                logger.error("FAILED: %s scan has NOT started on CouchPotato for %s. Exiting ...", method, nzbName)
                 return 1 # failure
         else:
@@ -230,11 +207,13 @@
                 delete(dirName)
 
             if not download_id:
-                logger.warning("Cound not find a movie in the database for release %s", nzbName)
-                logger.warning("Please manually ignore this release and refresh the wanted movie")
-                logger.error("Exiting autoProcessMovie script")
+                logger.warning("Could not find a movie in the database for release %s", nzbName)
+                logger.warning("Please manually ignore this release and refresh the wanted movie from CouchPotato, Exiting ...")
                 return 1 # failure
 
+            release_id = releases.keys()[0]
+            media_id = releases[release_id]['media_id']
+
             logger.postprocess("Ignoring current failed release %s ...", nzbName)
 
             url = baseURL + "/release.ignore"
@@ -265,41 +244,20 @@
             result = r.json()
 
             if result['success']:
-                logger.postprocess("CouchPotato successfully snatched the next highest release ...", nzbName)
+                logger.postprocess("CouchPotato successfully snatched the next highest release above %s ...", nzbName)
                 return 0
             else:
                 logger.postprocess("CouchPotato was unable to find a higher release then %s to snatch ...", nzbName)
                 return 1
 
-        if not (download_id or media_id or release_id) and imdbid:
-            url = baseURL + "/media.get"
-
-            logger.debug("Opening URL: %s", url)
-
-            # we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing.
-            timeout = time.time() + 60 * int(wait_for)
-            while (time.time() < timeout): # only wait 2 (default) minutes, then return.
-                try:
-                    r = requests.get(url, params={'id':imdbid})
-                except requests.ConnectionError:
-                    logger.error("Unable to open URL")
-                    return 1 # failure
-                result = r.json()
-                if result['media']['status'] == 'ignored':
-                    logger.postprocess("CouchPotato successfully added %s to it's database ...", nzbName)
-                    return 0
-            logger.postprocess("CouchPotato was unable to add %s to its database ...", nzbName)
-            return 1
-        elif not (download_id or media_id or release_id):
-            return 1
-
         # we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing.
         timeout = time.time() + 60 * int(wait_for)
         while (time.time() < timeout): # only wait 2 (default) minutes, then return.
-            current_status = self.get_status(baseURL, media_id, release_id)
-            if current_status is not None and current_status != release_status: # Something has changed. CPS must have processed this movie.
-                logger.postprocess("SUCCESS: This release is now marked as status [%s] in CouchPotatoServer", current_status.upper())
+            releases_current = self.get_releases(baseURL, download_id, dirName, nzbName)
+            releasesDiff = self.releases_diff(releases, releases_current)
+            if releasesDiff: # Something has changed. CPS must have processed this movie.
+                release_status = releasesDiff[releasesDiff.keys()[0]]['status']
+                logger.postprocess("SUCCESS: Release %s marked as [%s] on CouchPotato", nzbName, release_status)
                 return 0 # success
             # pause and let CouchPotatoServer catch its breath
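
The wait loop at the end of process() (last hunk above) boils down to polling get_releases() and diffing against the pre-renamer snapshot. A self-contained sketch of that pattern follows; the helper name, the callable parameters, and the 10-second poll interval are assumptions, not code from this commit, which keeps the loop inline and calls self.get_releases()/self.releases_diff() directly.

    import time

    def wait_for_release_change(get_releases, releases_diff, baseline, wait_for_minutes, poll_seconds=10):
        # poll CouchPotato until the baseline release snapshot changes or we time out
        timeout = time.time() + 60 * int(wait_for_minutes)
        while time.time() < timeout:
            changed = releases_diff(baseline, get_releases())
            if changed:
                return changed  # e.g. a release's status flipped after the renamer ran
            time.sleep(poll_seconds)  # let CouchPotato catch its breath between polls
        return {}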

View file

@@ -408,19 +408,15 @@ def get_dirnames(section, subsections=None):
         if watch_dir:
             dirNames.extend([os.path.join(watch_dir, o) for o in os.listdir(watch_dir) if
                              os.path.isdir(os.path.join(watch_dir, o))])
-            if not dirNames:
-                logger.warning("%s:%s has no directories identified to scan inside %s", section, subsection, watch_dir)
 
         if outputDirectory:
             dirNames.extend([os.path.join(outputDirectory, o) for o in os.listdir(outputDirectory) if
                              os.path.isdir(os.path.join(outputDirectory, o))])
-            if not dirNames:
-                logger.warning("%s:%s has no directories identified to scan inside %s", section, subsection, outputDirectory)
 
-        if watch_dir is None and outputDirectory is None:
-            logger.warning("%s:%s has no watch_dir or outputDirectory setup to be Scanned, go fix you autoProcessMedia.cfg file.", section, subsection)
+    if not dirNames:
+        logger.warning("%s:%s has no directories identified for post-processing", section, subsection)
 
-    return dirNames
+    return list(set(dirNames))
 
 def delete(dirName):
     logger.info("Deleting failed files and folder %s", dirName)

File diff suppressed because one or more lines are too long