Mirror of https://github.com/Tautulli/Tautulli.git

Merge 4808745f00 into 14d4940d05

Commit fcf73da080
5 changed files with 356 additions and 6 deletions
28  lib/requests_futures/__init__.py  Normal file
@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-

# Requests Futures

"""
async requests HTTP library
~~~~~~~~~~~~~~~~~~~~~

"""

__title__ = 'requests-futures'
__version__ = '0.9.5'
__build__ = 0x000000
__author__ = 'Ross McFarland'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Ross McFarland'

# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

logging.getLogger(__name__).addHandler(NullHandler())
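The NullHandler shim above silences logging's "No handler found" warning for
applications that never configure logging. A minimal sketch (not part of the
commit) of how an application opts back in to the package's log output; the
logger name follows from __name__ in the file above:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger('requests_futures').debug('handlers configured')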
73  lib/requests_futures/sessions.py  Normal file
@@ -0,0 +1,73 @@
# -*- coding: utf-8 -*-
"""
requests_futures
~~~~~~~~~~~~~~~~

This module provides a small add-on for the requests http library. It makes use
of python 3.3's concurrent.futures or the futures backport for previous
releases of python.

    from requests_futures import FuturesSession

    session = FuturesSession()
    # request is run in the background
    future = session.get('http://httpbin.org/get')
    # ... do other stuff ...
    # wait for the request to complete, if it hasn't already
    response = future.result()
    print('response status: {0}'.format(response.status_code))
    print(response.content)

"""

from concurrent.futures import ThreadPoolExecutor
from requests import Session
from requests.adapters import DEFAULT_POOLSIZE, HTTPAdapter


class FuturesSession(Session):

    def __init__(self, executor=None, max_workers=2, *args, **kwargs):
        """Creates a FuturesSession

        Notes
        ~~~~~

        * ProcessPoolExecutor is not supported b/c Response objects are
          not picklable.

        * If you provide both `executor` and `max_workers`, the latter is
          ignored and provided executor is used as is.
        """
        super(FuturesSession, self).__init__(*args, **kwargs)
        if executor is None:
            executor = ThreadPoolExecutor(max_workers=max_workers)
            # set connection pool size equal to max_workers if needed
            if max_workers > DEFAULT_POOLSIZE:
                adapter_kwargs = dict(pool_connections=max_workers,
                                      pool_maxsize=max_workers)
                self.mount('https://', HTTPAdapter(**adapter_kwargs))
                self.mount('http://', HTTPAdapter(**adapter_kwargs))

        self.executor = executor

    def request(self, *args, **kwargs):
        """Maintains the existing api for Session.request.

        Used by all of the higher level methods, e.g. Session.get.

        The background_callback param allows you to do some processing on the
        response in the background, e.g. call resp.json() so that json parsing
        happens in the background thread.
        """
        func = sup = super(FuturesSession, self).request

        background_callback = kwargs.pop('background_callback', None)
        if background_callback:
            def wrap(*args_, **kwargs_):
                resp = sup(*args_, **kwargs_)
                background_callback(self, resp)
                return resp

            func = wrap

        return self.executor.submit(func, *args, **kwargs)
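The request() docstring above describes the background_callback hook, but the
module-level example never exercises it. A minimal sketch of how it combines
with future.result(), assuming the vendored package is importable as
requests_futures and that httpbin.org is reachable (both assumptions, not part
of this commit):

    from requests_futures.sessions import FuturesSession

    session = FuturesSession(max_workers=4)

    def parse_json(sess, resp):
        # Runs in the worker thread: the JSON decode cost is paid in the
        # background instead of at .result() time. The callback receives
        # (session, response), matching wrap() above.
        resp.data = resp.json()

    future = session.get('http://httpbin.org/get',
                         background_callback=parse_json)
    response = future.result()  # blocks until request and callback finish
    print('response status: {0}'.format(response.status_code))
    print(response.data['url'])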
@@ -28,7 +28,23 @@ import os
import json
import xmltodict
import math
from functools import wraps


def profile_func(func):
    @wraps(func)
    def inner(*args, **kwargs):
        from plexpy import logger
        start = time.time()
        res = func(*args, **kwargs)
        logger.debug('%s took %s' % (func.__name__, time.time() - start))
        return res
    return inner


def tobool(s):
    if s in [1, '1', 'on', 'yes', 'Yes']:
        return True
    else:
        return False


def multikeysort(items, columns):
    comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
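For reference, a small sketch of how the new helpers behave; slow_lookup is a
hypothetical function, not something from the codebase:

    from plexpy.helpers import profile_func, tobool

    @profile_func
    def slow_lookup(key):
        # profile_func logs "slow_lookup took <seconds>" via plexpy.logger
        return key * 2

    slow_lookup(21)

    tobool('on')   # True
    tobool('0')    # False -- anything outside [1, '1', 'on', 'yes', 'Yes']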
@@ -13,11 +13,16 @@
# You should have received a copy of the GNU General Public License
# along with PlexPy. If not, see <http://www.gnu.org/licenses/>.

import urllib2
import time
import concurrent.futures as cf
from requests_futures.sessions import FuturesSession

from plexpy import logger, helpers, users, http_handler, common
from urlparse import urlparse
from request import request_json

import plexpy
import urllib2


def get_server_friendly_name():
    logger.info("Requesting name from server...")
@@ -692,11 +697,225 @@ class PmsConnect(object)
        output = {'metadata': metadata_list}
        return output

    """
    Return processed and validated session list.

    Output: array
    """
    def get_watched_status(self, sort=None, sections='all', all_params=False, get_file_size=True,
                           exclude_path=None, watched_older_then=None, hide_watched=0, ignore_section='',
                           *args, **kwargs):
        """
        Returns a list of library items and their watched status.

        named args: used to enable and disable sorting/filtering
        kwargs: used for filtering inside the dicts; adding type="movie" will only return movies

        Output: list of dicts

        # Adding all_params=1 makes the call insanely slow.
        """
        # Add a cache?

        use_watched_older_then_sort = True
        if watched_older_then is None:
            use_watched_older_then_sort = False
            watched_older_then = time.time()
        else:
            watched_older_then = int(watched_older_then)

        if plexpy.CONFIG.PMS_URL:
            url_parsed = urlparse(plexpy.CONFIG.PMS_URL)
            hostname = url_parsed.hostname
            port = url_parsed.port
            self.protocol = url_parsed.scheme
        else:
            hostname = plexpy.CONFIG.PMS_IP
            port = plexpy.CONFIG.PMS_PORT
            self.protocol = 'http'

        b_url = '%s://%s:%s' % (self.protocol, hostname, port)

        h = {'Accept': 'application/json',
             'X-Plex-Token': plexpy.CONFIG.PMS_TOKEN
             }

        hide_watched = 'unwatched' if hide_watched else 'all'

        def fetch(s=''):
            result = request_json(b_url + s, headers=h)
            if result:
                return result

        def maybe_number(v):
            try:
                v = int(v)
            except (TypeError, ValueError):
                try:
                    v = float(v)
                except (TypeError, ValueError):
                    pass
            return v

        result = fetch('/library/sections/all').get('_children')

        # Final results for data grab
        f_result = []

        # List of (item, url) tuples passed to the futures session
        files_urls = []

        # dicts built from the futures' responses are stored here
        futures_results = []

        # Start fetching data
        if result:
            for sections in result:
                if ignore_section != sections['title']:
                    section = fetch('/library/sections/%s/%s' % (sections['key'], hide_watched))
                    for item in section['_children']:

                        d = {'title': item.get('title'),
                             'type': item['type'],
                             'ratingKey': item['ratingKey'],
                             'updatedAt': item.get('updatedAt', 0),
                             'addedAt': item.get('addedAt', 0),
                             'viewCount': item.get('viewCount', 0),
                             'files': [],
                             'lastViewedAt': item.get('lastViewedAt', 0),
                             'viewOffset': item.get('viewOffset', 0),
                             'spaceUsed': 0,  # custom
                             'isSeen': 0,  # custom
                             '_elementType': item['_elementType']
                             }

                        # Only movies have their files listed here
                        if item['_elementType'] == 'Video':
                            d['viewCount'] = item.get('viewCount', 0)
                            if item.get('viewCount', 0) > 0:
                                d['isSeen'] = 1

                            # grab the file names and sizes
                            if '_children' in item:
                                for c in item['_children']:
                                    if '_children' in c:
                                        for part in c['_children']:
                                            f = part.get('file')
                                            s = part.get('size', 0)
                                            d['spaceUsed'] += s
                                            if f and s:
                                                d['files'].append({'size': s, 'file': f})

                            f_result.append(d)

                        #elif item['_elementType'] == 'Track':
                        #    # I don't think it returns a "Track" at all
                        #    pass

                        elif item['_elementType'] == 'Directory':

                            if item['type'] == 'show' or item['type'] == 'artist':
                                d['viewedLeafCount'] = item.get('viewedLeafCount', 0)
                                d['leafCount'] = item.get('leafCount', 0)
                                d['_elementType'] = item['_elementType']
                                if item['type'] == 'show':
                                    # We are gonna assume an entire show is watched.
                                    # Might be false if someone watched the same ep
                                    # over and over
                                    if d['viewedLeafCount'] >= d['leafCount'] and d['viewedLeafCount'] > 0:
                                        d['viewCount'] = item['viewedLeafCount']  # only set to ease filtering
                                        d['isSeen'] = 1

                                elif item['type'] == 'artist':
                                    d['isSeen'] = 1 if d['viewCount'] > 0 else 0

                                if get_file_size:  # Runs faster without
                                    files_urls.append((item, b_url + '/library/metadata/' + str(item['ratingKey']) + '/allLeaves'))
                                else:
                                    f_result.append(d)

                            #elif item['type'] == 'artist':
                            #    pass  # Handled above, left for future stuff

        t_result = f_result

        if get_file_size:
            future_sess = FuturesSession(max_workers=8)
            futs = {}  # Future holder
            for zomg in files_urls:
                t = future_sess.get(zomg[1], timeout=5, headers=h)
                futs[t] = zomg[0]

            for fut_obj in cf.as_completed(futs):
                try:
                    res = fut_obj.result()
                    jsn = res.json()
                    f_item = futs[fut_obj]
                    # Add the same dict as movies..
                    d = {'title': f_item.get('title'),
                         'type': f_item['type'],
                         'ratingKey': f_item['ratingKey'],
                         'updatedAt': f_item.get('updatedAt', 0),
                         'addedAt': f_item.get('addedAt', 0),
                         'viewCount': f_item.get('viewCount', 0),
                         'files': [],
                         'lastViewedAt': f_item.get('lastViewedAt', 0),
                         'viewOffset': f_item.get('viewOffset', 0),
                         'spaceUsed': 0,  # custom
                         'isSeen': 0,  # custom
                         }
                    if f_item['_elementType'] == 'Directory':
                        if f_item['type'] in ['show', 'artist']:
                            if f_item['type'] == 'show':
                                # We are gonna assume the user has watched the
                                # entire show if every leaf has been viewed
                                d['isSeen'] = 1 if f_item['viewedLeafCount'] >= f_item['leafCount'] and f_item['viewedLeafCount'] > 0 else 0
                                d['viewCount'] = f_item['viewedLeafCount']
                            elif f_item['type'] == 'artist':
                                d['isSeen'] = 1 if d['viewCount'] > 0 else 0
                    if '_children' in jsn:
                        for e in jsn['_children']:
                            if '_children' in e:
                                for c in e['_children']:
                                    if '_children' in c:
                                        for cc in c['_children']:
                                            f = cc.get('file')
                                            s = cc.get('size', 0)
                                            d['spaceUsed'] += s
                                            if f and s:
                                                d['files'].append({'size': s, 'file': f})
                    futures_results.append(d)

                except Exception as error:
                    logger.error('Error while trying to get/process a future %s' % error)

            t_result = t_result + futures_results

        # If any user-given kwargs were given, filter on them.
        if kwargs:
            logger.debug('kwargs were given (%s); filtering the dicts on them' % kwargs)
            if not all_params:
                t_result = [d for d in t_result if any(d.get(k) == maybe_number(kwargs[k]) for k in kwargs)]
            else:
                logger.debug('All kwargs are required to match')
                t_result = [d for d in t_result if all(d.get(k, None) == maybe_number(kwargs[k]) for k in kwargs)]

        if use_watched_older_then_sort:
            t_result = [i for i in t_result if not i['viewCount'] or i['lastViewedAt'] <= watched_older_then]

        if sort:
            logger.debug('Sorted on %s' % sort)
            t_result = sorted(t_result, key=lambda k: k[sort], reverse=True)

        return t_result


    def get_current_activity(self):
        session_data = self.get_sessions(output_format='xml')
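A sketch of how the new method is meant to be driven, based on its docstring;
the kwargs values and the week-old cutoff are illustrative, not from the diff:

    import time
    from plexpy import pmsconnect

    pms = pmsconnect.PmsConnect()

    # Only movies, skipping the per-leaf file-size futures for speed
    movies = pms.get_watched_status(type='movie', sort='addedAt',
                                    get_file_size=False)

    # Items last watched more than a week ago (plus unwatched ones, per the
    # watched_older_then filter); pick out the fully watched shows
    older = pms.get_watched_status(type='show',
                                   watched_older_then=time.time() - 7 * 86400)
    fully_watched = [d for d in older if d['isSeen']]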
@@ -14,7 +14,7 @@
# along with PlexPy. If not, see <http://www.gnu.org/licenses/>.

from plexpy import logger, notifiers, plextv, pmsconnect, common, log_reader, datafactory, graphs, users, helpers
from plexpy.helpers import checked, radio
from plexpy.helpers import checked, radio, profile_func, tobool

from mako.lookup import TemplateLookup
from mako import exceptions
@@ -923,6 +923,20 @@ class WebInterface(object)
        else:
            logger.warn('Unable to retrieve data.')

    @profile_func
    @cherrypy.tools.json_out()
    @cherrypy.expose
    def get_watched(self, sort=None, sections='all', all_params=False,
                    get_file_size=True, exclude_path=None, watched_older_then=None,
                    hide_watched=0, ignore_section='', **kwargs):
        """ See get_watched_status for docs """
        all_params = tobool(all_params)
        get_file_size = tobool(get_file_size)
        hide_watched = tobool(hide_watched)
        pms_connect = pmsconnect.PmsConnect()
        t = pms_connect.get_watched_status(sort=sort, sections=sections, all_params=all_params,
                                           get_file_size=get_file_size, exclude_path=exclude_path,
                                           hide_watched=hide_watched, watched_older_then=watched_older_then,
                                           ignore_section=ignore_section, **kwargs)
        return t

    @cherrypy.expose
    def get_episode_list_json(self, rating_key='', **kwargs):
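With the route exposed and JSON-encoded by cherrypy.tools.json_out(), the
endpoint can be exercised over HTTP. A hedged example; PlexPy's default bind
of localhost:8181 and the absence of http_root/auth are assumptions:

    import requests

    r = requests.get('http://localhost:8181/get_watched',
                     params={'type': 'movie', 'sort': 'addedAt',
                             'get_file_size': '0'})
    for item in r.json():
        print('%s (seen: %s)' % (item['title'], item['isSeen']))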