Merge branch 'v2.5-export' into nightly

JonnyWong16 2020-10-02 20:45:11 -07:00
commit 739c977cd7
No known key found for this signature in database
GPG key ID: B1F1F9807184697A
45 changed files with 6500 additions and 521 deletions

View file

@@ -45,7 +45,7 @@ if PYTHON2:
import common
import database
import datafactory
import helpers
import exporter
import libraries
import logger
import mobile_app
@@ -65,7 +65,7 @@ else:
from plexpy import common
from plexpy import database
from plexpy import datafactory
from plexpy import helpers
from plexpy import exporter
from plexpy import libraries
from plexpy import logger
from plexpy import mobile_app
@@ -226,6 +226,8 @@ def initialize(config_file):
CONFIG.BACKUP_DIR, os.path.join(DATA_DIR, 'backups'), 'backups')
CONFIG.CACHE_DIR, _ = check_folder_writable(
CONFIG.CACHE_DIR, os.path.join(DATA_DIR, 'cache'), 'cache')
CONFIG.EXPORT_DIR, _ = check_folder_writable(
CONFIG.EXPORT_DIR, os.path.join(DATA_DIR, 'exports'), 'exports')
CONFIG.NEWSLETTER_DIR, _ = check_folder_writable(
CONFIG.NEWSLETTER_DIR, os.path.join(DATA_DIR, 'newsletters'), 'newsletters')
@@ -533,6 +535,9 @@ def start():
notification_handler.start_threads(num_threads=CONFIG.NOTIFICATION_THREADS)
notifiers.check_browser_enabled()
# Cancel processing exports
exporter.cancel_exports()
if CONFIG.FIRST_RUN_COMPLETE:
activity_pinger.connect_server(log=True, startup=True)
@@ -789,6 +794,17 @@ def dbcheck():
'img_hash TEXT, cloudinary_title TEXT, cloudinary_url TEXT)'
)
# exports table :: This table keeps record of the exported files
c_db.execute(
'CREATE TABLE IF NOT EXISTS exports (id INTEGER PRIMARY KEY AUTOINCREMENT, '
'timestamp INTEGER, section_id INTEGER, user_id INTEGER, rating_key INTEGER, media_type TEXT, '
'filename TEXT, file_format TEXT, '
'metadata_level INTEGER, media_info_level INTEGER, '
'include_thumb INTEGER DEFAULT 0, include_art INTEGER DEFAULT 0, '
'custom_fields TEXT, '
'file_size INTEGER DEFAULT 0, complete INTEGER DEFAULT 0)'
)
# Upgrade sessions table from earlier versions
try:
c_db.execute('SELECT started FROM sessions')

View file

@@ -74,6 +74,9 @@ MEDIA_TYPE_HEADERS = {
'artist': 'Artists',
'album': 'Albums',
'track': 'Tracks',
'video': 'Videos',
'audio': 'Tracks',
'photo': 'Photos'
}
PLATFORM_NAME_OVERRIDES = {

View file

@@ -96,6 +96,7 @@ _CONFIG_DEFINITIONS = {
'CONFIG_VERSION': (int, 'Advanced', 0),
'DO_NOT_OVERRIDE_GIT_BRANCH': (int, 'General', 0),
'ENABLE_HTTPS': (int, 'General', 0),
'EXPORT_DIR': (str, 'General', ''),
'FIRST_RUN_COMPLETE': (int, 'General', 0),
'FREEZE_DB': (int, 'General', 0),
'GET_FILE_SIZES': (int, 'General', 0),
@@ -195,7 +196,7 @@ _WHITELIST_KEYS = ['HTTPS_KEY']
_DO_NOT_IMPORT_KEYS = [
'FIRST_RUN_COMPLETE', 'GET_FILE_SIZES_HOLD', 'GIT_PATH', 'PMS_LOGS_FOLDER',
'BACKUP_DIR', 'CACHE_DIR', 'LOG_DIR', 'NEWSLETTER_DIR', 'NEWSLETTER_CUSTOM_DIR',
'BACKUP_DIR', 'CACHE_DIR', 'EXPORT_DIR', 'LOG_DIR', 'NEWSLETTER_DIR', 'NEWSLETTER_CUSTOM_DIR',
'HTTP_HOST', 'HTTP_PORT', 'HTTP_ROOT',
'HTTP_USERNAME', 'HTTP_PASSWORD', 'HTTP_HASH_PASSWORD', 'HTTP_HASHED_PASSWORD',
'ENABLE_HTTPS', 'HTTPS_CREATE_CERT', 'HTTPS_CERT', 'HTTPS_CERT_CHAIN', 'HTTPS_KEY'

View file

@@ -216,6 +216,11 @@ def delete_recently_added():
return clear_table('recently_added')
def delete_exports():
logger.info("Tautulli Database :: Clearing exported items from database.")
return clear_table('exports')
def delete_rows_from_table(table, row_ids):
if row_ids and isinstance(row_ids, str):
row_ids = list(map(helpers.cast_to_int, row_ids.split(',')))

View file

@@ -290,8 +290,8 @@ class DataFactory(object):
'recordsTotal': query['totalCount'],
'data': session.friendly_name_to_username(rows),
'draw': query['draw'],
'filter_duration': helpers.human_duration(filter_duration, sig='dhm'),
'total_duration': helpers.human_duration(total_duration, sig='dhm')
'filter_duration': helpers.human_duration(filter_duration, sig='dhm', units='s'),
'total_duration': helpers.human_duration(total_duration, sig='dhm', units='s')
}
return dict

plexpy/exporter.py (new file, 2040 lines)

File diff suppressed because it is too large

View file

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
# This file is part of Tautulli.
#
@@ -28,7 +28,7 @@ from cloudinary.api import delete_resources_by_tag
from cloudinary.uploader import upload
from cloudinary.utils import cloudinary_url
import datetime
from functools import wraps
from functools import reduce, wraps
import hashlib
import imghdr
from future.moves.itertools import islice, zip_longest
@@ -38,6 +38,7 @@ import ipwhois.utils
from IPy import IP
import json
import math
import operator
import os
import re
import shlex
@@ -242,30 +243,44 @@ def iso_to_datetime(iso):
return arrow.get(iso).datetime
def human_duration(s, sig='dhms'):
def datetime_to_iso(dt, to_date=False):
if isinstance(dt, datetime.datetime):
if to_date:
dt = dt.date()
return dt.isoformat()
return dt
hd = ''
if str(s).isdigit() and s > 0:
d, h = divmod(s, 86400)
h, m = divmod(h, 3600)
m, s = divmod(m, 60)
def human_duration(ms, sig='dhms', units='ms'):
factors = {'d': 86400000,
'h': 3600000,
'm': 60000,
's': 1000,
'ms': 1}
if str(ms).isdigit() and ms > 0:
ms = ms * factors[units]
d, h = divmod(ms, factors['d'])
h, m = divmod(h, factors['h'])
m, s = divmod(m, factors['m'])
s, ms = divmod(s, factors['s'])
hd_list = []
if sig >= 'd' and d > 0:
d = d + 1 if sig == 'd' and h >= 12 else d
hd_list.append(str(d) + ' days')
hd_list.append(str(d) + ' day' + ('s' if d > 1 else ''))
if sig >= 'dh' and h > 0:
h = h + 1 if sig == 'dh' and m >= 30 else h
hd_list.append(str(h) + ' hrs')
hd_list.append(str(h) + ' hr' + ('s' if h > 1 else ''))
if sig >= 'dhm' and m > 0:
m = m + 1 if sig == 'dhm' and s >= 30 else m
hd_list.append(str(m) + ' mins')
hd_list.append(str(m) + ' min' + ('s' if m > 1 else ''))
if sig >= 'dhms' and s > 0:
hd_list.append(str(s) + ' secs')
hd_list.append(str(s) + ' sec' + ('s' if s > 1 else ''))
hd = ' '.join(hd_list)
else:
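# A quick sanity check of the new signature (values illustrative; the default
# input unit is now milliseconds, and units='s' keeps seconds-based callers
# working):
#
#     >>> human_duration(90061000)
#     '1 day 1 hr 1 min 1 sec'
#     >>> human_duration(3661, sig='dhm', units='s')
#     '1 hr 1 min'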
@@ -382,6 +397,13 @@ def cleanTitle(title):
return title
def clean_filename(filename, replace='_'):
whitelist = "-_.()[] {}{}".format(string.ascii_letters, string.digits)
cleaned_filename = unicodedata.normalize('NFKD', filename).encode('ASCII', 'ignore').decode()
cleaned_filename = ''.join(c if c in whitelist else replace for c in cleaned_filename)
return cleaned_filename
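# Behavior sketch (example filename is illustrative): accented characters are
# transliterated to ASCII and anything outside the whitelist becomes the
# replacement character:
#
#     >>> clean_filename('Café: déjà vu?')
#     'Cafe_ deja vu_'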
def split_path(f):
"""
Split a path into components, starting with the drive letter (if any). Given
@@ -559,6 +581,64 @@ def process_json_kwargs(json_kwargs):
return params
def process_datatable_rows(rows, json_data, default_sort, search_cols=None, sort_keys=None):
if search_cols is None:
search_cols = []
if sort_keys is None:
sort_keys = {}
results = []
total_count = len(rows)
# Search results
search_value = json_data['search']['value'].lower()
if search_value:
searchable_columns = [d['data'] for d in json_data['columns'] if d['searchable']] + search_cols
for row in rows:
for k, v in row.items():
if k in sort_keys:
value = sort_keys[k].get(v, v)
else:
value = v
value = str(value).lower()
if k in searchable_columns and search_value in value:
results.append(row)
break
else:
results = rows
filtered_count = len(results)
# Sort results
results = sorted(results, key=lambda k: k[default_sort].lower())
sort_order = json_data['order']
for order in reversed(sort_order):
sort_key = json_data['columns'][int(order['column'])]['data']
reverse = True if order['dir'] == 'desc' else False
results = sorted(results, key=lambda k: sort_helper(k, sort_key, sort_keys), reverse=reverse)
# Paginate results
results = results[json_data['start']:(json_data['start'] + json_data['length'])]
data = {
'results': results,
'total_count': total_count,
'filtered_count': filtered_count
}
return data
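# A minimal usage sketch; the rows and json_data below are hypothetical, with
# json_data mirroring the DataTables server-side request format consumed above:
#
#     >>> rows = [{'title': 'Frozen II'}, {'title': 'Aladdin'}]
#     >>> json_data = {'search': {'value': ''},
#     ...              'columns': [{'data': 'title', 'searchable': True}],
#     ...              'order': [{'column': 0, 'dir': 'asc'}],
#     ...              'start': 0, 'length': 25}
#     >>> process_datatable_rows(rows, json_data, default_sort='title')
#     {'results': [{'title': 'Aladdin'}, {'title': 'Frozen II'}],
#      'total_count': 2, 'filtered_count': 2}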
def sort_helper(k, sort_key, sort_keys):
v = k[sort_key]
if sort_key in sort_keys:
v = sort_keys[sort_key].get(k[sort_key], v)
if isinstance(v, str):
v = v.lower()
return v
def sanitize_out(*dargs, **dkwargs):
""" Helper decorator that sanitized the output
"""
@@ -897,7 +977,7 @@ def build_datatables_json(kwargs, dt_columns, default_sort_col=None):
return json.dumps(json_data)
def humanFileSize(bytes, si=True):
def human_file_size(bytes, si=True):
if str(bytes).isdigit():
bytes = cast_to_float(bytes)
else:
@@ -919,7 +999,7 @@ def humanFileSize(bytes, si=True):
bytes /= thresh
u += 1
return "{0:.1f} {1}".format(bytes, units[u])
return "{0:.2f} {1}".format(bytes, units[u])
def parse_condition_logic_string(s, num_cond=0):
@@ -1153,6 +1233,205 @@ def bool_true(value, return_none=False):
return False
def get_attrs_to_dict(obj, attrs):
d = {}
for attr, sub in attrs.items():
no_attr = False
if isinstance(obj, dict):
value = obj.get(attr, None)
else:
try:
value = getattr(obj, attr)
except AttributeError:
no_attr = True
value = None
if callable(value):
value = value()
if isinstance(sub, str):
if isinstance(value, list):
value = [getattr(o, sub, None) for o in value]
else:
value = getattr(value, sub, None)
elif isinstance(sub, dict):
if isinstance(value, list):
value = [get_attrs_to_dict(o, sub) for o in value]
else:
value = get_attrs_to_dict(value, sub)
elif callable(sub):
if isinstance(value, list):
value = [sub(o) for o in value]
else:
if no_attr:
value = sub(obj)
else:
value = sub(value)
d[attr] = value
return d
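# Illustrative use with a hypothetical object; each sub value (None, str, dict,
# or callable) controls how the corresponding attribute is unpacked:
#
#     >>> class Movie(object):
#     ...     title = 'Frozen II'
#     ...     year = 2019
#     >>> get_attrs_to_dict(Movie(), {'title': None, 'year': None})
#     {'title': 'Frozen II', 'year': 2019}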
def flatten_dict(obj):
return flatten_tree(flatten_keys(obj))
def flatten_keys(obj, key='', sep='.'):
if isinstance(obj, list):
new_obj = [flatten_keys(o, key=key) for o in obj]
elif isinstance(obj, dict):
new_key = key + sep if key else ''
new_obj = {new_key + k: flatten_keys(v, key=new_key + k) for k, v in obj.items()}
else:
new_obj = obj
return new_obj
def flatten_tree(obj, key=''):
if isinstance(obj, list):
new_rows = []
for o in obj:
if isinstance(o, dict):
new_rows.extend(flatten_tree(o))
else:
new_rows.append({key: o})
elif isinstance(obj, dict):
common_keys = {}
all_rows = [[common_keys]]
for k, v in obj.items():
if isinstance(v, list):
all_rows.append(flatten_tree(v, k))
elif isinstance(v, dict):
common_keys.update(*flatten_tree(v))
else:
common_keys[k] = v
new_rows = [{k: v for r in row for k, v in r.items()}
for row in zip_longest(*all_rows, fillvalue={})]
else:
new_rows = []
return new_rows
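# Illustrative input/output: nested lists become dotted-key rows, with the
# common scalar keys attached to the first child row:
#
#     >>> obj = {'title': 'Frozen II', 'media': [{'bitrate': 1000}, {'bitrate': 2000}]}
#     >>> flatten_dict(obj)
#     [{'title': 'Frozen II', 'media.bitrate': 1000}, {'media.bitrate': 2000}]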
# https://stackoverflow.com/a/14692747
def get_by_path(root, items):
"""Access a nested object in root by item sequence."""
return reduce(operator.getitem, items, root)
def set_by_path(root, items, value):
"""Set a value in a nested object in root by item sequence."""
get_by_path(root, items[:-1])[items[-1]] = value
def get_dict_value_by_path(root, attr):
split_attr = attr.split('.')
value = get_by_path(root, split_attr)
for _attr in reversed(split_attr):
value = {_attr: value}
return value
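# Quick sketch of the path helpers (data is hypothetical):
#
#     >>> d = {'media': {'video': {'bitrate': 1000}}}
#     >>> get_by_path(d, ['media', 'video', 'bitrate'])
#     1000
#     >>> set_by_path(d, ['media', 'video', 'bitrate'], 2000)
#     >>> get_dict_value_by_path(d, 'media.video.bitrate')
#     {'media': {'video': {'bitrate': 2000}}}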
# https://stackoverflow.com/a/7205107
def dict_merge(a, b, path=None):
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
dict_merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass
else:
pass
else:
a[key] = b[key]
return a
#https://stackoverflow.com/a/26853961
def dict_update(*dict_args):
"""
Given any number of dictionaries, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dictionaries.
"""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
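# dict_merge folds b into a in place, recursing into nested dicts and keeping
# a's value on conflicts; dict_update is a shallow merge where later dicts win
# (examples illustrative):
#
#     >>> dict_merge({'x': {'a': 1}}, {'x': {'b': 2}})
#     {'x': {'a': 1, 'b': 2}}
#     >>> dict_update({'a': 1}, {'a': 2, 'b': 3})
#     {'a': 2, 'b': 3}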
# https://stackoverflow.com/a/28703510
def escape_xml(value):
if value is None:
return ''
value = str(value) \
.replace("&", "&") \
.replace("<", "&lt;") \
.replace(">", "&gt;") \
.replace('"', "&quot;") \
.replace("'", "&apos;")
return value
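# e.g. (illustrative):
#
#     >>> escape_xml('Fast & "Furious"')
#     'Fast &amp; &quot;Furious&quot;'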
# https://gist.github.com/reimund/5435343/
def dict_to_xml(d, root_node=None, indent=4, level=0):
wrap = not bool(root_node is None or isinstance(d, list))
root = root_node or 'objects'
root_singular = root[:-1] if root.endswith('s') and isinstance(d, list) else root
xml = ''
children = []
if isinstance(d, dict):
for key, value in sorted(d.items()):
if isinstance(value, dict):
children.append(dict_to_xml(value, key, level=level + 1))
elif isinstance(value, list):
children.append(dict_to_xml(value, key, level=level + 1))
else:
xml = '{} {}="{}"'.format(xml, key, escape_xml(value))
elif isinstance(d, list):
for value in d:
# Custom tag replacement for collections/playlists
if isinstance(value, dict) and root in ('children', 'items'):
root_singular = value.get('type', root_singular)
children.append(dict_to_xml(value, root_singular, level=level))
else:
children.append(escape_xml(d))
end_tag = '>' if len(children) > 0 else '/>'
end_tag += '\n' if isinstance(d, list) or isinstance(d, dict) else ''
spaces = ' ' * level * indent
if wrap or isinstance(d, dict):
xml = '{}<{}{}{}'.format(spaces, root, xml, end_tag)
if len(children) > 0:
for child in children:
xml = '{}{}'.format(xml, child)
if wrap or isinstance(d, dict):
spaces = spaces if isinstance(d, dict) else ''
xml = '{}{}</{}>\n'.format(xml, spaces, root)
return xml
def is_hdr(bit_depth, color_space):
bit_depth = cast_to_int(bit_depth)
return bit_depth > 8 and color_space == 'bt2020nc'
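# e.g. (illustrative): is_hdr(10, 'bt2020nc') -> True; is_hdr(8, 'bt709') -> False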
def page(endpoint, *args, **kwargs):
endpoints = {
'pms_image_proxy': pms_image_proxy,

View file

@@ -33,6 +33,7 @@ if plexpy.PYTHON2:
import plextv
import pmsconnect
import session
from plex import Plex
else:
from plexpy import common
from plexpy import database
@@ -42,6 +43,7 @@ else:
from plexpy import plextv
from plexpy import pmsconnect
from plexpy import session
from plexpy.plex import Plex
def refresh_libraries():
@@ -142,6 +144,163 @@ def has_library_type(section_type):
return bool(result)
def get_collections(section_id=None):
plex = Plex(plexpy.CONFIG.PMS_URL, session.get_session_user_token())
library = plex.get_library(section_id)
if library.type not in ('movie', 'show', 'artist'):
return []
collections = library.collection()
collections_list = []
for collection in collections:
collection_mode = collection.collectionMode
if collection_mode is None:
collection_mode = -1
collection_sort = collection.collectionSort
if collection_sort is None:
collection_sort = 0
collection_dict = {
'addedAt': helpers.datetime_to_iso(collection.addedAt),
'art': collection.art,
'childCount': collection.childCount,
'collectionMode': helpers.cast_to_int(collection_mode),
'collectionSort': helpers.cast_to_int(collection_sort),
'contentRating': collection.contentRating,
'guid': collection.guid,
'librarySectionID': collection.librarySectionID,
'librarySectionTitle': collection.librarySectionTitle,
'maxYear': collection.maxYear,
'minYear': collection.minYear,
'ratingKey': collection.ratingKey,
'subtype': collection.subtype,
'summary': collection.summary,
'thumb': collection.thumb,
'title': collection.title,
'titleSort': collection.titleSort,
'type': collection.type,
'updatedAt': helpers.datetime_to_iso(collection.updatedAt)
}
collections_list.append(collection_dict)
return collections_list
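# Illustrative return value; the keys map one-to-one onto the plexapi
# Collection attributes gathered above:
#
#     >>> get_collections(section_id=1)
#     [{'title': 'Marvel', 'childCount': 23, 'collectionMode': -1, ...}, ...]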
def get_collections_list(section_id=None, **kwargs):
if not section_id:
default_return = {'recordsFiltered': 0,
'recordsTotal': 0,
'draw': 0,
'data': 'null',
'error': 'Unable to get collections: missing section_id.'}
return default_return
collections = get_collections(section_id=section_id)
# Get datatables JSON data
json_data = helpers.process_json_kwargs(json_kwargs=kwargs['json_data'])
search_cols = ['title']
sort_keys = {
'collectionMode': {
-1: 'Library Default',
0: 'Hide collection',
1: 'Hide items in this collection',
2: 'Show this collection and its items'
},
'collectionSort': {
0: 'Release date',
1: 'Alphabetical'
}
}
results = helpers.process_datatable_rows(
collections, json_data, default_sort='titleSort',
search_cols=search_cols, sort_keys=sort_keys)
data = {
'recordsFiltered': results['filtered_count'],
'recordsTotal': results['total_count'],
'data': results['results'],
'draw': int(json_data['draw'])
}
return data
def get_playlists(section_id=None, user_id=None):
if user_id and not session.get_session_user_id():
import users
user_tokens = users.Users().get_tokens(user_id=user_id)
plex_token = user_tokens['server_token']
else:
plex_token = session.get_session_user_token()
if not plex_token:
return []
plex = Plex(plexpy.CONFIG.PMS_URL, plex_token)
if user_id:
playlists = plex.plex.playlists()
else:
library = plex.get_library(section_id)
playlists = library.playlist()
playlists_list = []
for playlist in playlists:
playlist_dict = {
'addedAt': helpers.datetime_to_iso(playlist.addedAt),
'composite': playlist.composite,
'duration': playlist.duration,
'guid': playlist.guid,
'leafCount': playlist.leafCount,
'librarySectionID': section_id,
'playlistType': playlist.playlistType,
'ratingKey': playlist.ratingKey,
'smart': playlist.smart,
'summary': playlist.summary,
'title': playlist.title,
'type': playlist.type,
'updatedAt': helpers.datetime_to_iso(playlist.updatedAt),
'userID': user_id
}
playlists_list.append(playlist_dict)
return playlists_list
def get_playlists_list(section_id=None, user_id=None, **kwargs):
if not section_id and not user_id:
default_return = {'recordsFiltered': 0,
'recordsTotal': 0,
'draw': 0,
'data': 'null',
'error': 'Unable to get playlists: missing section_id or user_id.'}
return default_return
playlists = get_playlists(section_id=section_id, user_id=user_id)
# Get datatables JSON data
json_data = helpers.process_json_kwargs(json_kwargs=kwargs['json_data'])
results = helpers.process_datatable_rows(
playlists, json_data, default_sort='title')
data = {
'recordsFiltered': results['filtered_count'],
'recordsTotal': results['total_count'],
'data': results['results'],
'draw': int(json_data['draw'])
}
return data
class Libraries(object):
def __init__(self):

View file

@@ -1072,7 +1072,7 @@ def build_media_notify_params(notify_action=None, session=None, timeline=None, m
'subtitle_language_code': notify_params['subtitle_language_code'],
'file': notify_params['file'],
'filename': os.path.basename(notify_params['file']),
'file_size': helpers.humanFileSize(notify_params['file_size']),
'file_size': helpers.human_file_size(notify_params['file_size']),
'indexes': notify_params['indexes'],
'section_id': notify_params['section_id'],
'rating_key': notify_params['rating_key'],

View file

@@ -3028,8 +3028,7 @@ class SCRIPTS(Notifier):
if user_id:
user_tokens = users.Users().get_tokens(user_id=user_id)
if user_tokens and user_tokens['server_token']:
custom_env['PLEX_USER_TOKEN'] = str(user_tokens['server_token'])
custom_env['PLEX_USER_TOKEN'] = str(user_tokens['server_token'])
if self.pythonpath and plexpy.INSTALL_TYPE not in ('windows', 'macos'):
custom_env['PYTHONPATH'] = os.pathsep.join([p for p in sys.path if p])

plexpy/plex.py (new file, 42 lines)
View file

@@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
# This file is part of Tautulli.
#
# Tautulli is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tautulli is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tautulli. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from future.builtins import object
from future.builtins import str
from plexapi.server import PlexServer
import plexpy
if plexpy.PYTHON2:
import logger
else:
from plexpy import logger
class Plex(object):
def __init__(self, url, token):
self.plex = PlexServer(url, token)
def get_library(self, section_id):
return self.plex.library.sectionByID(str(section_id))
def get_library_items(self, section_id):
return self.get_library(str(section_id)).all()
def get_item(self, rating_key):
return self.plex.fetchItem(rating_key)
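# A minimal usage sketch (PMS_URL and PMS_TOKEN come from the Tautulli config;
# the section id and rating key are illustrative):
#
#     plex = Plex(plexpy.CONFIG.PMS_URL, plexpy.CONFIG.PMS_TOKEN)
#     library = plex.get_library('1')      # plexapi LibrarySection
#     items = plex.get_library_items('1')  # every item in that section
#     movie = plex.get_item(270716)        # single item by ratingKey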

View file

@@ -24,6 +24,7 @@ import json
import os
import time
from future.moves.urllib.parse import quote, quote_plus, urlencode
from xml.dom.minidom import Node
import plexpy
if plexpy.PYTHON2:
@@ -31,6 +32,7 @@ if plexpy.PYTHON2:
import common
import helpers
import http_handler
import libraries
import logger
import plextv
import session
@@ -40,6 +42,7 @@ else:
from plexpy import common
from plexpy import helpers
from plexpy import http_handler
from plexpy import libraries
from plexpy import logger
from plexpy import plextv
from plexpy import session
@@ -173,6 +176,22 @@ class PmsConnect(object):
return request
def get_playlist_items(self, rating_key='', output_format=''):
"""
Return metadata for items of the requested playlist.
Parameters required: rating_key { Plex ratingKey }
Optional parameters: output_format { dict, json }
Output: array
"""
uri = '/playlists/' + rating_key + '/items'
request = self.request_handler.make_request(uri=uri,
request_type='GET',
output_format=output_format)
return request
def get_recently_added(self, start='0', count='0', output_format=''):
"""
Return list of recently added items.
@@ -594,7 +613,7 @@ class PmsConnect(object):
return output
def get_metadata_details(self, rating_key='', sync_id='', plex_guid='',
def get_metadata_details(self, rating_key='', sync_id='', plex_guid='', section_id='',
skip_cache=False, cache_key=None, return_cache=False, media_info=True):
"""
Return processed and validated metadata list for requested item.
@@ -654,6 +673,8 @@ class PmsConnect(object):
metadata_main_list = a.getElementsByTagName('Track')
elif a.getElementsByTagName('Photo'):
metadata_main_list = a.getElementsByTagName('Photo')
elif a.getElementsByTagName('Playlist'):
metadata_main_list = a.getElementsByTagName('Playlist')
else:
logger.debug("Tautulli Pmsconnect :: Metadata failed")
return {}
@@ -669,9 +690,13 @@ class PmsConnect(object):
if metadata_main.nodeName == 'Directory' and metadata_type == 'photo':
metadata_type = 'photo_album'
section_id = helpers.get_xml_attr(a, 'librarySectionID')
section_id = helpers.get_xml_attr(a, 'librarySectionID') or section_id
library_name = helpers.get_xml_attr(a, 'librarySectionTitle')
if not library_name and section_id:
library_data = libraries.Libraries().get_details(section_id)
library_name = library_data['section_name']
directors = []
writers = []
actors = []
@@ -1247,7 +1272,27 @@ class PmsConnect(object):
'collections': collections,
'guids': guids,
'full_title': helpers.get_xml_attr(metadata_main, 'title'),
'children_count': helpers.cast_to_int(helpers.get_xml_attr(metadata_main, 'childCount')),
'live': int(helpers.get_xml_attr(metadata_main, 'live') == '1')
}
elif metadata_type == 'playlist':
metadata = {'media_type': metadata_type,
'section_id': section_id,
'library_name': library_name,
'rating_key': helpers.get_xml_attr(metadata_main, 'ratingKey'),
'guid': helpers.get_xml_attr(metadata_main, 'guid'),
'title': helpers.get_xml_attr(metadata_main, 'title'),
'summary': helpers.get_xml_attr(metadata_main, 'summary'),
'duration': helpers.get_xml_attr(metadata_main, 'duration'),
'composite': helpers.get_xml_attr(metadata_main, 'composite'),
'thumb': helpers.get_xml_attr(metadata_main, 'composite'),
'added_at': helpers.get_xml_attr(metadata_main, 'addedAt'),
'updated_at': helpers.get_xml_attr(metadata_main, 'updatedAt'),
'last_viewed_at': helpers.get_xml_attr(metadata_main, 'lastViewedAt'),
'children_count': helpers.cast_to_int(helpers.get_xml_attr(metadata_main, 'leafCount')),
'smart': helpers.cast_to_int(helpers.get_xml_attr(metadata_main, 'smart')),
'playlist_type': helpers.get_xml_attr(metadata_main, 'playlistType'),
'live': int(helpers.get_xml_attr(metadata_main, 'live') == '1')
}
@@ -2242,13 +2287,15 @@ class PmsConnect(object):
logger.warn("Tautulli Pmsconnect :: Failed to terminate session: %s." % msg)
return msg
def get_item_children(self, rating_key='', get_grandchildren=False):
def get_item_children(self, rating_key='', media_type=None, get_grandchildren=False):
"""
Return processed and validated children list.
Output: array
"""
if get_grandchildren:
if media_type == 'playlist':
children_data = self.get_playlist_items(rating_key, output_format='xml')
elif get_grandchildren:
children_data = self.get_metadata_grandchildren(rating_key, output_format='xml')
else:
children_data = self.get_metadata_children(rating_key, output_format='xml')
@@ -2272,12 +2319,9 @@ class PmsConnect(object):
result_data = []
if a.getElementsByTagName('Directory'):
result_data = a.getElementsByTagName('Directory')
if a.getElementsByTagName('Video'):
result_data = a.getElementsByTagName('Video')
if a.getElementsByTagName('Track'):
result_data = a.getElementsByTagName('Track')
for x in a.childNodes:
if x.nodeType == Node.ELEMENT_NODE and x.tagName in ('Directory', 'Video', 'Track', 'Photo'):
result_data.append(x)
if result_data:
for m in result_data:
@@ -2307,7 +2351,11 @@ class PmsConnect(object):
for label in m.getElementsByTagName('Label'):
labels.append(helpers.get_xml_attr(label, 'tag'))
children_output = {'media_type': helpers.get_xml_attr(m, 'type'),
media_type = helpers.get_xml_attr(m, 'type')
if m.nodeName == 'Directory' and media_type == 'photo':
media_type = 'photo_album'
children_output = {'media_type': media_type,
'section_id': helpers.get_xml_attr(m, 'librarySectionID'),
'library_name': helpers.get_xml_attr(m, 'librarySectionTitle'),
'rating_key': helpers.get_xml_attr(m, 'ratingKey'),

View file

@@ -57,6 +57,22 @@ def get_session_user_id():
_session = get_session_info()
return str(_session['user_id']) if _session['user_group'] == 'guest' and _session['user_id'] else None
def get_session_user_token():
"""
Returns the user's server_token for the currently logged in session
"""
_session = get_session_info()
if _session['user_group'] == 'guest' and _session['user_id']:
session_user_tokens = users.Users().get_tokens(_session['user_id'])
user_token = session_user_tokens['server_token']
else:
user_token = plexpy.CONFIG.PMS_TOKEN
return user_token
def get_session_shared_libraries():
"""
Returns a tuple of section_id for the current logged in session

View file

@@ -789,6 +789,12 @@ class Users(object):
return session.friendly_name_to_username(result)
def get_tokens(self, user_id=None):
tokens = {
'allow_guest': 0,
'user_token': '',
'server_token': ''
}
if user_id:
try:
monitor_db = database.MonitorDatabase()
@@ -802,11 +808,11 @@ class Users(object):
}
return tokens
else:
return None
return tokens
except:
return None
return tokens
return None
return tokens
def get_filters(self, user_id=None):
if not user_id:

View file

@@ -19,8 +19,9 @@ from __future__ import unicode_literals
from future.builtins import next
from future.builtins import object
from future.builtins import str
from backports import csv
from io import open
from io import open, BytesIO
import base64
import json
import linecache
@@ -28,10 +29,11 @@ import os
import shutil
import sys
import threading
import zipfile
from future.moves.urllib.parse import urlencode
import cherrypy
from cherrypy.lib.static import serve_file, serve_download
from cherrypy.lib.static import serve_file, serve_fileobj, serve_download
from cherrypy._cperror import NotFound
from hashing_passwords import make_hash
@@ -48,6 +50,7 @@ if plexpy.PYTHON2:
import config
import database
import datafactory
import exporter
import graphs
import helpers
import http_handler
@@ -81,6 +84,7 @@ else:
from plexpy import config
from plexpy import database
from plexpy import datafactory
from plexpy import exporter
from plexpy import graphs
from plexpy import helpers
from plexpy import http_handler
@@ -839,6 +843,82 @@ class WebInterface(object):
return result
@cherrypy.expose
@cherrypy.tools.json_out()
@requireAuth()
@addtoapi("get_collections_table")
def get_collections_list(self, section_id=None, **kwargs):
""" Get the data on the Tautulli collections tables.
```
Required parameters:
section_id (str): The id of the Plex library section
Optional parameters:
None
Returns:
json:
{"draw": 1,
"recordsTotal": 5,
"data":
[...]
}
```
"""
# Check if datatables json_data was received.
# If not, then build the minimal amount of json data for a query
if not kwargs.get('json_data'):
# TODO: Find some way to automatically get the columns
dt_columns = [("titleSort", True, True),
("collectionMode", True, True),
("collectionSort", True, True),
("childCount", True, False)]
kwargs['json_data'] = build_datatables_json(kwargs, dt_columns, "titleSort")
result = libraries.get_collections_list(section_id=section_id, **kwargs)
return result
@cherrypy.expose
@cherrypy.tools.json_out()
@requireAuth()
@addtoapi("get_playlists_table")
def get_playlists_list(self, section_id=None, user_id=None, **kwargs):
""" Get the data on the Tautulli playlists tables.
```
Required parameters:
section_id (str): The section id of the Plex library, OR
user_id (str): The user id of the Plex user
Optional parameters:
None
Returns:
json:
{"draw": 1,
"recordsTotal": 5,
"data":
[...]
}
```
"""
# Check if datatables json_data was received.
# If not, then build the minimal amount of json data for a query
if not kwargs.get('json_data'):
# TODO: Find some way to automatically get the columns
dt_columns = [("title", True, True),
("leafCount", True, True),
("duration", True, True)]
kwargs['json_data'] = build_datatables_json(kwargs, dt_columns, "title")
result = libraries.get_playlists_list(section_id=section_id,
user_id=user_id,
**kwargs)
return result
@cherrypy.expose
@cherrypy.tools.json_out()
@requireAuth(member_of("admin"))
@@ -2997,6 +3077,7 @@ class WebInterface(object):
"backup_dir": plexpy.CONFIG.BACKUP_DIR,
"backup_interval": plexpy.CONFIG.BACKUP_INTERVAL,
"cache_dir": plexpy.CONFIG.CACHE_DIR,
"export_dir": plexpy.CONFIG.EXPORT_DIR,
"log_dir": plexpy.CONFIG.LOG_DIR,
"log_blacklist": checked(plexpy.CONFIG.LOG_BLACKLIST),
"check_github": checked(plexpy.CONFIG.CHECK_GITHUB),
@@ -4301,7 +4382,7 @@ class WebInterface(object):
@cherrypy.expose
@requireAuth()
def info(self, rating_key=None, guid=None, source=None, **kwargs):
def info(self, rating_key=None, guid=None, source=None, section_id=None, user_id=None, **kwargs):
if rating_key and not str(rating_key).isdigit():
raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT)
@@ -4312,10 +4393,16 @@ class WebInterface(object):
"pms_web_url": plexpy.CONFIG.PMS_WEB_URL
}
if user_id:
user_data = users.Users()
user_info = user_data.get_details(user_id=user_id)
else:
user_info = {}
# Try to get metadata from the Plex server first
if rating_key:
pms_connect = pmsconnect.PmsConnect()
metadata = pms_connect.get_metadata_details(rating_key=rating_key)
metadata = pms_connect.get_metadata_details(rating_key=rating_key, section_id=section_id)
# If the item is not found on the Plex server, get the metadata from history
if not metadata and source == 'history':
@@ -4334,7 +4421,7 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT)
return serve_template(templatename="info.html", metadata=metadata, title="Info",
config=config, source=source)
config=config, source=source, user_info=user_info)
else:
if get_session_user_id():
raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT)
@@ -4343,13 +4430,14 @@ class WebInterface(object):
@cherrypy.expose
@requireAuth()
def get_item_children(self, rating_key='', **kwargs):
def get_item_children(self, rating_key='', media_type=None, **kwargs):
pms_connect = pmsconnect.PmsConnect()
result = pms_connect.get_item_children(rating_key=rating_key)
result = pms_connect.get_item_children(rating_key=rating_key, media_type=media_type)
if result:
return serve_template(templatename="info_children_list.html", data=result, title="Children List")
return serve_template(templatename="info_children_list.html", data=result,
media_type=media_type, title="Children List")
else:
logger.warn("Unable to retrieve data for get_item_children.")
return serve_template(templatename="info_children_list.html", data=None, title="Children List")
@@ -4467,8 +4555,9 @@ class WebInterface(object):
img = '/library/metadata/{}/thumb'.format(rating_key)
if img.startswith('/library/metadata'):
parts = 6 if 'composite' in img else 5
img_split = img.split('/')
img = '/'.join(img_split[:5])
img = '/'.join(img_split[:parts])
img_rating_key = img_split[3]
if rating_key != img_rating_key:
rating_key = img_rating_key
@@ -6426,3 +6515,307 @@ class WebInterface(object):
status['message'] = 'Database not ok'
return status
@cherrypy.expose
@cherrypy.tools.json_out()
@requireAuth(member_of("admin"))
@addtoapi("get_exports_table")
def get_export_list(self, section_id=None, user_id=None, rating_key=None, **kwargs):
""" Get the data on the Tautulli export tables.
```
Required parameters:
section_id (str): The id of the Plex library section, OR
user_id (str): The id of the Plex user, OR
rating_key (str): The rating key of the exported item
Optional parameters:
order_column (str): "added_at", "sort_title", "container", "bitrate", "video_codec",
"video_resolution", "video_framerate", "audio_codec", "audio_channels",
"file_size", "last_played", "play_count"
order_dir (str): "desc" or "asc"
start (int): Row to start from, 0
length (int): Number of items to return, 25
search (str): A string to search for, "Thrones"
Returns:
json:
{"draw": 1,
"recordsTotal": 10,
"recordsFiltered": 3,
"data":
[{"row_id": 2,
"timestamp": 1596484600,
"section_id": 1,
"rating_key": 270716,
"media_type": "movie",
"media_type_title": "Movie",
"filename": "Movie - Frozen II [270716].20200803125640.json",
"complete": 1
},
{...},
{...}
]
}
```
"""
# Check if datatables json_data was received.
# If not, then build the minimal amount of json data for a query
if not kwargs.get('json_data'):
# TODO: Find some way to automatically get the columns
dt_columns = [("timestamp", True, False),
("media_type_title", True, True),
("rating_key", True, True),
("file_format", True, True),
("filename", True, True),
("complete", True, False)]
kwargs['json_data'] = build_datatables_json(kwargs, dt_columns, "timestamp")
result = exporter.get_export_datatable(section_id=section_id,
user_id=user_id,
rating_key=rating_key,
kwargs=kwargs)
return result
@cherrypy.expose
@requireAuth(member_of("admin"))
def export_metadata_modal(self, section_id=None, user_id=None, rating_key=None,
media_type=None, sub_media_type=None,
export_type=None, **kwargs):
file_formats = exporter.Export.FILE_FORMATS
return serve_template(templatename="export_modal.html", title="Export Metadata",
section_id=section_id, user_id=user_id, rating_key=rating_key,
media_type=media_type, sub_media_type=sub_media_type,
export_type=export_type, file_formats=file_formats)
@cherrypy.expose
@cherrypy.tools.json_out()
@requireAuth(member_of("admin"))
@addtoapi()
def get_export_fields(self, media_type=None, sub_media_type=None, **kwargs):
""" Get a list of available custom export fields.
```
Required parameters:
media_type (str): The media type of the fields to return
Optional parameters:
sub_media_type (str): The child media type for
collections (movie, show, artist, album, photoalbum),
or playlists (video, audio, photo)
Returns:
json:
{"metadata_fields":
[{"field": "addedAt", "level": 1},
...
],
"media_info_fields":
[{"field": "media.aspectRatio", "level": 1},
...
]
}
```
"""
custom_fields = exporter.get_custom_fields(media_type=media_type,
sub_media_type=sub_media_type)
return custom_fields
@cherrypy.expose
@cherrypy.tools.json_out()
@requireAuth(member_of("admin"))
@addtoapi()
def export_metadata(self, section_id=None, user_id=None, rating_key=None, file_format='csv',
metadata_level=1, media_info_level=1,
include_thumb=False, include_art=False,
custom_fields='', export_type=None, **kwargs):
""" Export library or media metadata to a file
```
Required parameters:
section_id (int): The section id of the library items to export, OR
user_id (int): The user id of the playlist items to export, OR
rating_key (int): The rating key of the media item to export
Optional parameters:
file_format (str): csv (default), json, or xml
metadata_level (int): The level of metadata to export (default 1)
media_info_level (int): The level of media info to export (default 1)
include_thumb (bool): True to export poster/cover images
include_art (bool): True to export background artwork images
custom_fields (str): Comma separated list of custom fields to export
in addition to the export level selected
export_type (str): collection or playlist for library/user export,
otherwise default to all library items
Returns:
json:
{"result": "success",
"message": "Metadata export has started."
}
```
"""
result = exporter.Export(section_id=section_id,
user_id=user_id,
rating_key=rating_key,
file_format=file_format,
metadata_level=metadata_level,
media_info_level=media_info_level,
include_thumb=helpers.bool_true(include_thumb),
include_art=helpers.bool_true(include_art),
custom_fields=custom_fields,
export_type=export_type).export()
if result is True:
return {'result': 'success', 'message': 'Metadata export has started.'}
else:
return {'result': 'error', 'message': result}
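# Because export_metadata is registered with @addtoapi(), it can also be
# invoked through Tautulli's standard /api/v2 endpoint (host, API key, and
# parameters below are illustrative):
#
#     curl "http://localhost:8181/api/v2?apikey=XXXXXXXX&cmd=export_metadata&section_id=1&file_format=json"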
@cherrypy.expose
@requireAuth(member_of("admin"))
def view_export(self, export_id=None, **kwargs):
""" Download an exported metadata file
```
Required parameters:
export_id (int): The row id of the exported file to view
Optional parameters:
None
Returns:
download
```
"""
result = exporter.get_export(export_id=export_id)
if result and result['complete'] == 1 and result['exists']:
filepath = exporter.get_export_filepath(result['filename'])
if result['file_format'] == 'csv':
with open(filepath, 'r', encoding='utf-8') as infile:
reader = csv.DictReader(infile)
table = '<table><tr><th>' + \
'</th><th>'.join(reader.fieldnames) + \
'</th></tr><tr>' + \
'</tr><tr>'.join(
'<td>' + '</td><td>'.join(row.values()) + '</td>' for row in reader) + \
'</tr></table>'
style = '<style>' \
'body {margin: 0;}' \
'table {border-collapse: collapse; overflow-y: auto; height: 100px;} ' \
'th {position: sticky; top: 0; background: #ddd; box-shadow: inset 1px 1px #000, 0 1px #000;}' \
'td {box-shadow: inset 1px -1px #000;}' \
'th, td {padding: 3px; white-space: nowrap;}' \
'</style>'
return '{style}<pre>{table}</pre>'.format(style=style, table=table)
elif result['file_format'] == 'json':
return serve_file(filepath, name=result['filename'], content_type='application/json;charset=UTF-8')
elif result['file_format'] == 'xml':
return serve_file(filepath, name=result['filename'], content_type='application/xml;charset=UTF-8')
elif result['file_format'] == 'm3u8':
return serve_file(filepath, name=result['filename'], content_type='text/plain;charset=UTF-8')
else:
if result and result.get('complete') == 0:
msg = 'Export is still being processed.'
elif result and result.get('complete') == -1:
msg = 'Export failed to process.'
elif result and not result.get('exists'):
msg = 'Export file does not exist.'
else:
msg = 'Invalid export_id provided.'
cherrypy.response.headers['Content-Type'] = 'application/json;charset=UTF-8'
return json.dumps({'result': 'error', 'message': msg}).encode('utf-8')
@cherrypy.expose
@requireAuth(member_of("admin"))
@addtoapi()
def download_export(self, export_id=None, **kwargs):
""" Download an exported metadata file
```
Required parameters:
export_id (int): The row id of the exported file to download
Optional parameters:
None
Returns:
download
```
"""
result = exporter.get_export(export_id=export_id)
if result and result['complete'] == 1 and result['exists']:
export_filepath = exporter.get_export_filepath(result['filename'])
if result['include_thumb'] or result['include_art']:
zip_filename = '{}.zip'.format(os.path.splitext(result['filename'])[0])
images_folder = exporter.get_export_filepath(result['filename'], images=True)
if os.path.exists(images_folder):
buffer = BytesIO()
temp_zip = zipfile.ZipFile(buffer, 'w')
temp_zip.write(export_filepath, arcname=result['filename'])
_images_folder = os.path.basename(images_folder)
for f in os.listdir(images_folder):
image_path = os.path.join(images_folder, f)
temp_zip.write(image_path, arcname=os.path.join(_images_folder, f))
temp_zip.close()
return serve_fileobj(buffer.getvalue(), content_type='application/zip',
disposition='attachment', name=zip_filename)
return serve_download(exporter.get_export_filepath(result['filename']), name=result['filename'])
else:
if result and result.get('complete') == 0:
msg = 'Export is still being processed.'
elif result and result.get('complete') == -1:
msg = 'Export failed to process.'
elif result and not result.get('exists'):
msg = 'Export file does not exist.'
else:
msg = 'Invalid export_id provided.'
cherrypy.response.headers['Content-Type'] = 'application/json;charset=UTF-8'
return json.dumps({'result': 'error', 'message': msg}).encode('utf-8')
@cherrypy.expose
@cherrypy.tools.json_out()
@requireAuth(member_of("admin"))
@addtoapi()
def delete_export(self, export_id=None, delete_all=False, **kwargs):
""" Delete exports from Tautulli.
```
Required parameters:
export_id (int): The row id of the exported file to delete
Optional parameters:
delete_all (bool): 'true' to delete all exported files
Returns:
None
```
"""
if helpers.bool_true(delete_all):
result = exporter.delete_all_exports()
if result:
return {'result': 'success', 'message': 'All exports deleted successfully.'}
else:
return {'result': 'error', 'message': 'Failed to delete all exports.'}
else:
result = exporter.delete_export(export_id=export_id)
if result:
return {'result': 'success', 'message': 'Export deleted successfully.'}
else:
return {'result': 'error', 'message': 'Failed to delete export.'}