diff --git a/data/interfaces/default/history.html b/data/interfaces/default/history.html index 08527355..521fb8a4 100644 --- a/data/interfaces/default/history.html +++ b/data/interfaces/default/history.html @@ -34,11 +34,12 @@ IP Address Title Started - Paused + Paused Stopped Duration Completed RatingKey + @@ -134,7 +135,8 @@ { "targets": [0], "data":"id", - "visible": false + "visible": false, + "searchable": false }, { "targets": [1], @@ -146,7 +148,8 @@ } else { $(td).html(moment(cellData,"X").format("${date_format}")); } - } + }, + "searchable": false }, { "targets": [2], @@ -185,14 +188,16 @@ "data":"started", "render": function ( data, type, full ) { return moment(data, "X").format("${time_format}"); - } + }, + "searchable": false }, { "targets": [7], - "data":"paused", + "data":"paused_counter", "render": function ( data, type, full ) { return Math.round(moment.duration(data, 'seconds').as('minutes')) + ' mins'; - } + }, + "searchable": false }, { "targets": [8], @@ -203,7 +208,8 @@ } else { return data; } - } + }, + "searchable": false }, { "targets": [9], @@ -214,7 +220,8 @@ } else { return data; } - } + }, + "searchable": false }, { "targets": [10], @@ -226,12 +233,20 @@ } else { return '100%'; } - } + }, + "searchable": false }, { "targets": [11], "data":"rating_key", - "visible": false + "visible": false, + "searchable": false + }, + { + "targets": [12], + "data":"xml", + "searchable":false, + "visible":false } ], "drawCallback": function (settings) { diff --git a/data/interfaces/default/user.html b/data/interfaces/default/user.html new file mode 100644 index 00000000..3f000be9 --- /dev/null +++ b/data/interfaces/default/user.html @@ -0,0 +1,152 @@ +<%inherit file="base.html"/> +<%! + from plexpy import helpers +%> + +<%def name="headIncludes()"> + + + +<%def name="body()"> +
[user.html body markup not recoverable in this view. The new template's body() lays out
"Global Stats", "Platform Stats" and "Recently watched" panels, a "Public IP Addresses for
<username>" table (Last seen, IP Address, Play Count, Platform (Last Seen), Location), and a
"Watching History for <username>" history table.]
+ + + +<%def name="javascriptIncludes()"> + + + + + + \ No newline at end of file diff --git a/data/interfaces/default/users.html b/data/interfaces/default/users.html index d09354ce..576dc220 100644 --- a/data/interfaces/default/users.html +++ b/data/interfaces/default/users.html @@ -57,7 +57,7 @@ "lengthMenu":"Show _MENU_ entries per page", "info":"Showing _START_ to _END_ of _TOTAL_ active users", "infoEmpty":"Showing 0 to 0 of 0 entries", - "infoFiltered":"(filtered from _MAX_ total entries)", + "infoFiltered":"", "emptyTable": "No data in table", }, "destroy": true, @@ -80,11 +80,11 @@ "targets": [0], "data": null, "createdCell": function (td, cellData, rowData, row, col) { - if (rowData['user_thumb'] === '') { + //if (rowData['user_thumb'] === '') { $(td).html('User Logo'); - } else { - $(td).html('User Logo'); - } + //} else { + // $(td).html('User Logo'); + //} }, "orderable": false, "className": "users-poster-face", @@ -103,7 +103,8 @@ }, { "targets": [3], - "data": "ip_address" + "data": "ip_address", + "searchable": false }, { "targets": [4], diff --git a/plexpy/datatables.py b/plexpy/datatables.py new file mode 100644 index 00000000..a3d0f5b2 --- /dev/null +++ b/plexpy/datatables.py @@ -0,0 +1,181 @@ +# This file is part of PlexPy. +# +# PlexPy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# PlexPy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with PlexPy. If not, see . 
+ +# TODO: Implement with sqlite3 directly instead of using db class + +from plexpy import logger, helpers, db + +import re + + +class DataTables(object): + """ + Server side processing for Datatables + """ + + def __init__(self): + self.ssp_db = db.DBConnection() + + def ssp_query(self, table_name, + columns=[], + start=0, + length=0, + order_column=0, + order_dir='asc', + search_value='', + search_regex='', + custom_where='', + group_by='', + kwargs=None): + + parameters = self.process_kwargs(kwargs) + + if group_by != '': + grouping = True + else: + grouping = False + + column_data = self.extract_columns(columns) + where = self.construct_where(column_data, search_value, grouping, parameters) + order = self.construct_order(column_data, order_column, order_dir, parameters) + + # TODO: custom_where is ugly and causes issues with reported total results + if custom_where != '': + where += 'AND (' + custom_where + ')' + + if grouping: + query = 'SELECT * FROM (SELECT %s FROM %s GROUP BY %s) %s %s' \ + % (column_data['column_string'], table_name, group_by, + where, order) + else: + query = 'SELECT %s FROM %s %s %s' \ + % (column_data['column_string'], table_name, where, + order) + + filtered = self.ssp_db.select(query) + if search_value == '': + totalcount = len(filtered) + else: + totalcount = self.ssp_db.select('SELECT COUNT(*) from %s' % table_name)[0][0] + + # logger.debug(u"Query string: %s" % query) + + result = filtered[start:(start + length)] + output = {'result': result, + 'filteredCount': len(filtered), + 'totalCount': totalcount} + + return output + + @staticmethod + def construct_order(column_data, order_column, order_dir, parameters=None): + order = '' + if parameters: + for parameter in parameters: + if parameter['data'] != '': + if int(order_column) == parameters.index(parameter): + if parameter['data'] in column_data['column_named'] and parameter['orderable'] == 'true': + order = 'ORDER BY %s COLLATE NOCASE %s' % (parameter['data'], order_dir) + logger.debug(u"order string %s " % order) + else: + order = 'ORDER BY %s COLLATE NOCASE %s' % (column_data['column_named'][order_column], order_dir) + logger.debug(u"order string (NO param received) %s " % order) + return order + + @staticmethod + def construct_where(column_data, search_value='', grouping=False, parameters=None): + if search_value != '': + where = 'WHERE ' + if parameters: + for column in column_data['column_named']: + search_skip = False + for parameter in parameters: + if column in parameter['data']: + if parameter['searchable'] == 'true': + logger.debug(u"Column %s is searchable." % column) + where += column + ' LIKE "%' + search_value + '%" OR ' + search_skip = True + else: + logger.debug(u"Column %s is NOT searchable." 
% column) + search_skip = True + + if not search_skip: + where += column + ' LIKE "%' + search_value + '%" OR ' + else: + for column in column_data['column_named']: + where += column + ' LIKE "%' + search_value + '%" OR ' + + # TODO: This will break the query if all parameters are excluded + where = where[:-4] + + return where + else: + return '' + + @staticmethod + def extract_columns(columns=[]): + columns_string = '' + columns_literal = [] + columns_named = [] + + for column in columns: + columns_string += column + columns_string += ', ' + # TODO: make this case insensitive + if ' as ' in column: + columns_literal.append(column.rpartition(' as ')[0]) + columns_named.append(column.rpartition(' as ')[-1]) + else: + columns_literal.append(column) + columns_named.append(column) + + columns_string = columns_string[:-2] + + column_data = {'column_string': columns_string, + 'column_literal': columns_literal, + 'column_named': columns_named + } + + return column_data + + # TODO: Fix this method. Should not break if kwarg list is not sorted. + @staticmethod + def process_kwargs(kwargs): + + column_parameters = [] + + for kwarg in sorted(kwargs): + if re.search(r"\[(\w+)\]", kwarg) and kwarg[:7] == 'columns': + parameters = re.findall(r"\[(\w+)\]", kwarg) + array_index = '' + for parameter in parameters: + pass_complete = False + if parameter.isdigit(): + array_index = parameter + if parameter == 'data': + data = kwargs.get('columns[' + array_index + '][data]', "") + if parameter == 'orderable': + orderable = kwargs.get('columns[' + array_index + '][orderable]', "") + if parameter == 'searchable': + searchable = kwargs.get('columns[' + array_index + '][searchable]', "") + pass_complete = True + if pass_complete: + row = {'index': int(array_index), + 'data': data, + 'searchable': searchable, + 'orderable': orderable} + column_parameters.append(row) + + return sorted(column_parameters, key=lambda i: i['index']) \ No newline at end of file diff --git a/plexpy/plexwatch.py b/plexpy/plexwatch.py new file mode 100644 index 00000000..76790819 --- /dev/null +++ b/plexpy/plexwatch.py @@ -0,0 +1,218 @@ +# This file is part of PlexPy. +# +# PlexPy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# PlexPy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with PlexPy. If not, see . 
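# --- Illustrative sketch, not part of the patch: how the static helpers in
# --- plexpy/datatables.py (above) digest a DataTables server-side request.
# --- The column names and request values below are made up, and the plexpy
# --- package is assumed to be importable.
from plexpy.datatables import DataTables

# Flat form fields exactly as the DataTables AJAX request delivers them.
sample_kwargs = {
    'columns[0][data]': 'id',    'columns[0][orderable]': 'true',  'columns[0][searchable]': 'false',
    'columns[1][data]': 'user',  'columns[1][orderable]': 'true',  'columns[1][searchable]': 'true',
    'columns[2][data]': 'title', 'columns[2][orderable]': 'true',  'columns[2][searchable]': 'true',
}

# One dict per column index: {'index', 'data', 'searchable', 'orderable'}.
params = DataTables.process_kwargs(sample_kwargs)

# Splits "expr as alias" entries into literal and named column lists for the SELECT.
column_data = DataTables.extract_columns(['id', 'user', 'title'])

# Columns flagged searchable == 'true' (or with no matching parameter at all)
# are OR'ed into the WHERE ... LIKE clause; ORDER BY honours the orderable flag.
where = DataTables.construct_where(column_data, search_value='office', parameters=params)
order = DataTables.construct_order(column_data, order_column=1, order_dir='desc', parameters=params)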
+ +from plexpy import logger, helpers, request, datatables, config + +from xml.dom import minidom +import plexpy +import json + + +class PlexWatch(object): + """ + Retrieve and process data from the plexwatch database + """ + + def __init__(self): + pass + + @staticmethod + def get_history_table_name(): + + if plexpy.CONFIG.GROUPING_GLOBAL_HISTORY: + return "grouped" + else: + return "processed" + + @staticmethod + def get_user_table_name(): + + if plexpy.CONFIG.GROUPING_USER_HISTORY: + return "grouped" + else: + return "processed" + + def get_user_list(self, start='', length='', kwargs=None): + data_tables = datatables.DataTables() + + start = int(start) + length = int(length) + filtered = [] + totalcount = 0 + search_value = "" + search_regex = "" + order_column = 1 + order_dir = "desc" + + if 'order[0][dir]' in kwargs: + order_dir = kwargs.get('order[0][dir]', "desc") + + if 'order[0][column]' in kwargs: + order_column = kwargs.get('order[0][column]', 1) + + if 'search[value]' in kwargs: + search_value = kwargs.get('search[value]', "") + + if 'search[regex]' in kwargs: + search_regex = kwargs.get('search[regex]', "") + + columns = ['user', + 'time', + 'ip_address', + 'COUNT(title) as plays'] + + query = data_tables.ssp_query(table_name=self.get_user_table_name(), + columns=columns, + start=start, + length=length, + order_column=int(order_column), + order_dir=order_dir, + search_value=search_value, + search_regex=search_regex, + custom_where='', + group_by='user', + kwargs=kwargs) + + users = query['result'] + + rows = [] + for item in users: + row = {"plays": item['plays'], + "time": item['time'], + "user": item["user"], + "ip_address": item["ip_address"] + } + + rows.append(row) + + dict = {'recordsFiltered': query['filteredCount'], + 'recordsTotal': query['totalCount'], + 'data': rows, + } + + return dict + + def get_history(self, start='', length='', kwargs=None): + data_tables = datatables.DataTables() + + start = int(start) + length = int(length) + filtered = [] + totalcount = 0 + search_value = "" + search_regex = "" + order_column = 1 + order_dir = "desc" + + if 'order[0][dir]' in kwargs: + order_dir = kwargs.get('order[0][dir]', "desc") + + if 'order[0][column]' in kwargs: + order_column = kwargs.get('order[0][column]', "1") + + if 'search[value]' in kwargs: + search_value = kwargs.get('search[value]', "") + + if 'search[regex]' in kwargs: + search_regex = kwargs.get('search[regex]', "") + + columns = ['id', + 'time as date', + 'user', + 'platform', + 'ip_address', + 'title', + 'time as started', + 'paused_counter', + 'stopped', + 'ratingKey as rating_key', + 'xml', + 'round((julianday(datetime(stopped, "unixepoch", "localtime")) - \ + julianday(datetime(time, "unixepoch", "localtime"))) * 86400) - \ + (case when paused_counter is null then 0 else paused_counter end) as duration' + ] + + query = data_tables.ssp_query(table_name=self.get_history_table_name(), + columns=columns, + start=start, + length=length, + order_column=int(order_column), + order_dir=order_dir, + search_value=search_value, + search_regex=search_regex, + custom_where='', + group_by='', + kwargs=kwargs) + + history = query['result'] + + rows = [] + # NOTE: We are adding in a blank xml field in order enable the Datatables "searchable" parameter + for item in history: + row = {"id": item['id'], + "date": item['date'], + "user": item["user"], + "platform": item["platform"], + "ip_address": item["ip_address"], + "title": item["title"], + "started": item["started"], + "paused_counter": item["paused_counter"], + 
"stopped": item["stopped"], + "rating_key": item["rating_key"], + "duration": item["duration"], + "percent_complete": 0, + "xml": ""} + + if item['paused_counter'] > 0: + row['paused_counter'] = item['paused_counter'] + else: + row['paused_counter'] = 0 + + if item['started']: + if item['stopped'] > 0: + stopped = item['stopped'] + else: + stopped = 0 + if item['paused_counter'] > 0: + paused_counter = item['paused_counter'] + else: + paused_counter = 0 + + try: + xml_parse = minidom.parseString(helpers.latinToAscii(item['xml'])) + except IOError, e: + logger.warn("Error parsing XML in PlexWatch db: %s" % e) + + xml_head = xml_parse.getElementsByTagName('opt') + if not xml_head: + logger.warn("Error parsing XML in PlexWatch db: %s" % e) + + for s in xml_head: + if s.getAttribute('duration') and s.getAttribute('viewOffset'): + view_offset = helpers.cast_to_float(s.getAttribute('viewOffset')) + duration = helpers.cast_to_float(s.getAttribute('duration')) + if duration > 0: + row['percent_complete'] = (view_offset / duration) * 100 + else: + row['percent_complete'] = 0 + + rows.append(row) + + dict = {'recordsFiltered': query['filteredCount'], + 'recordsTotal': query['totalCount'], + 'data': rows, + } + + return dict diff --git a/plexpy/webserve.py b/plexpy/webserve.py index 0ded4a8d..a577e399 100644 --- a/plexpy/webserve.py +++ b/plexpy/webserve.py @@ -13,7 +13,7 @@ # You should have received a copy of the GNU General Public License # along with PlexPy. If not, see . -from plexpy import logger, db, helpers, notifiers, plextv, pmsconnect +from plexpy import logger, db, helpers, notifiers, plextv, pmsconnect, plexwatch from plexpy.helpers import checked, radio, today, cleanName from xml.dom import minidom @@ -84,100 +84,18 @@ class WebInterface(object): def users(self): return serve_template(templatename="users.html", title="Users") + @cherrypy.expose + def user(self): + return serve_template(templatename="user.html", title="User") + @cherrypy.expose def get_user_list(self, start=0, length=100, **kwargs): - start = int(start) - length = int(length) - filtered = [] - totalcount = 0 - search_value = "" - search_regex = "" - order_column = 1 - order_dir = "desc" - if 'order[0][dir]' in kwargs: - order_dir = kwargs.get('order[0][dir]', "desc") - - if 'order[0][column]' in kwargs: - order_column = kwargs.get('order[0][column]', "1") - - if 'search[value]' in kwargs: - search_value = kwargs.get('search[value]', "") - - if 'search[regex]' in kwargs: - search_regex = kwargs.get('search[regex]', "") - - sortcolumn = 'user' - if order_column == '2': - sortcolumn = 'time' - elif order_column == '3': - sortcolumn = 'ip_address' - elif order_column == '4': - sortcolumn = 'plays' - - myDB = db.DBConnection() - db_table = db.DBConnection().get_history_table_name() - - if search_value == "": - query = 'SELECT COUNT(title) as plays, user, time, \ - SUM(time) as timeTotal, SUM(stopped) as stoppedTotal, \ - SUM(paused_counter) as paused_counterTotal, platform, \ - ip_address, xml \ - from %s GROUP by user ORDER by %s COLLATE NOCASE %s' % (db_table, sortcolumn, order_dir) - filtered = myDB.select(query) - totalcount = len(filtered) - else: - query = 'SELECT COUNT(title) as plays, user, time, \ - SUM(time) as timeTotal, SUM(stopped) as stoppedTotal, \ - SUM(paused_counter) as paused_counterTotal, platform, \ - ip_address, xml \ - from ' + db_table + ' WHERE user LIKE "%' + search_value + '%" \ - GROUP by user' + ' ORDER by %s COLLATE NOCASE %s' % (sortcolumn, order_dir) - filtered = myDB.select(query) - totalcount 
= myDB.select('SELECT COUNT(*) from %s' % db_table)[0][0] - - users = filtered[start:(start + length)] - rows = [] - for item in users: - row = {"plays": item['plays'], - "time": item['time'], - "user": item["user"], - "timeTotal": item["timeTotal"], - "ip_address": item["ip_address"], - "stoppedTotal": item["stoppedTotal"], - "paused_counterTotal": item["paused_counterTotal"], - "platform": item["platform"] - } - - try: - xml_parse = minidom.parseString(helpers.latinToAscii(item['xml'])) - except IOError, e: - logger.warn("Error parsing XML in PlexWatch db: %s" % e) - - xml_head = xml_parse.getElementsByTagName('User') - if not xml_head: - logger.warn("Error parsing XML in PlexWatch db: %s" % e) - - for s in xml_head: - if s.getAttribute('thumb'): - row['user_thumb'] = s.getAttribute('thumb') - else: - row['user_thumb'] = "" - if s.getAttribute('id'): - row['user_id'] = s.getAttribute('id') - else: - row['user_id'] = "" - - rows.append(row) - - dict = {'recordsFiltered': len(filtered), - 'recordsTotal': totalcount, - 'data': rows, - } - s = json.dumps(dict) + plex_watch = plexwatch.PlexWatch() + users = plex_watch.get_user_list(start, length, kwargs) cherrypy.response.headers['Content-type'] = 'application/json' - return s + return json.dumps(users) @cherrypy.expose def checkGithub(self): @@ -415,127 +333,12 @@ class WebInterface(object): @cherrypy.expose def getHistory_json(self, start=0, length=100, **kwargs): - start = int(start) - length = int(length) - filtered = [] - totalcount = 0 - search_value = "" - search_regex = "" - order_column = 1 - order_dir = "desc" - if 'order[0][dir]' in kwargs: - order_dir = kwargs.get('order[0][dir]', "desc") + plex_watch = plexwatch.PlexWatch() + history = plex_watch.get_history(start, length, kwargs) - if 'order[0][column]' in kwargs: - order_column = kwargs.get('order[0][column]', "1") - - if 'search[value]' in kwargs: - search_value = kwargs.get('search[value]', "") - - if 'search[regex]' in kwargs: - search_regex = kwargs.get('search[regex]', "") - - myDB = db.DBConnection() - db_table = db.DBConnection().get_history_table_name() - - sortcolumn = 'time' - sortbyhavepercent = False - if order_column == '2': - sortcolumn = 'user' - if order_column == '3': - sortcolumn = 'platform' - elif order_column == '4': - sortcolumn = 'ip_address' - elif order_column == '5': - sortcolumn = 'title' - elif order_column == '6': - sortcolumn = 'time' - elif order_column == '7': - sortcolumn = 'paused_counter' - elif order_column == '8': - sortcolumn = 'stopped' - elif order_column == '9': - sortcolumn = 'duration' - - if search_value == "": - query = 'SELECT id, time, user, platform, ip_address, title, time, paused_counter, stopped, ratingKey, xml, \ - round((julianday(datetime(stopped, "unixepoch", "localtime")) - \ - julianday(datetime(time, "unixepoch", "localtime"))) * 86400) - \ - (case when paused_counter is null then 0 else paused_counter end) as duration \ - from %s order by %s COLLATE NOCASE %s' % (db_table, sortcolumn, order_dir) - filtered = myDB.select(query) - totalcount = len(filtered) - else: - query = 'SELECT id, time, user, platform, ip_address, title, time, paused_counter, stopped, ratingKey, xml, \ - round((julianday(datetime(stopped, "unixepoch", "localtime")) - \ - julianday(datetime(time, "unixepoch", "localtime"))) * 86400) - \ - (case when paused_counter is null then 0 else paused_counter end) as duration \ - from ' + db_table + ' WHERE user LIKE "%' + search_value + '%" OR title LIKE "%' + search_value \ - + '%"' + 'ORDER BY %s COLLATE 
NOCASE %s' % (sortcolumn, order_dir) - filtered = myDB.select(query) - totalcount = myDB.select('SELECT COUNT(*) from processed')[0][0] - - history = filtered[start:(start + length)] - rows = [] - for item in history: - row = {"id": item['id'], - "date": item['time'], - "user": item["user"], - "platform": item["platform"], - "ip_address": item["ip_address"], - "title": item["title"], - "started": item["time"], - "paused": item["paused_counter"], - "stopped": item["stopped"], - "rating_key": item["ratingKey"], - "duration": item["duration"], - "percent_complete": 0, - } - - if item['paused_counter'] > 0: - row['paused'] = item['paused_counter'] - else: - row['paused'] = 0 - - if item['time']: - if item['stopped'] > 0: - stopped = item['stopped'] - else: - stopped = 0 - if item['paused_counter'] > 0: - paused_counter = item['paused_counter'] - else: - paused_counter = 0 - - try: - xml_parse = minidom.parseString(helpers.latinToAscii(item['xml'])) - except IOError, e: - logger.warn("Error parsing XML in PlexWatch db: %s" % e) - - xml_head = xml_parse.getElementsByTagName('opt') - if not xml_head: - logger.warn("Error parsing XML in PlexWatch db: %s" % e) - - for s in xml_head: - if s.getAttribute('duration') and s.getAttribute('viewOffset'): - view_offset = helpers.cast_to_float(s.getAttribute('viewOffset')) - duration = helpers.cast_to_float(s.getAttribute('duration')) - if duration > 0: - row['percent_complete'] = (view_offset / duration) * 100 - else: - row['percent_complete'] = 0 - - rows.append(row) - - dict = {'recordsFiltered': len(filtered), - 'recordsTotal': totalcount, - 'data': rows, - } - s = json.dumps(dict) cherrypy.response.headers['Content-type'] = 'application/json' - return s - + return json.dumps(history) @cherrypy.expose def getStreamDetails(self, id=0, **kwargs):
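For reference, a minimal sketch of the new request path, assuming a running PlexPy install
pointed at a populated PlexWatch database; the parameter values are illustrative only and the
column triplets are truncated.

from plexpy import plexwatch

# DataTables posts its server-side parameters as flat form fields; cherrypy
# forwards them to getHistory_json as **kwargs.
request_kwargs = {
    'order[0][column]': '1', 'order[0][dir]': 'desc',
    'search[value]': '', 'search[regex]': 'false',
    'columns[0][data]': 'id', 'columns[0][orderable]': 'true', 'columns[0][searchable]': 'false',
    # ...one data/orderable/searchable triplet per column defined in history.html...
}

plex_watch = plexwatch.PlexWatch()
history = plex_watch.get_history(start=0, length=25, kwargs=request_kwargs)

# get_history() returns the standard DataTables reply shape that the template
# consumes after webserve.getHistory_json wraps it in json.dumps():
#   {'recordsFiltered': <int>, 'recordsTotal': <int>,
#    'data': [{'id': ..., 'date': ..., 'user': ..., 'paused_counter': ...,
#              'percent_complete': ..., 'xml': ''}, ...]}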