Mirror of https://github.com/Tautulli/Tautulli.git, synced 2025-07-06 21:21:15 -07:00
Bump backports-functools-lru-cache from 1.6.6 to 2.0.0 (#2263)
* Bump backports-functools-lru-cache from 1.6.6 to 2.0.0

  Bumps [backports-functools-lru-cache](https://github.com/jaraco/backports.functools_lru_cache) from 1.6.6 to 2.0.0.
  - [Release notes](https://github.com/jaraco/backports.functools_lru_cache/releases)
  - [Changelog](https://github.com/jaraco/backports.functools_lru_cache/blob/main/NEWS.rst)
  - [Commits](https://github.com/jaraco/backports.functools_lru_cache/compare/v1.6.6...v2.0.0)

  ---
  updated-dependencies:
  - dependency-name: backports-functools-lru-cache
    dependency-type: direct:production
    update-type: version-update:semver-major
  ...

  Signed-off-by: dependabot[bot] <support@github.com>

* Update backports-functools-lru-cache==2.0.0

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: JonnyWong16 <9099342+JonnyWong16@users.noreply.github.com>

[skip ci]
This commit is contained in:
parent: 13eb0fd6db
commit: b7836102a9

2 changed files with 162 additions and 114 deletions
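Besides the pin bump, the vendored module is rewritten to track the modern CPython `functools` implementation. Two user-visible additions appear in the diff below: `lru_cache` now accepts a bare callable (so `@lru_cache` without parentheses works), and each wrapper gains a `cache_parameters()` introspection method. A minimal sketch of both, assuming the backport is importable as `backports.functools_lru_cache`:

```python
from backports.functools_lru_cache import lru_cache


@lru_cache  # bare decoration is new in 2.0.0; maxsize defaults to 128
def square(x):
    return x * x


square(3)
square(3)
# cache_parameters() (added in this diff) reports the decorator's configuration
print(square.cache_parameters())  # -> {'maxsize': 128, 'typed': False}
print(square.cache_info())        # -> CacheInfo(hits=1, misses=1, maxsize=128, currsize=1)
```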
lib/backports/functools_lru_cache.py (vendored copy of the backport; exact path assumed from Tautulli's lib/ layout):

```diff
@@ -26,6 +26,12 @@ def update_wrapper(
 
 
 class _HashedSeq(list):
+    """This class guarantees that hash() will be called no more than once
+    per element. This is important because the lru_cache() will hash
+    the key multiple times on a cache miss.
+
+    """
+
     __slots__ = 'hashvalue'
 
     def __init__(self, tup, hash=hash):
```
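The docstring added to `_HashedSeq` documents behavior the class already had: the key tuple is hashed once at construction and the cached value is reused on every lookup. A quick illustration, poking at the private helper purely for demonstration:

```python
from backports.functools_lru_cache import _HashedSeq  # private helper, for illustration only

hash_calls = []


class Noisy(str):
    def __hash__(self):
        hash_calls.append(str(self))
        return super().__hash__()


key = _HashedSeq((Noisy('a'), Noisy('b')))
hash(key)
hash(key)
hash(key)
print(hash_calls)  # ['a', 'b'] -- each element hashed exactly once, at construction
```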
```diff
@@ -41,45 +47,57 @@ def _make_key(
     kwds,
     typed,
     kwd_mark=(object(),),
-    fasttypes=set([int, str, frozenset, type(None)]),
-    sorted=sorted,
+    fasttypes={int, str},
     tuple=tuple,
     type=type,
     len=len,
 ):
-    'Make a cache key from optionally typed positional and keyword arguments'
+    """Make a cache key from optionally typed positional and keyword arguments
+
+    The key is constructed in a way that is flat as possible rather than
+    as a nested structure that would take more memory.
+
+    If there is only a single argument and its data type is known to cache
+    its hash value, then that argument is returned without a wrapper. This
+    saves space and improves lookup speed.
+
+    """
+    # All of code below relies on kwds preserving the order input by the user.
+    # Formerly, we sorted() the kwds before looping. The new way is *much*
+    # faster; however, it means that f(x=1, y=2) will now be treated as a
+    # distinct call from f(y=2, x=1) which will be cached separately.
     key = args
     if kwds:
-        sorted_items = sorted(kwds.items())
         key += kwd_mark
-        for item in sorted_items:
+        for item in kwds.items():
             key += item
     if typed:
         key += tuple(type(v) for v in args)
         if kwds:
-            key += tuple(type(v) for k, v in sorted_items)
+            key += tuple(type(v) for v in kwds.values())
     elif len(key) == 1 and type(key[0]) in fasttypes:
         return key[0]
     return _HashedSeq(key)
 
 
-def lru_cache(maxsize=100, typed=False):  # noqa: C901
+def lru_cache(maxsize=128, typed=False):
     """Least-recently-used cache decorator.
 
     If *maxsize* is set to None, the LRU features are disabled and the cache
     can grow without bound.
 
     If *typed* is True, arguments of different types will be cached separately.
-    For example, f(3.0) and f(3) will be treated as distinct calls with
-    distinct results.
+    For example, f(decimal.Decimal("3.0")) and f(3.0) will be treated as
+    distinct calls with distinct results. Some types such as str and int may
+    be cached separately even when typed is false.
 
     Arguments to the cached function must be hashable.
 
-    View the cache statistics named tuple (hits, misses, maxsize, currsize) with
-    f.cache_info(). Clear the cache and statistics with f.cache_clear().
+    View the cache statistics named tuple (hits, misses, maxsize, currsize)
+    with f.cache_info(). Clear the cache and statistics with f.cache_clear().
     Access the underlying function with f.__wrapped__.
 
-    See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
+    See: https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)
 
     """
 
```
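Note the behavior change called out in the new `_make_key` comments: keyword arguments are no longer `sorted()`, so calls that differ only in keyword order now occupy separate cache entries. A sketch of the difference:

```python
from backports.functools_lru_cache import lru_cache


@lru_cache(maxsize=None)
def f(x=0, y=0):
    return (x, y)


f(x=1, y=2)
f(y=2, x=1)  # same arguments, different keyword order
# 1.6.6 sorted the kwargs and saw one key; 2.0.0 keeps input order and sees two.
print(f.cache_info().misses)  # -> 2 under the new _make_key
```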
```diff
@@ -88,108 +106,138 @@ def lru_cache(maxsize=100, typed=False):  # noqa: C901
     # The internals of the lru_cache are encapsulated for thread safety and
     # to allow the implementation to change (including a possible C version).
 
+    if isinstance(maxsize, int):
+        # Negative maxsize is treated as 0
+        if maxsize < 0:
+            maxsize = 0
+    elif callable(maxsize) and isinstance(typed, bool):
+        # The user_function was passed in directly via the maxsize argument
+        user_function, maxsize = maxsize, 128
+        wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
+        wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}
+        return update_wrapper(wrapper, user_function)
+    elif maxsize is not None:
+        raise TypeError('Expected first argument to be an integer, a callable, or None')
 
     def decorating_function(user_function):
-        cache = dict()
-        stats = [0, 0]  # make statistics updateable non-locally
-        HITS, MISSES = 0, 1  # names for the stats fields
-        make_key = _make_key
-        cache_get = cache.get  # bound method to lookup key or return None
-        _len = len  # localize the global len() function
+        wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
+        wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}
+        return update_wrapper(wrapper, user_function)
+
+    return decorating_function
+
+
+def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
+    # Constants shared by all lru cache instances:
+    sentinel = object()  # unique object used to signal cache misses
+    make_key = _make_key  # build a key from the function arguments
+    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3  # names for the link fields
+
+    cache = {}
+    hits = misses = 0
+    full = False
+    cache_get = cache.get  # bound method to lookup a key or return None
+    cache_len = cache.__len__  # get cache size without calling len()
     lock = RLock()  # because linkedlist updates aren't threadsafe
     root = []  # root of the circular doubly linked list
     root[:] = [root, root, None, None]  # initialize by pointing to self
-        nonlocal_root = [root]  # make updateable non-locally
-        PREV, NEXT, KEY, RESULT = 0, 1, 2, 3  # names for the link fields
 
     if maxsize == 0:
 
         def wrapper(*args, **kwds):
-            # no caching, just do a statistics update after a successful call
+            # No caching -- just a statistics update
+            nonlocal misses
+            misses += 1
             result = user_function(*args, **kwds)
-            stats[MISSES] += 1
             return result
 
     elif maxsize is None:
 
         def wrapper(*args, **kwds):
-            # simple caching without ordering or size limit
+            # Simple caching without ordering or size limit
+            nonlocal hits, misses
             key = make_key(args, kwds, typed)
-            result = cache_get(
-                key, root
-            )  # root used here as a unique not-found sentinel
-            if result is not root:
-                stats[HITS] += 1
+            result = cache_get(key, sentinel)
+            if result is not sentinel:
+                hits += 1
                 return result
+            misses += 1
             result = user_function(*args, **kwds)
             cache[key] = result
-            stats[MISSES] += 1
             return result
 
     else:
 
         def wrapper(*args, **kwds):
-            # size limited caching that tracks accesses by recency
-            key = make_key(args, kwds, typed) if kwds or typed else args
+            # Size limited caching that tracks accesses by recency
+            nonlocal root, hits, misses, full
+            key = make_key(args, kwds, typed)
             with lock:
                 link = cache_get(key)
                 if link is not None:
-                    # record recent use of the key by moving it
-                    # to the front of the list
-                    (root,) = nonlocal_root
-                    link_prev, link_next, key, result = link
+                    # Move the link to the front of the circular queue
+                    link_prev, link_next, _key, result = link
                     link_prev[NEXT] = link_next
                     link_next[PREV] = link_prev
                     last = root[PREV]
                     last[NEXT] = root[PREV] = link
                     link[PREV] = last
                     link[NEXT] = root
-                    stats[HITS] += 1
+                    hits += 1
                     return result
+                misses += 1
             result = user_function(*args, **kwds)
             with lock:
-                (root,) = nonlocal_root
                 if key in cache:
-                    # getting here means that this same key was added to the
-                    # cache while the lock was released. since the link
+                    # Getting here means that this same key was added to the
+                    # cache while the lock was released. Since the link
                     # update is already done, we need only return the
                     # computed result and update the count of misses.
                     pass
-                elif _len(cache) >= maxsize:
-                    # use the old root to store the new key and result
+                elif full:
+                    # Use the old root to store the new key and result.
                     oldroot = root
                     oldroot[KEY] = key
                     oldroot[RESULT] = result
-                    # empty the oldest link and make it the new root
-                    root = nonlocal_root[0] = oldroot[NEXT]
+                    # Empty the oldest link and make it the new root.
+                    # Keep a reference to the old key and old result to
+                    # prevent their ref counts from going to zero during the
+                    # update. That will prevent potentially arbitrary object
+                    # clean-up code (i.e. __del__) from running while we're
+                    # still adjusting the links.
+                    root = oldroot[NEXT]
                     oldkey = root[KEY]
                     root[KEY] = root[RESULT] = None
-                    # now update the cache dictionary for the new links
+                    # Now update the cache dictionary.
                     del cache[oldkey]
+                    # Save the potentially reentrant cache[key] assignment
+                    # for last, after the root and links have been put in
+                    # a consistent state.
                     cache[key] = oldroot
                 else:
-                    # put result in a new link at the front of the list
+                    # Put result in a new link at the front of the queue.
                     last = root[PREV]
                     link = [last, root, key, result]
                     last[NEXT] = root[PREV] = cache[key] = link
-                stats[MISSES] += 1
+                    # Use the cache_len bound method instead of the len() function
+                    # which could potentially be wrapped in an lru_cache itself.
+                    full = cache_len() >= maxsize
             return result
 
     def cache_info():
         """Report cache statistics"""
         with lock:
-            return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
+            return _CacheInfo(hits, misses, maxsize, cache_len())
 
     def cache_clear():
         """Clear the cache and cache statistics"""
+        nonlocal hits, misses, full
         with lock:
             cache.clear()
-            root = nonlocal_root[0]
             root[:] = [root, root, None, None]
-            stats[:] = [0, 0]
+            hits = misses = 0
+            full = False
 
-    wrapper.__wrapped__ = user_function
     wrapper.cache_info = cache_info
     wrapper.cache_clear = cache_clear
-    return update_wrapper(wrapper, user_function)
+    return wrapper
 
-    return decorating_function
```
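The rewritten wrapper keeps the public surface promised by the docstring: `cache_info()`, `cache_clear()`, and `__wrapped__` (the manual `wrapper.__wrapped__` assignment is dropped; `update_wrapper` sets it). A small usage sketch of the statistics and eviction behavior:

```python
from backports.functools_lru_cache import lru_cache


@lru_cache(maxsize=2)
def double(x):
    return 2 * x


double(1)
double(2)
double(1)  # hit: 1 becomes the most recently used entry
double(3)  # miss: evicts 2, the least recently used entry
print(double.cache_info())  # -> CacheInfo(hits=1, misses=3, maxsize=2, currsize=2)
double.cache_clear()
print(double.cache_info())  # -> CacheInfo(hits=0, misses=0, maxsize=2, currsize=0)
```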
requirements.txt:

```diff
@@ -1,7 +1,7 @@
 apscheduler==3.10.1
 arrow==1.3.0
 backports.csv==1.0.7
-backports.functools-lru-cache==1.6.6
+backports.functools-lru-cache==2.0.0
 backports.zoneinfo==0.2.1;python_version<"3.9"
 beautifulsoup4==4.12.2
 bleach==6.1.0
```
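For installs driven by this requirements file (Tautulli also ships a vendored copy, updated in the hunk above), a quick, hypothetical sanity check that the new pin is what actually resolved:

```python
# Hypothetical check; requires Python 3.8+ for importlib.metadata.
from importlib.metadata import version

assert version('backports.functools_lru_cache') == '2.0.0'
```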