Move common libs to libs/common

Labrys of Knossos 2018-12-16 13:30:24 -05:00
commit 1f4bd41bcc
1612 changed files with 962 additions and 10 deletions

libs/common/dogpile/cache/__init__.py vendored Normal file

@@ -0,0 +1,4 @@
from .region import CacheRegion, register_backend, make_region # noqa
# backwards compat
from .. import __version__ # noqa

libs/common/dogpile/cache/api.py vendored Normal file

@@ -0,0 +1,215 @@
import operator
from ..util.compat import py3k
class NoValue(object):
"""Describe a missing cache value.
The :attr:`.NO_VALUE` module global
should be used.
"""
@property
def payload(self):
return self
def __repr__(self):
"""Ensure __repr__ is a consistent value in case NoValue is used to
fill another cache key.
"""
return '<dogpile.cache.api.NoValue object>'
if py3k:
def __bool__(self): # pragma NO COVERAGE
return False
else:
def __nonzero__(self): # pragma NO COVERAGE
return False
NO_VALUE = NoValue()
"""Value returned from ``get()`` that describes
a key not present."""
class CachedValue(tuple):
"""Represent a value stored in the cache.
:class:`.CachedValue` is a two-tuple of
``(payload, metadata)``, where ``metadata``
is dogpile.cache's tracking information (
currently the creation time). The metadata
and tuple structure is pickleable, if
the backend requires serialization.
"""
payload = property(operator.itemgetter(0))
"""Named accessor for the payload."""
metadata = property(operator.itemgetter(1))
"""Named accessor for the dogpile.cache metadata dictionary."""
def __new__(cls, payload, metadata):
return tuple.__new__(cls, (payload, metadata))
def __reduce__(self):
return CachedValue, (self.payload, self.metadata)
class CacheBackend(object):
"""Base class for backend implementations."""
key_mangler = None
"""Key mangling function.
May be None, or otherwise declared
as an ordinary instance method.
"""
def __init__(self, arguments): # pragma NO COVERAGE
"""Construct a new :class:`.CacheBackend`.
Subclasses should override this to
handle the given arguments.
:param arguments: The ``arguments`` parameter
passed to :func:`.make_region`.
"""
raise NotImplementedError()
@classmethod
def from_config_dict(cls, config_dict, prefix):
prefix_len = len(prefix)
return cls(
dict(
(key[prefix_len:], config_dict[key])
for key in config_dict
if key.startswith(prefix)
)
)
def has_lock_timeout(self):
return False
def get_mutex(self, key):
"""Return an optional mutexing object for the given key.
This object need only provide an ``acquire()``
and ``release()`` method.
May return ``None``, in which case the dogpile
lock will use a regular ``threading.Lock``
object to mutex concurrent threads for
value creation. The default implementation
returns ``None``.
Different backends may want to provide various
kinds of "mutex" objects, such as those which
link to lock files, distributed mutexes,
memcached semaphores, etc. Whatever
kind of system is best suited for the scope
and behavior of the caching backend.
A mutex that takes the key into account will
allow multiple regenerate operations across
keys to proceed simultaneously, while a mutex
that does not will serialize regenerate operations
to just one at a time across all keys in the region.
The latter approach, or a variant that involves
a modulus of the given key's hash value,
can be used as a means of throttling the total
number of value recreation operations that may
proceed at one time.
"""
return None
def get(self, key): # pragma NO COVERAGE
"""Retrieve a value from the cache.
The returned value should be an instance of
:class:`.CachedValue`, or ``NO_VALUE`` if
not present.
"""
raise NotImplementedError()
def get_multi(self, keys): # pragma NO COVERAGE
"""Retrieve multiple values from the cache.
The returned value should be a list, corresponding
to the list of keys given.
.. versionadded:: 0.5.0
"""
raise NotImplementedError()
def set(self, key, value): # pragma NO COVERAGE
"""Set a value in the cache.
The key will be whatever was passed
to the registry, processed by the
"key mangling" function, if any.
The value will always be an instance
of :class:`.CachedValue`.
"""
raise NotImplementedError()
def set_multi(self, mapping): # pragma NO COVERAGE
"""Set multiple values in the cache.
``mapping`` is a dict in which
the key will be whatever was passed
to the registry, processed by the
"key mangling" function, if any.
The value will always be an instance
of :class:`.CachedValue`.
When implementing a new :class:`.CacheBackend` or customizing via
:class:`.ProxyBackend`, be aware that when this method is invoked by
:meth:`.Region.get_or_create_multi`, the ``mapping`` values are the
same ones returned to the upstream caller. If the subclass alters the
values in any way, it must not do so 'in-place' on the ``mapping`` dict
-- that will have the undesirable effect of modifying the returned
values as well.
.. versionadded:: 0.5.0
"""
raise NotImplementedError()
def delete(self, key): # pragma NO COVERAGE
"""Delete a value from the cache.
The key will be whatever was passed
to the registry, processed by the
"key mangling" function, if any.
The behavior here should be idempotent,
that is, can be called any number of times
regardless of whether or not the
key exists.
"""
raise NotImplementedError()
def delete_multi(self, keys): # pragma NO COVERAGE
"""Delete multiple values from the cache.
The key will be whatever was passed
to the registry, processed by the
"key mangling" function, if any.
The behavior here should be idempotent,
that is, can be called any number of times
regardless of whether or not the
key exists.
.. versionadded:: 0.5.0
"""
raise NotImplementedError()
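
For illustration only (not part of the vendored code), a minimal dict-backed
backend that satisfies the contract documented above might look like the
following sketch; the class name is hypothetical::

    from dogpile.cache.api import CacheBackend, NO_VALUE

    class TrivialDictBackend(CacheBackend):
        """Illustrative backend storing values in a plain dict."""

        def __init__(self, arguments):
            # ``arguments`` is the dict passed to make_region().configure()
            self._store = arguments.get("cache_dict", {})

        def get(self, key):
            # return NO_VALUE (never None) when the key is absent
            return self._store.get(key, NO_VALUE)

        def get_multi(self, keys):
            return [self._store.get(key, NO_VALUE) for key in keys]

        def set(self, key, value):
            # ``value`` is already a CachedValue (payload, metadata) tuple
            self._store[key] = value

        def set_multi(self, mapping):
            # copy the mapping's items rather than mutating it in place
            self._store.update(mapping)

        def delete(self, key):
            # idempotent: deleting a missing key is not an error
            self._store.pop(key, None)

        def delete_multi(self, keys):
            for key in keys:
                self._store.pop(key, None)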

libs/common/dogpile/cache/backends/__init__.py vendored Normal file

@@ -0,0 +1,22 @@
from dogpile.cache.region import register_backend
register_backend(
"dogpile.cache.null", "dogpile.cache.backends.null", "NullBackend")
register_backend(
"dogpile.cache.dbm", "dogpile.cache.backends.file", "DBMBackend")
register_backend(
"dogpile.cache.pylibmc", "dogpile.cache.backends.memcached",
"PylibmcBackend")
register_backend(
"dogpile.cache.bmemcached", "dogpile.cache.backends.memcached",
"BMemcachedBackend")
register_backend(
"dogpile.cache.memcached", "dogpile.cache.backends.memcached",
"MemcachedBackend")
register_backend(
"dogpile.cache.memory", "dogpile.cache.backends.memory", "MemoryBackend")
register_backend(
"dogpile.cache.memory_pickle", "dogpile.cache.backends.memory",
"MemoryPickleBackend")
register_backend(
"dogpile.cache.redis", "dogpile.cache.backends.redis", "RedisBackend")

libs/common/dogpile/cache/backends/file.py vendored Normal file

@@ -0,0 +1,447 @@
"""
File Backends
------------------
Provides backends that deal with local filesystem access.
"""
from __future__ import with_statement
from ..api import CacheBackend, NO_VALUE
from contextlib import contextmanager
from ...util import compat
from ... import util
import os
__all__ = 'DBMBackend', 'FileLock', 'AbstractFileLock'
class DBMBackend(CacheBackend):
"""A file-backend using a dbm file to store keys.
Basic usage::
from dogpile.cache import make_region
region = make_region().configure(
'dogpile.cache.dbm',
expiration_time = 3600,
arguments = {
"filename":"/path/to/cachefile.dbm"
}
)
DBM access is provided using the Python ``anydbm`` module,
which selects a platform-specific dbm module to use.
This may be made to be more configurable in a future
release.
Note that different dbm modules have different behaviors.
Some dbm implementations handle their own locking, while
others don't. The :class:`.DBMBackend` uses a read/write
lockfile by default, which is compatible even with those
DBM implementations for which this is unnecessary,
though the behavior can be disabled.
The DBM backend by default makes use of two lockfiles.
One is in order to protect the DBM file itself from
concurrent writes, the other is to coordinate
value creation (i.e. the dogpile lock). By default,
these lockfiles use the ``flock()`` system call
for locking; this is **only available on Unix
platforms**. An alternative lock implementation, such as one
which is based on threads or uses a third-party system
such as `portalocker <https://pypi.python.org/pypi/portalocker>`_,
can be dropped in using the ``lock_factory`` argument
in conjunction with the :class:`.AbstractFileLock` base class.
Currently, the dogpile lock is against the entire
DBM file, not per key. This means there can
only be one "creator" job running at a time
per dbm file.
A future improvement might be to have the dogpile lock
using a filename that's based on a modulus of the key.
Locking on a filename that uniquely corresponds to the
key is problematic, since it's not generally safe to
delete lockfiles as the application runs, implying an
unlimited number of key-based files would need to be
created and never deleted.
Parameters to the ``arguments`` dictionary are
below.
:param filename: path of the filename in which to
create the DBM file. Note that some dbm backends
will change this name to have additional suffixes.
:param rw_lockfile: the name of the file to use for
read/write locking. If omitted, a default name
is used by appending the suffix ".rw.lock" to the
DBM filename. If False, then no lock is used.
:param dogpile_lockfile: the name of the file to use
for value creation, i.e. the dogpile lock. If
omitted, a default name is used by appending the
suffix ".dogpile.lock" to the DBM filename. If
False, then dogpile.cache uses the default dogpile
lock, a plain thread-based mutex.
:param lock_factory: a function or class which provides
for a read/write lock. Defaults to :class:`.FileLock`.
Custom implementations need to implement context-manager
based ``read()`` and ``write()`` functions - the
:class:`.AbstractFileLock` class is provided as a base class
which provides these methods based on individual read/write lock
functions. E.g. to replace the lock with the dogpile.core
:class:`.ReadWriteMutex`::
from dogpile.core.readwrite_lock import ReadWriteMutex
from dogpile.cache.backends.file import AbstractFileLock
class MutexLock(AbstractFileLock):
def __init__(self, filename):
self.mutex = ReadWriteMutex()
def acquire_read_lock(self, wait):
ret = self.mutex.acquire_read_lock(wait)
return wait or ret
def acquire_write_lock(self, wait):
ret = self.mutex.acquire_write_lock(wait)
return wait or ret
def release_read_lock(self):
return self.mutex.release_read_lock()
def release_write_lock(self):
return self.mutex.release_write_lock()
from dogpile.cache import make_region
region = make_region().configure(
"dogpile.cache.dbm",
expiration_time=300,
arguments={
"filename": "file.dbm",
"lock_factory": MutexLock
}
)
While the included :class:`.FileLock` uses ``os.flock()``, a
windows-compatible implementation can be built using a library
such as `portalocker <https://pypi.python.org/pypi/portalocker>`_.
.. versionadded:: 0.5.2
"""
def __init__(self, arguments):
self.filename = os.path.abspath(
os.path.normpath(arguments['filename'])
)
dir_, filename = os.path.split(self.filename)
self.lock_factory = arguments.get("lock_factory", FileLock)
self._rw_lock = self._init_lock(
arguments.get('rw_lockfile'),
".rw.lock", dir_, filename)
self._dogpile_lock = self._init_lock(
arguments.get('dogpile_lockfile'),
".dogpile.lock",
dir_, filename,
util.KeyReentrantMutex.factory)
# TODO: make this configurable
if compat.py3k:
import dbm
else:
import anydbm as dbm
self.dbmmodule = dbm
self._init_dbm_file()
def _init_lock(self, argument, suffix, basedir, basefile, wrapper=None):
if argument is None:
lock = self.lock_factory(os.path.join(basedir, basefile + suffix))
elif argument is not False:
lock = self.lock_factory(
os.path.abspath(
os.path.normpath(argument)
))
else:
return None
if wrapper:
lock = wrapper(lock)
return lock
def _init_dbm_file(self):
exists = os.access(self.filename, os.F_OK)
if not exists:
for ext in ('db', 'dat', 'pag', 'dir'):
if os.access(self.filename + os.extsep + ext, os.F_OK):
exists = True
break
if not exists:
fh = self.dbmmodule.open(self.filename, 'c')
fh.close()
def get_mutex(self, key):
# using one dogpile for the whole file. Other ways
# to do this might be using a set of files keyed to a
# hash/modulus of the key. the issue is it's never
# really safe to delete a lockfile as this can
# break other processes trying to get at the file
# at the same time - so handling unlimited keys
# can't imply unlimited filenames
if self._dogpile_lock:
return self._dogpile_lock(key)
else:
return None
@contextmanager
def _use_rw_lock(self, write):
if self._rw_lock is None:
yield
elif write:
with self._rw_lock.write():
yield
else:
with self._rw_lock.read():
yield
@contextmanager
def _dbm_file(self, write):
with self._use_rw_lock(write):
dbm = self.dbmmodule.open(
self.filename,
"w" if write else "r")
yield dbm
dbm.close()
def get(self, key):
with self._dbm_file(False) as dbm:
if hasattr(dbm, 'get'):
value = dbm.get(key, NO_VALUE)
else:
# gdbm objects lack a .get method
try:
value = dbm[key]
except KeyError:
value = NO_VALUE
if value is not NO_VALUE:
value = compat.pickle.loads(value)
return value
def get_multi(self, keys):
return [self.get(key) for key in keys]
def set(self, key, value):
with self._dbm_file(True) as dbm:
dbm[key] = compat.pickle.dumps(value,
compat.pickle.HIGHEST_PROTOCOL)
def set_multi(self, mapping):
with self._dbm_file(True) as dbm:
for key, value in mapping.items():
dbm[key] = compat.pickle.dumps(value,
compat.pickle.HIGHEST_PROTOCOL)
def delete(self, key):
with self._dbm_file(True) as dbm:
try:
del dbm[key]
except KeyError:
pass
def delete_multi(self, keys):
with self._dbm_file(True) as dbm:
for key in keys:
try:
del dbm[key]
except KeyError:
pass
class AbstractFileLock(object):
"""Coordinate read/write access to a file.
Typically this is a file-based lock, but it doesn't necessarily have to be.
The default implementation here is :class:`.FileLock`.
Implementations should provide the following methods::
* __init__()
* acquire_read_lock()
* acquire_write_lock()
* release_read_lock()
* release_write_lock()
The ``__init__()`` method accepts a single argument "filename", which
may be used as the "lock file", for those implementations that use a lock
file.
Note that multithreaded environments must provide a thread-safe
version of this lock. The recommended approach for file-
descriptor-based locks is to use a Python ``threading.local()`` so
that a unique file descriptor is held per thread. See the source
code of :class:`.FileLock` for an implementation example.
"""
def __init__(self, filename):
"""Constructor, is given the filename of a potential lockfile.
The usage of this filename is optional and no file is
created by default.
Raises ``NotImplementedError`` by default, must be
implemented by subclasses.
"""
raise NotImplementedError()
def acquire(self, wait=True):
"""Acquire the "write" lock.
This is a direct call to :meth:`.AbstractFileLock.acquire_write_lock`.
"""
return self.acquire_write_lock(wait)
def release(self):
"""Release the "write" lock.
This is a direct call to :meth:`.AbstractFileLock.release_write_lock`.
"""
self.release_write_lock()
@contextmanager
def read(self):
"""Provide a context manager for the "read" lock.
This method makes use of :meth:`.AbstractFileLock.acquire_read_lock`
and :meth:`.AbstractFileLock.release_read_lock`
"""
self.acquire_read_lock(True)
try:
yield
finally:
self.release_read_lock()
@contextmanager
def write(self):
"""Provide a context manager for the "write" lock.
This method makes use of :meth:`.AbstractFileLock.acquire_write_lock`
and :meth:`.AbstractFileLock.release_write_lock`
"""
self.acquire_write_lock(True)
try:
yield
finally:
self.release_write_lock()
@property
def is_open(self):
"""optional method."""
raise NotImplementedError()
def acquire_read_lock(self, wait):
"""Acquire a 'reader' lock.
Raises ``NotImplementedError`` by default, must be
implemented by subclasses.
"""
raise NotImplementedError()
def acquire_write_lock(self, wait):
"""Acquire a 'write' lock.
Raises ``NotImplementedError`` by default, must be
implemented by subclasses.
"""
raise NotImplementedError()
def release_read_lock(self):
"""Release a 'reader' lock.
Raises ``NotImplementedError`` by default, must be
implemented by subclasses.
"""
raise NotImplementedError()
def release_write_lock(self):
"""Release a 'writer' lock.
Raises ``NotImplementedError`` by default, must be
implemented by subclasses.
"""
raise NotImplementedError()
class FileLock(AbstractFileLock):
"""Use lockfiles to coordinate read/write access to a file.
Only works on Unix systems, using
`fcntl.flock() <http://docs.python.org/library/fcntl.html>`_.
"""
def __init__(self, filename):
self._filedescriptor = compat.threading.local()
self.filename = filename
@util.memoized_property
def _module(self):
import fcntl
return fcntl
@property
def is_open(self):
return hasattr(self._filedescriptor, 'fileno')
def acquire_read_lock(self, wait):
return self._acquire(wait, os.O_RDONLY, self._module.LOCK_SH)
def acquire_write_lock(self, wait):
return self._acquire(wait, os.O_WRONLY, self._module.LOCK_EX)
def release_read_lock(self):
self._release()
def release_write_lock(self):
self._release()
def _acquire(self, wait, wrflag, lockflag):
wrflag |= os.O_CREAT
fileno = os.open(self.filename, wrflag)
try:
if not wait:
lockflag |= self._module.LOCK_NB
self._module.flock(fileno, lockflag)
except IOError:
os.close(fileno)
if not wait:
# this is typically
# "[Errno 35] Resource temporarily unavailable",
# because of LOCK_NB
return False
else:
raise
else:
self._filedescriptor.fileno = fileno
return True
def _release(self):
try:
fileno = self._filedescriptor.fileno
except AttributeError:
return
else:
self._module.flock(fileno, self._module.LOCK_UN)
os.close(fileno)
del self._filedescriptor.fileno
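
Because the dogpile lock covers the whole DBM file, only one creation
function runs at a time for this backend. A sketch of how that is usually
exercised through the region API (the file path and creator are
illustrative)::

    from dogpile.cache import make_region

    region = make_region().configure(
        "dogpile.cache.dbm",
        expiration_time=300,
        arguments={"filename": "/tmp/example-cache.dbm"},
    )

    def load_report():
        # expensive work; while one thread/process regenerates the value,
        # others keep serving the existing (possibly stale) value
        return {"rows": 42}

    value = region.get_or_create("report", load_report)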

libs/common/dogpile/cache/backends/memcached.py vendored Normal file

@@ -0,0 +1,364 @@
"""
Memcached Backends
------------------
Provides backends for talking to `memcached <http://memcached.org>`_.
"""
from ..api import CacheBackend, NO_VALUE
from ...util import compat
from ... import util
import random
import time
__all__ = 'GenericMemcachedBackend', 'MemcachedBackend',\
'PylibmcBackend', 'BMemcachedBackend', 'MemcachedLock'
class MemcachedLock(object):
"""Simple distributed lock using memcached.
This is an adaptation of the lock featured at
http://amix.dk/blog/post/19386
"""
def __init__(self, client_fn, key, timeout=0):
self.client_fn = client_fn
self.key = "_lock" + key
self.timeout = timeout
def acquire(self, wait=True):
client = self.client_fn()
i = 0
while True:
if client.add(self.key, 1, self.timeout):
return True
elif not wait:
return False
else:
sleep_time = (((i + 1) * random.random()) + 2 ** i) / 2.5
time.sleep(sleep_time)
if i < 15:
i += 1
def release(self):
client = self.client_fn()
client.delete(self.key)
class GenericMemcachedBackend(CacheBackend):
"""Base class for memcached backends.
This base class accepts a number of parameters
common to all backends.
:param url: the string URL to connect to. Can be a single
string or a list of strings. This is the only argument
that's required.
:param distributed_lock: boolean, when True, will use a
memcached-lock as the dogpile lock (see :class:`.MemcachedLock`).
Use this when multiple
processes will be talking to the same memcached instance.
When left at False, dogpile will coordinate on a regular
threading mutex.
:param lock_timeout: integer, number of seconds after acquiring a lock that
memcached should expire it. This argument is only valid when
``distributed_lock`` is ``True``.
.. versionadded:: 0.5.7
:param memcached_expire_time: integer, when present will
be passed as the ``time`` parameter to ``pylibmc.Client.set``.
This is used to set the memcached expiry time for a value.
.. note::
This parameter is **different** from Dogpile's own
``expiration_time``, which is the number of seconds after
which Dogpile will consider the value to be expired.
When Dogpile considers a value to be expired,
it **continues to use the value** until generation
of a new value is complete, when using
:meth:`.CacheRegion.get_or_create`.
Therefore, if you are setting ``memcached_expire_time``, you'll
want to make sure it is greater than ``expiration_time``
by at least enough seconds for new values to be generated,
else the value won't be available during a regeneration,
forcing all threads to wait for a regeneration each time
a value expires.
The :class:`.GenericMemcachedBackend` uses a ``threading.local()``
object to store individual client objects per thread,
as most modern memcached clients do not appear to be inherently
threadsafe.
In particular, ``threading.local()`` has the advantage over pylibmc's
built-in thread pool in that it automatically discards objects
associated with a particular thread when that thread ends.
"""
set_arguments = {}
"""Additional arguments which will be passed
to the :meth:`set` method."""
def __init__(self, arguments):
self._imports()
# using a plain threading.local here. threading.local
# automatically deletes the __dict__ when a thread ends,
# so the idea is that this is superior to pylibmc's
# own ThreadMappedPool which doesn't handle this
# automatically.
self.url = util.to_list(arguments['url'])
self.distributed_lock = arguments.get('distributed_lock', False)
self.lock_timeout = arguments.get('lock_timeout', 0)
self.memcached_expire_time = arguments.get(
'memcached_expire_time', 0)
def has_lock_timeout(self):
return self.lock_timeout != 0
def _imports(self):
"""client library imports go here."""
raise NotImplementedError()
def _create_client(self):
"""Creation of a Client instance goes here."""
raise NotImplementedError()
@util.memoized_property
def _clients(self):
backend = self
class ClientPool(compat.threading.local):
def __init__(self):
self.memcached = backend._create_client()
return ClientPool()
@property
def client(self):
"""Return the memcached client.
This uses a threading.local by
default as it appears most modern
memcached libs aren't inherently
threadsafe.
"""
return self._clients.memcached
def get_mutex(self, key):
if self.distributed_lock:
return MemcachedLock(lambda: self.client, key,
timeout=self.lock_timeout)
else:
return None
def get(self, key):
value = self.client.get(key)
if value is None:
return NO_VALUE
else:
return value
def get_multi(self, keys):
values = self.client.get_multi(keys)
return [
NO_VALUE if key not in values
else values[key] for key in keys
]
def set(self, key, value):
self.client.set(
key,
value,
**self.set_arguments
)
def set_multi(self, mapping):
self.client.set_multi(
mapping,
**self.set_arguments
)
def delete(self, key):
self.client.delete(key)
def delete_multi(self, keys):
self.client.delete_multi(keys)
class MemcacheArgs(object):
"""Mixin which provides support for the 'time' argument to set(),
'min_compress_len' to other methods.
"""
def __init__(self, arguments):
self.min_compress_len = arguments.get('min_compress_len', 0)
self.set_arguments = {}
if "memcached_expire_time" in arguments:
self.set_arguments["time"] = arguments["memcached_expire_time"]
if "min_compress_len" in arguments:
self.set_arguments["min_compress_len"] = \
arguments["min_compress_len"]
super(MemcacheArgs, self).__init__(arguments)
pylibmc = None
class PylibmcBackend(MemcacheArgs, GenericMemcachedBackend):
"""A backend for the
`pylibmc <http://sendapatch.se/projects/pylibmc/index.html>`_
memcached client.
A configuration illustrating several of the optional
arguments described in the pylibmc documentation::
from dogpile.cache import make_region
region = make_region().configure(
'dogpile.cache.pylibmc',
expiration_time = 3600,
arguments = {
'url':["127.0.0.1"],
'binary':True,
'behaviors':{"tcp_nodelay": True,"ketama":True}
}
)
Arguments accepted here include those of
:class:`.GenericMemcachedBackend`, as well as
those below.
:param binary: sets the ``binary`` flag understood by
``pylibmc.Client``.
:param behaviors: a dictionary which will be passed to
``pylibmc.Client`` as the ``behaviors`` parameter.
:param min_compress_len: Integer, will be passed as the
``min_compress_len`` parameter to the ``pylibmc.Client.set``
method.
"""
def __init__(self, arguments):
self.binary = arguments.get('binary', False)
self.behaviors = arguments.get('behaviors', {})
super(PylibmcBackend, self).__init__(arguments)
def _imports(self):
global pylibmc
import pylibmc # noqa
def _create_client(self):
return pylibmc.Client(
self.url,
binary=self.binary,
behaviors=self.behaviors
)
memcache = None
class MemcachedBackend(MemcacheArgs, GenericMemcachedBackend):
"""A backend using the standard
`Python-memcached <http://www.tummy.com/Community/software/\
python-memcached/>`_
library.
Example::
from dogpile.cache import make_region
region = make_region().configure(
'dogpile.cache.memcached',
expiration_time = 3600,
arguments = {
'url':"127.0.0.1:11211"
}
)
"""
def _imports(self):
global memcache
import memcache # noqa
def _create_client(self):
return memcache.Client(self.url)
bmemcached = None
class BMemcachedBackend(GenericMemcachedBackend):
"""A backend for the
`python-binary-memcached <https://github.com/jaysonsantos/\
python-binary-memcached>`_
memcached client.
This is a pure Python memcached client which
includes the ability to authenticate with a memcached
server using SASL.
A typical configuration using username/password::
from dogpile.cache import make_region
region = make_region().configure(
'dogpile.cache.bmemcached',
expiration_time = 3600,
arguments = {
'url':["127.0.0.1"],
'username':'scott',
'password':'tiger'
}
)
Arguments which can be passed to the ``arguments``
dictionary include:
:param username: optional username, will be used for
SASL authentication.
:param password: optional password, will be used for
SASL authentication.
"""
def __init__(self, arguments):
self.username = arguments.get('username', None)
self.password = arguments.get('password', None)
super(BMemcachedBackend, self).__init__(arguments)
def _imports(self):
global bmemcached
import bmemcached
class RepairBMemcachedAPI(bmemcached.Client):
"""Repairs BMemcached's non-standard method
signatures, which was fixed in BMemcached
ef206ed4473fec3b639e.
"""
def add(self, key, value, timeout=0):
try:
return super(RepairBMemcachedAPI, self).add(
key, value, timeout)
except ValueError:
return False
self.Client = RepairBMemcachedAPI
def _create_client(self):
return self.Client(
self.url,
username=self.username,
password=self.password
)
def delete_multi(self, keys):
"""python-binary-memcached api does not implements delete_multi"""
for key in keys:
self.delete(key)
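
Pulling the lock-related arguments documented above together, a
configuration that uses the distributed memcached lock might look like the
following sketch (the numbers are illustrative)::

    from dogpile.cache import make_region

    region = make_region().configure(
        "dogpile.cache.pylibmc",
        expiration_time=3600,              # dogpile's own expiration
        arguments={
            "url": ["127.0.0.1"],
            "distributed_lock": True,      # use MemcachedLock across processes
            "lock_timeout": 30,            # memcached drops a stuck lock
            # keep the memcached TTL above expiration_time so the stale
            # value is still readable while a new one is generated
            "memcached_expire_time": 3600 + 120,
        },
    )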

libs/common/dogpile/cache/backends/memory.py vendored Normal file

@@ -0,0 +1,124 @@
"""
Memory Backends
---------------
Provides simple dictionary-based backends.
The two backends are :class:`.MemoryBackend` and :class:`.MemoryPickleBackend`;
the latter applies a serialization step to cached values while the former
places the value as given into the dictionary.
"""
from ..api import CacheBackend, NO_VALUE
from ...util.compat import pickle
class MemoryBackend(CacheBackend):
"""A backend that uses a plain dictionary.
There is no size management, and values which
are placed into the dictionary will remain
until explicitly removed. Note that
Dogpile's expiration of items is based on
timestamps and does not remove them from
the cache.
E.g.::
from dogpile.cache import make_region
region = make_region().configure(
'dogpile.cache.memory'
)
To use a Python dictionary of your choosing,
it can be passed in with the ``cache_dict``
argument::
my_dictionary = {}
region = make_region().configure(
'dogpile.cache.memory',
arguments={
"cache_dict":my_dictionary
}
)
"""
pickle_values = False
def __init__(self, arguments):
self._cache = arguments.pop("cache_dict", {})
def get(self, key):
value = self._cache.get(key, NO_VALUE)
if value is not NO_VALUE and self.pickle_values:
value = pickle.loads(value)
return value
def get_multi(self, keys):
ret = [
self._cache.get(key, NO_VALUE)
for key in keys]
if self.pickle_values:
ret = [
pickle.loads(value)
if value is not NO_VALUE else value
for value in ret
]
return ret
def set(self, key, value):
if self.pickle_values:
value = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
self._cache[key] = value
def set_multi(self, mapping):
pickle_values = self.pickle_values
for key, value in mapping.items():
if pickle_values:
value = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
self._cache[key] = value
def delete(self, key):
self._cache.pop(key, None)
def delete_multi(self, keys):
for key in keys:
self._cache.pop(key, None)
class MemoryPickleBackend(MemoryBackend):
"""A backend that uses a plain dictionary, but serializes objects on
:meth:`.MemoryBackend.set` and deserializes them on :meth:`.MemoryBackend.get`.
E.g.::
from dogpile.cache import make_region
region = make_region().configure(
'dogpile.cache.memory_pickle'
)
Serializing cached values with pickle means that the object placed in the
cache is a copy of the originally given object, so any subsequent changes
to the given object aren't reflected in the cached value; this makes the
backend behave the same way as other backends which make use of
serialization.
The serialization is performed via pickle, and incurs the same
performance hit in doing so as that of other backends; in this way
the :class:`.MemoryPickleBackend` performance is somewhere in between
that of the pure :class:`.MemoryBackend` and the remote server oriented
backends such as that of Memcached or Redis.
Pickle behavior here is the same as that of the Redis backend, using
either ``cPickle`` or ``pickle`` and specifying ``HIGHEST_PROTOCOL``
upon serialize.
.. versionadded:: 0.5.3
"""
pickle_values = True
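
A quick, illustrative way to see the difference between the two memory
backends described above::

    from dogpile.cache import make_region

    plain = make_region().configure("dogpile.cache.memory")
    pickled = make_region().configure("dogpile.cache.memory_pickle")

    data = ["a"]
    plain.set("k", data)
    pickled.set("k", data)
    data.append("b")

    plain.get("k")    # ['a', 'b'] -- same object, later mutation is visible
    pickled.get("k")  # ['a']      -- pickled copy, mutation is not visible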

libs/common/dogpile/cache/backends/null.py vendored Normal file

@@ -0,0 +1,62 @@
"""
Null Backend
-------------
The Null backend does not do any caching at all. It can be
used to test behavior without caching, or as a means of disabling
caching for a region that is otherwise used normally.
.. versionadded:: 0.5.4
"""
from ..api import CacheBackend, NO_VALUE
__all__ = ['NullBackend']
class NullLock(object):
def acquire(self, wait=True):
return True
def release(self):
pass
class NullBackend(CacheBackend):
"""A "null" backend that effectively disables all cache operations.
Basic usage::
from dogpile.cache import make_region
region = make_region().configure(
'dogpile.cache.null'
)
"""
def __init__(self, arguments):
pass
def get_mutex(self, key):
return NullLock()
def get(self, key):
return NO_VALUE
def get_multi(self, keys):
return [NO_VALUE for k in keys]
def set(self, key, value):
pass
def set_multi(self, mapping):
pass
def delete(self, key):
pass
def delete_multi(self, keys):
pass

libs/common/dogpile/cache/backends/redis.py vendored Normal file

@@ -0,0 +1,183 @@
"""
Redis Backends
------------------
Provides backends for talking to `Redis <http://redis.io>`_.
"""
from __future__ import absolute_import
from ..api import CacheBackend, NO_VALUE
from ...util.compat import pickle, u
redis = None
__all__ = 'RedisBackend',
class RedisBackend(CacheBackend):
"""A `Redis <http://redis.io/>`_ backend, using the
`redis-py <http://pypi.python.org/pypi/redis/>`_ backend.
Example configuration::
from dogpile.cache import make_region
region = make_region().configure(
'dogpile.cache.redis',
arguments = {
'host': 'localhost',
'port': 6379,
'db': 0,
'redis_expiration_time': 60*60*2, # 2 hours
'distributed_lock': True
}
)
Arguments accepted in the arguments dictionary:
:param url: string. If provided, will override separate host/port/db
params. The format is that accepted by ``StrictRedis.from_url()``.
.. versionadded:: 0.4.1
:param host: string, default is ``localhost``.
:param password: string, default is no password.
.. versionadded:: 0.4.1
:param port: integer, default is ``6379``.
:param db: integer, default is ``0``.
:param redis_expiration_time: integer, number of seconds after setting
a value that Redis should expire it. This should be larger than dogpile's
cache expiration. By default no expiration is set.
:param distributed_lock: boolean, when True, will use a
redis-lock as the dogpile lock.
Use this when multiple
processes will be talking to the same redis instance.
When left at False, dogpile will coordinate on a regular
threading mutex.
:param lock_timeout: integer, number of seconds after acquiring a lock that
Redis should expire it. This argument is only valid when
``distributed_lock`` is ``True``.
.. versionadded:: 0.5.0
:param socket_timeout: float, seconds for socket timeout.
Default is None (no timeout).
.. versionadded:: 0.5.4
:param lock_sleep: integer, number of seconds to sleep when failed to
acquire a lock. This argument is only valid when
``distributed_lock`` is ``True``.
.. versionadded:: 0.5.0
:param connection_pool: ``redis.ConnectionPool`` object. If provided,
this object supersedes other connection arguments passed to the
``redis.StrictRedis`` instance, including url and/or host as well as
socket_timeout, and will be passed to ``redis.StrictRedis`` as the
source of connectivity.
.. versionadded:: 0.5.4
"""
def __init__(self, arguments):
arguments = arguments.copy()
self._imports()
self.url = arguments.pop('url', None)
self.host = arguments.pop('host', 'localhost')
self.password = arguments.pop('password', None)
self.port = arguments.pop('port', 6379)
self.db = arguments.pop('db', 0)
self.distributed_lock = arguments.get('distributed_lock', False)
self.socket_timeout = arguments.pop('socket_timeout', None)
self.lock_timeout = arguments.get('lock_timeout', None)
self.lock_sleep = arguments.get('lock_sleep', 0.1)
self.redis_expiration_time = arguments.pop('redis_expiration_time', 0)
self.connection_pool = arguments.get('connection_pool', None)
self.client = self._create_client()
def _imports(self):
# defer imports until backend is used
global redis
import redis # noqa
def _create_client(self):
if self.connection_pool is not None:
# the connection pool already has all other connection
# options present within, so here we disregard socket_timeout
# and others.
return redis.StrictRedis(connection_pool=self.connection_pool)
args = {}
if self.socket_timeout:
args['socket_timeout'] = self.socket_timeout
if self.url is not None:
args.update(url=self.url)
return redis.StrictRedis.from_url(**args)
else:
args.update(
host=self.host, password=self.password,
port=self.port, db=self.db
)
return redis.StrictRedis(**args)
def get_mutex(self, key):
if self.distributed_lock:
return self.client.lock(u('_lock{0}').format(key),
self.lock_timeout, self.lock_sleep)
else:
return None
def get(self, key):
value = self.client.get(key)
if value is None:
return NO_VALUE
return pickle.loads(value)
def get_multi(self, keys):
if not keys:
return []
values = self.client.mget(keys)
return [
pickle.loads(v) if v is not None else NO_VALUE
for v in values]
def set(self, key, value):
if self.redis_expiration_time:
self.client.setex(key, self.redis_expiration_time,
pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
else:
self.client.set(key, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
def set_multi(self, mapping):
mapping = dict(
(k, pickle.dumps(v, pickle.HIGHEST_PROTOCOL))
for k, v in mapping.items()
)
if not self.redis_expiration_time:
self.client.mset(mapping)
else:
pipe = self.client.pipeline()
for key, value in mapping.items():
pipe.setex(key, self.redis_expiration_time, value)
pipe.execute()
def delete(self, key):
self.client.delete(key)
def delete_multi(self, keys):
self.client.delete(*keys)
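
Complementing the host/port example above, a URL-based configuration that
also exercises the locking arguments might look like this sketch (all
values are illustrative)::

    from dogpile.cache import make_region

    region = make_region().configure(
        "dogpile.cache.redis",
        expiration_time=3600,  # dogpile's logical expiration
        arguments={
            "url": "redis://localhost:6379/0",
            # Redis TTL; kept above expiration_time so a stale value is
            # still readable while a new one is being generated
            "redis_expiration_time": 3600 + 120,
            "distributed_lock": True,
            "lock_timeout": 30,
            "lock_sleep": 0.2,
        },
    )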

libs/common/dogpile/cache/exception.py vendored Normal file

@@ -0,0 +1,25 @@
"""Exception classes for dogpile.cache."""
class DogpileCacheException(Exception):
"""Base Exception for dogpile.cache exceptions to inherit from."""
class RegionAlreadyConfigured(DogpileCacheException):
"""CacheRegion instance is already configured."""
class RegionNotConfigured(DogpileCacheException):
"""CacheRegion instance has not been configured."""
class ValidationError(DogpileCacheException):
"""Error validating a value or option."""
class PluginNotFound(DogpileCacheException):
"""The specified plugin could not be found.
.. versionadded:: 0.6.4
"""


libs/common/dogpile/cache/plugins/mako_cache.py vendored Normal file

@@ -0,0 +1,90 @@
"""
Mako Integration
----------------
dogpile.cache includes a `Mako <http://www.makotemplates.org>`_ plugin
that replaces `Beaker <http://beaker.groovie.org>`_
as the cache backend.
Set up a Mako template lookup using the "dogpile.cache" cache implementation
and a region dictionary::
from dogpile.cache import make_region
from mako.lookup import TemplateLookup
my_regions = {
"local":make_region().configure(
"dogpile.cache.dbm",
expiration_time=360,
arguments={"filename":"file.dbm"}
),
"memcached":make_region().configure(
"dogpile.cache.pylibmc",
expiration_time=3600,
arguments={"url":["127.0.0.1"]}
)
}
mako_lookup = TemplateLookup(
directories=["/myapp/templates"],
cache_impl="dogpile.cache",
cache_args={
'regions':my_regions
}
)
To use the above configuration in a template, use the ``cached=True``
argument on any Mako tag which accepts it, in conjunction with the
name of the desired region as the ``cache_region`` argument::
<%def name="mysection()" cached="True" cache_region="memcached">
some content that's cached
</%def>
"""
from mako.cache import CacheImpl
class MakoPlugin(CacheImpl):
"""A Mako ``CacheImpl`` which talks to dogpile.cache."""
def __init__(self, cache):
super(MakoPlugin, self).__init__(cache)
try:
self.regions = self.cache.template.cache_args['regions']
except KeyError:
raise KeyError(
"'cache_regions' argument is required on the "
"Mako Lookup or Template object for usage "
"with the dogpile.cache plugin.")
def _get_region(self, **kw):
try:
region = kw['region']
except KeyError:
raise KeyError(
"'cache_region' argument must be specified with 'cache=True'"
"within templates for usage with the dogpile.cache plugin.")
try:
return self.regions[region]
except KeyError:
raise KeyError("No such region '%s'" % region)
def get_and_replace(self, key, creation_function, **kw):
expiration_time = kw.pop("timeout", None)
return self._get_region(**kw).get_or_create(
key, creation_function,
expiration_time=expiration_time)
def get_or_create(self, key, creation_function, **kw):
return self.get_and_replace(key, creation_function, **kw)
def put(self, key, value, **kw):
self._get_region(**kw).put(key, value)
def get(self, key, **kw):
expiration_time = kw.pop("timeout", None)
return self._get_region(**kw).get(key, expiration_time=expiration_time)
def invalidate(self, key, **kw):
self._get_region(**kw).delete(key)

libs/common/dogpile/cache/proxy.py vendored Normal file

@@ -0,0 +1,95 @@
"""
Proxy Backends
------------------
Provides a utility and a decorator class that allow for modifying the behavior
of different backends without altering the class itself or having to extend the
base backend.
.. versionadded:: 0.5.0 Added support for the :class:`.ProxyBackend` class.
"""
from .api import CacheBackend
class ProxyBackend(CacheBackend):
"""A decorator class for altering the functionality of backends.
Basic usage::
from dogpile.cache import make_region
from dogpile.cache.proxy import ProxyBackend
class MyFirstProxy(ProxyBackend):
def get(self, key):
# ... custom code goes here ...
return self.proxied.get(key)
def set(self, key, value):
# ... custom code goes here ...
self.proxied.set(key)
class MySecondProxy(ProxyBackend):
def get(self, key):
# ... custom code goes here ...
return self.proxied.get(key)
region = make_region().configure(
'dogpile.cache.dbm',
expiration_time = 3600,
arguments = {
"filename":"/path/to/cachefile.dbm"
},
wrap = [ MyFirstProxy, MySecondProxy ]
)
Classes that extend :class:`.ProxyBackend` can be stacked
together. The ``.proxied`` property will always
point to either the concrete backend instance or
the next proxy in the chain that a method can be
delegated towards.
.. versionadded:: 0.5.0
"""
def __init__(self, *args, **kwargs):
self.proxied = None
def wrap(self, backend):
''' Take a backend as an argument and set up the self.proxied property.
Return an object that can be used as a backend by a :class:`.CacheRegion`
object.
'''
assert(
isinstance(backend, CacheBackend) or
isinstance(backend, ProxyBackend))
self.proxied = backend
return self
#
# Delegate any functions that are not already overridden to
# the proxied backend
#
def get(self, key):
return self.proxied.get(key)
def set(self, key, value):
self.proxied.set(key, value)
def delete(self, key):
self.proxied.delete(key)
def get_multi(self, keys):
return self.proxied.get_multi(keys)
def set_multi(self, mapping):
self.proxied.set_multi(mapping)
def delete_multi(self, keys):
self.proxied.delete_multi(keys)
def get_mutex(self, key):
return self.proxied.get_mutex(key)
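
As a concrete, purely illustrative variant of the skeleton proxies above, a
proxy that counts cache misses before delegating to the wrapped backend::

    from dogpile.cache.api import NO_VALUE
    from dogpile.cache.proxy import ProxyBackend

    class MissCountingProxy(ProxyBackend):
        """Counts misses, then delegates to the proxied backend."""

        misses = 0

        def get(self, key):
            value = self.proxied.get(key)
            if value is NO_VALUE:
                MissCountingProxy.misses += 1
            return value

It would be activated through the same ``wrap=[...]`` argument shown in the
docstring above.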

libs/common/dogpile/cache/region.py vendored Normal file

File diff suppressed because it is too large

libs/common/dogpile/cache/util.py vendored Normal file

@@ -0,0 +1,145 @@
from hashlib import sha1
from ..util import compat
from ..util import langhelpers
def function_key_generator(namespace, fn, to_str=compat.string_type):
"""Return a function that generates a string
key, based on a given function as well as
arguments to the returned function itself.
This is used by :meth:`.CacheRegion.cache_on_arguments`
to generate a cache key from a decorated function.
An alternate function may be used by specifying
the :paramref:`.CacheRegion.function_key_generator` argument
for :class:`.CacheRegion`.
.. seealso::
:func:`.kwarg_function_key_generator` - similar function that also
takes keyword arguments into account
"""
if namespace is None:
namespace = '%s:%s' % (fn.__module__, fn.__name__)
else:
namespace = '%s:%s|%s' % (fn.__module__, fn.__name__, namespace)
args = compat.inspect_getargspec(fn)
has_self = args[0] and args[0][0] in ('self', 'cls')
def generate_key(*args, **kw):
if kw:
raise ValueError(
"dogpile.cache's default key creation "
"function does not accept keyword arguments.")
if has_self:
args = args[1:]
return namespace + "|" + " ".join(map(to_str, args))
return generate_key
def function_multi_key_generator(namespace, fn, to_str=compat.string_type):
if namespace is None:
namespace = '%s:%s' % (fn.__module__, fn.__name__)
else:
namespace = '%s:%s|%s' % (fn.__module__, fn.__name__, namespace)
args = compat.inspect_getargspec(fn)
has_self = args[0] and args[0][0] in ('self', 'cls')
def generate_keys(*args, **kw):
if kw:
raise ValueError(
"dogpile.cache's default key creation "
"function does not accept keyword arguments.")
if has_self:
args = args[1:]
return [namespace + "|" + key for key in map(to_str, args)]
return generate_keys
def kwarg_function_key_generator(namespace, fn, to_str=compat.string_type):
"""Return a function that generates a string
key, based on a given function as well as
arguments to the returned function itself.
For kwargs passed in, we will build a dict of
all argname (key) argvalue (values) including
default args from the argspec and then
alphabetize the list before generating the
key.
.. versionadded:: 0.6.2
.. seealso::
:func:`.function_key_generator` - default key generation function
"""
if namespace is None:
namespace = '%s:%s' % (fn.__module__, fn.__name__)
else:
namespace = '%s:%s|%s' % (fn.__module__, fn.__name__, namespace)
argspec = compat.inspect_getargspec(fn)
default_list = list(argspec.defaults or [])
# Reverse the list, as we want to compare the argspec by negative index,
# meaning default_list[0] should be args[-1], which works well with
# enumerate()
default_list.reverse()
# use idx*-1 to create the correct right-lookup index.
args_with_defaults = dict((argspec.args[(idx*-1)], default)
for idx, default in enumerate(default_list, 1))
if argspec.args and argspec.args[0] in ('self', 'cls'):
arg_index_start = 1
else:
arg_index_start = 0
def generate_key(*args, **kwargs):
as_kwargs = dict(
[(argspec.args[idx], arg)
for idx, arg in enumerate(args[arg_index_start:],
arg_index_start)])
as_kwargs.update(kwargs)
for arg, val in args_with_defaults.items():
if arg not in as_kwargs:
as_kwargs[arg] = val
argument_values = [as_kwargs[key]
for key in sorted(as_kwargs.keys())]
return namespace + '|' + " ".join(map(to_str, argument_values))
return generate_key
def sha1_mangle_key(key):
"""a SHA1 key mangler."""
return sha1(key).hexdigest()
def length_conditional_mangler(length, mangler):
"""a key mangler that mangles if the length of the key is
past a certain threshold.
"""
def mangle(key):
if len(key) >= length:
return mangler(key)
else:
return key
return mangle
# in the 0.6 release these functions were moved to the dogpile.util namespace.
# They are linked here to maintain compatibility with older versions.
coerce_string_conf = langhelpers.coerce_string_conf
KeyReentrantMutex = langhelpers.KeyReentrantMutex
memoized_property = langhelpers.memoized_property
PluginLoader = langhelpers.PluginLoader
to_list = langhelpers.to_list
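
To make the generated key format concrete, a small sketch of what the two
generators above produce for the same function (assuming it is defined in
``__main__``)::

    from dogpile.cache.util import (
        function_key_generator, kwarg_function_key_generator)

    def get_user(user_id, region_name='us'):
        ...

    # positional-only generator: kwargs are rejected, defaults are ignored
    gen = function_key_generator(None, get_user)
    gen(12)    # -> '__main__:get_user|12'

    # kwarg-aware generator: defaults are filled in, argument names sorted,
    # then the values joined in that order
    kgen = kwarg_function_key_generator(None, get_user)
    kgen(12)   # -> '__main__:get_user|us 12'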