Mirror of https://github.com/Tautulli/Tautulli.git, synced 2025-08-21 05:43:22 -07:00
Remove unused Python 2 libraries
parent 53369cd8a6
commit f0b700233f
21 changed files with 0 additions and 10752 deletions
2392  lib/argparse.py
File diff suppressed because it is too large
@@ -1,21 +0,0 @@
Copyright 2009 Brian Quinlan. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY BRIAN QUINLAN "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE FREEBSD PROJECT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -1,3 +0,0 @@
from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)
@@ -1,23 +0,0 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Execute computations asynchronously using threads or processes."""

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

from concurrent.futures._base import (FIRST_COMPLETED,
                                      FIRST_EXCEPTION,
                                      ALL_COMPLETED,
                                      CancelledError,
                                      TimeoutError,
                                      Future,
                                      Executor,
                                      wait,
                                      as_completed)
from concurrent.futures.thread import ThreadPoolExecutor

# Jython doesn't have multiprocessing
try:
    from concurrent.futures.process import ProcessPoolExecutor
except ImportError:
    pass
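For context, a minimal sketch (not part of this diff) of the public API the backport above exposed; Python 3's built-in concurrent.futures module provides the same names, which is why the vendored copy can be removed:

    from concurrent.futures import ThreadPoolExecutor, as_completed

    def square(n):
        return n * n  # stand-in for real I/O- or CPU-bound work

    with ThreadPoolExecutor(max_workers=4) as pool:
        futures = [pool.submit(square, n) for n in range(8)]
        for future in as_completed(futures):
            print(future.result())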
@@ -1,605 +0,0 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

from __future__ import with_statement
import logging
import threading
import time

from concurrent.futures._compat import reraise

try:
    from collections import namedtuple
except ImportError:
    from concurrent.futures._compat import namedtuple

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'

# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'

_FUTURE_STATES = [
    PENDING,
    RUNNING,
    CANCELLED,
    CANCELLED_AND_NOTIFIED,
    FINISHED
]

_STATE_TO_DESCRIPTION_MAP = {
    PENDING: "pending",
    RUNNING: "running",
    CANCELLED: "cancelled",
    CANCELLED_AND_NOTIFIED: "cancelled",
    FINISHED: "finished"
}

# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")

class Error(Exception):
    """Base class for all future-related exceptions."""
    pass

class CancelledError(Error):
    """The Future was cancelled."""
    pass

class TimeoutError(Error):
    """The operation exceeded the given deadline."""
    pass

class _Waiter(object):
    """Provides the event that wait() and as_completed() block on."""
    def __init__(self):
        self.event = threading.Event()
        self.finished_futures = []

    def add_result(self, future):
        self.finished_futures.append(future)

    def add_exception(self, future):
        self.finished_futures.append(future)

    def add_cancelled(self, future):
        self.finished_futures.append(future)

class _AsCompletedWaiter(_Waiter):
    """Used by as_completed()."""

    def __init__(self):
        super(_AsCompletedWaiter, self).__init__()
        self.lock = threading.Lock()

    def add_result(self, future):
        with self.lock:
            super(_AsCompletedWaiter, self).add_result(future)
            self.event.set()

    def add_exception(self, future):
        with self.lock:
            super(_AsCompletedWaiter, self).add_exception(future)
            self.event.set()

    def add_cancelled(self, future):
        with self.lock:
            super(_AsCompletedWaiter, self).add_cancelled(future)
            self.event.set()

class _FirstCompletedWaiter(_Waiter):
    """Used by wait(return_when=FIRST_COMPLETED)."""

    def add_result(self, future):
        super(_FirstCompletedWaiter, self).add_result(future)
        self.event.set()

    def add_exception(self, future):
        super(_FirstCompletedWaiter, self).add_exception(future)
        self.event.set()

    def add_cancelled(self, future):
        super(_FirstCompletedWaiter, self).add_cancelled(future)
        self.event.set()

class _AllCompletedWaiter(_Waiter):
    """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""

    def __init__(self, num_pending_calls, stop_on_exception):
        self.num_pending_calls = num_pending_calls
        self.stop_on_exception = stop_on_exception
        self.lock = threading.Lock()
        super(_AllCompletedWaiter, self).__init__()

    def _decrement_pending_calls(self):
        with self.lock:
            self.num_pending_calls -= 1
            if not self.num_pending_calls:
                self.event.set()

    def add_result(self, future):
        super(_AllCompletedWaiter, self).add_result(future)
        self._decrement_pending_calls()

    def add_exception(self, future):
        super(_AllCompletedWaiter, self).add_exception(future)
        if self.stop_on_exception:
            self.event.set()
        else:
            self._decrement_pending_calls()

    def add_cancelled(self, future):
        super(_AllCompletedWaiter, self).add_cancelled(future)
        self._decrement_pending_calls()

class _AcquireFutures(object):
    """A context manager that does an ordered acquire of Future conditions."""

    def __init__(self, futures):
        self.futures = sorted(futures, key=id)

    def __enter__(self):
        for future in self.futures:
            future._condition.acquire()

    def __exit__(self, *args):
        for future in self.futures:
            future._condition.release()

def _create_and_install_waiters(fs, return_when):
    if return_when == _AS_COMPLETED:
        waiter = _AsCompletedWaiter()
    elif return_when == FIRST_COMPLETED:
        waiter = _FirstCompletedWaiter()
    else:
        pending_count = sum(
                f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)

        if return_when == FIRST_EXCEPTION:
            waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
        elif return_when == ALL_COMPLETED:
            waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
        else:
            raise ValueError("Invalid return condition: %r" % return_when)

    for f in fs:
        f._waiters.append(waiter)

    return waiter

def as_completed(fs, timeout=None):
    """An iterator over the given futures that yields each as it completes.

    Args:
        fs: The sequence of Futures (possibly created by different Executors)
            to iterate over.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.

    Returns:
        An iterator that yields the given Futures as they complete (finished
        or cancelled).

    Raises:
        TimeoutError: If the entire result iterator could not be generated
            before the given timeout.
    """
    if timeout is not None:
        end_time = timeout + time.time()

    with _AcquireFutures(fs):
        finished = set(
                f for f in fs
                if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        pending = set(fs) - finished
        waiter = _create_and_install_waiters(fs, _AS_COMPLETED)

    try:
        for future in finished:
            yield future

        while pending:
            if timeout is None:
                wait_timeout = None
            else:
                wait_timeout = end_time - time.time()
                if wait_timeout < 0:
                    raise TimeoutError(
                            '%d (of %d) futures unfinished' % (
                            len(pending), len(fs)))

            waiter.event.wait(wait_timeout)

            with waiter.lock:
                finished = waiter.finished_futures
                waiter.finished_futures = []
                waiter.event.clear()

            for future in finished:
                yield future
                pending.remove(future)

    finally:
        for f in fs:
            f._waiters.remove(waiter)

DoneAndNotDoneFutures = namedtuple(
        'DoneAndNotDoneFutures', 'done not_done')

def wait(fs, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the futures in the given sequence to complete.

    Args:
        fs: The sequence of Futures (possibly created by different Executors)
            to wait upon.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.
        return_when: Indicates when this function should return. The options
            are:

            FIRST_COMPLETED - Return when any future finishes or is
                              cancelled.
            FIRST_EXCEPTION - Return when any future finishes by raising an
                              exception. If no future raises an exception
                              then it is equivalent to ALL_COMPLETED.
            ALL_COMPLETED - Return when all futures finish or are cancelled.

    Returns:
        A named 2-tuple of sets. The first set, named 'done', contains the
        futures that completed (finished or were cancelled) before the wait
        completed. The second set, named 'not_done', contains uncompleted
        futures.
    """
    with _AcquireFutures(fs):
        done = set(f for f in fs
                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        not_done = set(fs) - done

        if (return_when == FIRST_COMPLETED) and done:
            return DoneAndNotDoneFutures(done, not_done)
        elif (return_when == FIRST_EXCEPTION) and done:
            if any(f for f in done
                   if not f.cancelled() and f.exception() is not None):
                return DoneAndNotDoneFutures(done, not_done)

        if len(done) == len(fs):
            return DoneAndNotDoneFutures(done, not_done)

        waiter = _create_and_install_waiters(fs, return_when)

    waiter.event.wait(timeout)
    for f in fs:
        f._waiters.remove(waiter)

    done.update(waiter.finished_futures)
    return DoneAndNotDoneFutures(done, set(fs) - done)

class Future(object):
    """Represents the result of an asynchronous computation."""

    def __init__(self):
        """Initializes the future. Should not be called by clients."""
        self._condition = threading.Condition()
        self._state = PENDING
        self._result = None
        self._exception = None
        self._traceback = None
        self._waiters = []
        self._done_callbacks = []

    def _invoke_callbacks(self):
        for callback in self._done_callbacks:
            try:
                callback(self)
            except Exception:
                LOGGER.exception('exception calling callback for %r', self)

    def __repr__(self):
        with self._condition:
            if self._state == FINISHED:
                if self._exception:
                    return '<Future at %s state=%s raised %s>' % (
                        hex(id(self)),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._exception.__class__.__name__)
                else:
                    return '<Future at %s state=%s returned %s>' % (
                        hex(id(self)),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._result.__class__.__name__)
            return '<Future at %s state=%s>' % (
                    hex(id(self)),
                    _STATE_TO_DESCRIPTION_MAP[self._state])

    def cancel(self):
        """Cancel the future if possible.

        Returns True if the future was cancelled, False otherwise. A future
        cannot be cancelled if it is running or has already completed.
        """
        with self._condition:
            if self._state in [RUNNING, FINISHED]:
                return False

            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                return True

            self._state = CANCELLED
            self._condition.notify_all()

        self._invoke_callbacks()
        return True

    def cancelled(self):
        """Return True if the future was cancelled."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]

    def running(self):
        """Return True if the future is currently executing."""
        with self._condition:
            return self._state == RUNNING

    def done(self):
        """Return True if the future was cancelled or finished executing."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]

    def __get_result(self):
        if self._exception:
            reraise(self._exception, self._traceback)
        else:
            return self._result

    def add_done_callback(self, fn):
        """Attaches a callable that will be called when the future finishes.

        Args:
            fn: A callable that will be called with this future as its only
                argument when the future completes or is cancelled. The
                callable will always be called by a thread in the same
                process in which it was added. If the future has already
                completed or been cancelled then the callable will be called
                immediately. These callables are called in the order that
                they were added.
        """
        with self._condition:
            if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
                self._done_callbacks.append(fn)
                return
        fn(self)

    def result(self, timeout=None):
        """Return the result of the call that the future represents.

        Args:
            timeout: The number of seconds to wait for the result if the
                future isn't done. If None, then there is no limit on the
                wait time.

        Returns:
            The result of the call that the future represents.

        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the
                given timeout.
            Exception: If the call raised then that exception will be raised.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()

            self._condition.wait(timeout)

            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()
            else:
                raise TimeoutError()

    def exception_info(self, timeout=None):
        """Return a tuple of (exception, traceback) raised by the call that
        the future represents.

        Args:
            timeout: The number of seconds to wait for the exception if the
                future isn't done. If None, then there is no limit on the
                wait time.

        Returns:
            The exception raised by the call that the future represents or
            None if the call completed without raising.

        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the
                given timeout.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception, self._traceback

            self._condition.wait(timeout)

            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception, self._traceback
            else:
                raise TimeoutError()

    def exception(self, timeout=None):
        """Return the exception raised by the call that the future represents.

        Args:
            timeout: The number of seconds to wait for the exception if the
                future isn't done. If None, then there is no limit on the
                wait time.

        Returns:
            The exception raised by the call that the future represents or
            None if the call completed without raising.

        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the
                given timeout.
        """
        return self.exception_info(timeout)[0]

    # The following methods should only be used by Executors and in tests.
    def set_running_or_notify_cancel(self):
        """Mark the future as running or process any cancel notifications.

        Should only be used by Executor implementations and unit tests.

        If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (through
        calls to as_completed() or wait()) are notified and False is
        returned.

        If the future was not cancelled then it is put in the running state
        (future calls to running() will return True) and True is returned.

        This method should be called by Executor implementations before
        executing the work associated with this future. If this method
        returns False then the work should not be executed.

        Returns:
            False if the Future was cancelled, True otherwise.

        Raises:
            RuntimeError: if this method was already called or if
                set_result() or set_exception() was called.
        """
        with self._condition:
            if self._state == CANCELLED:
                self._state = CANCELLED_AND_NOTIFIED
                for waiter in self._waiters:
                    waiter.add_cancelled(self)
                # self._condition.notify_all() is not necessary because
                # self.cancel() triggers a notification.
                return False
            elif self._state == PENDING:
                self._state = RUNNING
                return True
            else:
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
                raise RuntimeError('Future in unexpected state')

    def set_result(self, result):
        """Sets the return value of work associated with the future.

        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._result = result
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_result(self)
            self._condition.notify_all()
        self._invoke_callbacks()

    def set_exception_info(self, exception, traceback):
        """Sets the result of the future as being the given exception
        and traceback.

        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._exception = exception
            self._traceback = traceback
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_exception(self)
            self._condition.notify_all()
        self._invoke_callbacks()

    def set_exception(self, exception):
        """Sets the result of the future as being the given exception.

        Should only be used by Executor implementations and unit tests.
        """
        self.set_exception_info(exception, None)

class Executor(object):
    """This is an abstract base class for concrete asynchronous executors."""

    def submit(self, fn, *args, **kwargs):
        """Submits a callable to be executed with the given arguments.

        Schedules the callable to be executed as fn(*args, **kwargs) and
        returns a Future instance representing the execution of the callable.

        Returns:
            A Future representing the given call.
        """
        raise NotImplementedError()

    def map(self, fn, *iterables, **kwargs):
        """Returns an iterator equivalent to map(fn, *iterables).

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then
                there is no limit on the wait time.

        Returns:
            An iterator equivalent to: map(fn, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be
                generated before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        timeout = kwargs.get('timeout')
        if timeout is not None:
            end_time = timeout + time.time()

        fs = [self.submit(fn, *args) for args in zip(*iterables)]

        try:
            for future in fs:
                if timeout is None:
                    yield future.result()
                else:
                    yield future.result(end_time - time.time())
        finally:
            for future in fs:
                future.cancel()

    def shutdown(self, wait=True):
        """Clean-up the resources associated with the Executor.

        It is safe to call this method several times. Otherwise, no other
        methods can be called after this one.

        Args:
            wait: If True then shutdown will not return until all running
                futures have finished executing and the resources used by
                the executor have been reclaimed.
        """
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown(wait=True)
        return False
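Illustrative sketch (not in the diff) of the wait() semantics defined above: the call returns the DoneAndNotDoneFutures named 2-tuple, and FIRST_COMPLETED returns as soon as any future finishes:

    import time
    from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED

    with ThreadPoolExecutor(max_workers=2) as pool:
        fast = pool.submit(time.sleep, 0.1)
        slow = pool.submit(time.sleep, 2)
        done, not_done = wait([fast, slow], return_when=FIRST_COMPLETED)
        # done contains the 0.1s sleep; not_done still holds the 2s sleep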
@@ -1,111 +0,0 @@
from keyword import iskeyword as _iskeyword
from operator import itemgetter as _itemgetter
import sys as _sys


def namedtuple(typename, field_names):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', 'x y')
    >>> Point.__doc__           # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)     # instantiate with positional args or keywords
    >>> p[0] + p[1]             # indexable like a plain tuple
    33
    >>> x, y = p                # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y               # fields also accessible by name
    33
    >>> d = p._asdict()         # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)              # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)       # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)

    """

    # Parse and validate the field names. Validation serves two purposes:
    # generating informative error messages and preventing template injection attacks.
    if isinstance(field_names, basestring):
        field_names = field_names.replace(',', ' ').split()  # names separated by whitespace and/or commas
    field_names = tuple(map(str, field_names))
    for name in (typename,) + field_names:
        if not all(c.isalnum() or c == '_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen_names = set()
    for name in field_names:
        if name.startswith('_'):
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)

    # Create and fill-in the class template
    numfields = len(field_names)
    argtxt = repr(field_names).replace("'", "")[1:-1]  # tuple repr without parens or quotes
    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
    dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
    template = '''class %(typename)s(tuple):
        '%(typename)s(%(argtxt)s)' \n
        __slots__ = () \n
        _fields = %(field_names)r \n
        def __new__(_cls, %(argtxt)s):
            return _tuple.__new__(_cls, (%(argtxt)s)) \n
        @classmethod
        def _make(cls, iterable, new=tuple.__new__, len=len):
            'Make a new %(typename)s object from a sequence or iterable'
            result = new(cls, iterable)
            if len(result) != %(numfields)d:
                raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
            return result \n
        def __repr__(self):
            return '%(typename)s(%(reprtxt)s)' %% self \n
        def _asdict(t):
            'Return a new dict which maps field names to their values'
            return {%(dicttxt)s} \n
        def _replace(_self, **kwds):
            'Return a new %(typename)s object replacing specified fields with new values'
            result = _self._make(map(kwds.pop, %(field_names)r, _self))
            if kwds:
                raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
            return result \n
        def __getnewargs__(self):
            return tuple(self) \n\n''' % locals()
    for i, name in enumerate(field_names):
        template += '        %s = _property(_itemgetter(%d))\n' % (name, i)

    # Execute the template string in a temporary namespace and
    # support tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
                     _property=property, _tuple=tuple)
    try:
        exec(template, namespace)
    except SyntaxError:
        e = _sys.exc_info()[1]
        raise SyntaxError(e.message + ':\n' + template)
    result = namespace[typename]

    # For pickling to work, the __module__ variable needs to be set to the
    # frame where the named tuple is created. Bypass this step in
    # environments where sys._getframe is not defined (Jython for example).
    if hasattr(_sys, '_getframe'):
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')

    return result


if _sys.version_info[0] < 3:
    def reraise(exc, traceback):
        locals_ = {'exc_type': type(exc), 'exc_value': exc, 'traceback': traceback}
        exec('raise exc_type, exc_value, traceback', {}, locals_)
else:
    def reraise(exc, traceback):
        # Tracebacks are embedded in exceptions in Python 3
        raise exc
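A hedged note on reraise() above: on Python 2 a bare `raise exc` discards the original traceback, which is why the three-argument raise is executed from a string; on Python 3 the traceback travels with the exception object. A minimal sketch of the modern equivalent:

    import sys

    try:
        1 / 0
    except ZeroDivisionError:
        exc_type, exc, tb = sys.exc_info()

    # Python 3 equivalent of reraise(exc, tb):
    # raise exc.with_traceback(tb)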
@@ -1,363 +0,0 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Implements ProcessPoolExecutor.

The following diagram and text describe the data-flow through the system:

|======================= In-process =====================|== Out-of-process ==|

+----------+     +----------+       +--------+     +-----------+    +---------+
|          |  => | Work Ids |    => |        |  => | Call Q    | => |         |
|          |     +----------+       |        |     +-----------+    |         |
|          |     | ...      |       |        |     | ...       |    |         |
|          |     | 6        |       |        |     | 5, call() |    |         |
|          |     | 7        |       |        |     | ...       |    |         |
| Process  |     | ...      |       | Local  |     +-----------+    | Process |
|  Pool    |     +----------+       | Worker |                      |  #1..n  |
| Executor |                        | Thread |                      |         |
|          |     +----------- +     |        |     +-----------+    |         |
|          | <=> | Work Items | <=> |        | <=  | Result Q  | <= |         |
|          |     +------------+     |        |     +-----------+    |         |
|          |     | 6: call() |      |        |     | ...       |    |         |
|          |     |    future |      |        |     | 4, result |    |         |
|          |     | ...       |      |        |     | 3, except |    |         |
+----------+     +------------+     +--------+     +-----------+    +---------+

Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue

Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
  WorkItem from the "Work Items" dict: if the work item has been cancelled
  then it is simply removed from the dict, otherwise it is repackaged as a
  _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
  until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
  calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
  "Work Items" dict and deletes the dict entry

Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
  _ResultItems in "Result Q"
"""

from __future__ import with_statement
import atexit
import multiprocessing
import threading
import weakref
import sys

from concurrent.futures import _base

try:
    import queue
except ImportError:
    import Queue as queue

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

# Workers are created as daemon threads and processes. This is done to allow
# the interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called).
# However, allowing workers to die with the interpreter has two undesirable
# properties:
#   - The workers would still be running during interpreter shutdown,
#     meaning that they would fail in unpredictable ways.
#   - The workers could be killed while evaluating a work item, which could
#     be bad if the callable being evaluated has external side-effects e.g.
#     writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.

_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False

def _python_exit():
    global _shutdown
    _shutdown = True
    items = list(_threads_queues.items())
    for t, q in items:
        q.put(None)
    for t, q in items:
        t.join()

# Controls how many more calls than processes will be queued in the call
# queue. A smaller number will mean that processes spend more time idle
# waiting for work while a larger number will make Future.cancel() succeed
# less frequently (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1

class _WorkItem(object):
    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

class _ResultItem(object):
    def __init__(self, work_id, exception=None, result=None):
        self.work_id = work_id
        self.exception = exception
        self.result = result

class _CallItem(object):
    def __init__(self, work_id, fn, args, kwargs):
        self.work_id = work_id
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

def _process_worker(call_queue, result_queue):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A multiprocessing.Queue of _CallItems that will be read
            and evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will be
            written to by the worker.
        shutdown: A multiprocessing.Event that will be set as a signal to the
            worker that it should exit when call_queue is empty.
    """
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:
            # Wake up queue management thread
            result_queue.put(None)
            return
        try:
            r = call_item.fn(*call_item.args, **call_item.kwargs)
        except BaseException:
            e = sys.exc_info()[1]
            result_queue.put(_ResultItem(call_item.work_id,
                                         exception=e))
        else:
            result_queue.put(_ResultItem(call_item.work_id,
                                         result=r))

def _add_call_item_to_queue(pending_work_items,
                            work_ids,
                            call_queue):
    """Fills call_queue with _WorkItems from pending_work_items.

    This function never blocks.

    Args:
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
            are consumed and the corresponding _WorkItems from
            pending_work_items are transformed into _CallItems and put in
            call_queue.
        call_queue: A multiprocessing.Queue that will be filled with
            _CallItems derived from _WorkItems.
    """
    while True:
        if call_queue.full():
            return
        try:
            work_id = work_ids.get(block=False)
        except queue.Empty:
            return
        else:
            work_item = pending_work_items[work_id]

            if work_item.future.set_running_or_notify_cancel():
                call_queue.put(_CallItem(work_id,
                                         work_item.fn,
                                         work_item.args,
                                         work_item.kwargs),
                               block=True)
            else:
                del pending_work_items[work_id]
                continue

def _queue_management_worker(executor_reference,
                             processes,
                             pending_work_items,
                             work_ids_queue,
                             call_queue,
                             result_queue):
    """Manages the communication between this process and the worker
    processes.

    This function is run in a local thread.

    Args:
        executor_reference: A weakref.ref to the ProcessPoolExecutor that
            owns this thread. Used to determine if the ProcessPoolExecutor
            has been garbage collected and that this function can exit.
        process: A list of the multiprocessing.Process instances used as
            workers.
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
        call_queue: A multiprocessing.Queue that will be filled with
            _CallItems derived from _WorkItems for processing by the process
            workers.
        result_queue: A multiprocessing.Queue of _ResultItems generated by
            the process workers.
    """
    nb_shutdown_processes = [0]
    def shutdown_one_process():
        """Tell a worker to terminate, which will in turn wake us again"""
        call_queue.put(None)
        nb_shutdown_processes[0] += 1
    while True:
        _add_call_item_to_queue(pending_work_items,
                                work_ids_queue,
                                call_queue)

        result_item = result_queue.get(block=True)
        if result_item is not None:
            work_item = pending_work_items[result_item.work_id]
            del pending_work_items[result_item.work_id]

            if result_item.exception:
                work_item.future.set_exception(result_item.exception)
            else:
                work_item.future.set_result(result_item.result)
        # Check whether we should start shutting down.
        executor = executor_reference()
        # No more work items can be added if:
        #   - The interpreter is shutting down OR
        #   - The executor that owns this worker has been collected OR
        #   - The executor that owns this worker has been shutdown.
        if _shutdown or executor is None or executor._shutdown_thread:
            # Since no new work items can be added, it is safe to shutdown
            # this thread if there are no pending work items.
            if not pending_work_items:
                while nb_shutdown_processes[0] < len(processes):
                    shutdown_one_process()
                # If .join() is not called on the created processes then
                # some multiprocessing.Queue methods may deadlock on Mac OS
                # X.
                for p in processes:
                    p.join()
                call_queue.close()
                return
        del executor

_system_limits_checked = False
_system_limited = None
def _check_system_limits():
    global _system_limits_checked, _system_limited
    if _system_limits_checked:
        if _system_limited:
            raise NotImplementedError(_system_limited)
    _system_limits_checked = True
    try:
        import os
        nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf not available or setting not available
        return
    if nsems_max == -1:
        # indeterminate limit, assume that limit is determined
        # by available memory only
        return
    if nsems_max >= 256:
        # minimum number of semaphores available
        # according to POSIX
        return
    _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
    raise NotImplementedError(_system_limited)

class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has
                processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = multiprocessing.cpu_count()
        else:
            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        self._result_queue = multiprocessing.Queue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        self._processes = set()

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)
        if self._queue_management_thread is None:
            self._queue_management_thread = threading.Thread(
                    target=_queue_management_worker,
                    args=(weakref.ref(self, weakref_cb),
                          self._processes,
                          self._pending_work_items,
                          self._work_ids,
                          self._call_queue,
                          self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                    target=_process_worker,
                    args=(self._call_queue,
                          self._result_queue))
            p.start()
            self._processes.add(p)

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            self._adjust_process_count()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None
    shutdown.__doc__ = _base.Executor.shutdown.__doc__

atexit.register(_python_exit)
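Illustrative sketch (not part of this diff) of the data flow described in the module docstring, driven through the public API; the callable and its arguments must be picklable because they cross the "Call Q"/"Result Q" process boundary:

    from concurrent.futures import ProcessPoolExecutor

    def cube(x):
        return x ** 3

    if __name__ == '__main__':  # guard required where processes are spawned
        with ProcessPoolExecutor(max_workers=2) as pool:
            print(list(pool.map(cube, range(6))))  # [0, 1, 8, 27, 64, 125]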
@@ -1,138 +0,0 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Implements ThreadPoolExecutor."""

from __future__ import with_statement
import atexit
import threading
import weakref
import sys

from concurrent.futures import _base

try:
    import queue
except ImportError:
    import Queue as queue

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
#   - The workers would still be running during interpreter shutdown,
#     meaning that they would fail in unpredictable ways.
#   - The workers could be killed while evaluating a work item, which could
#     be bad if the callable being evaluated has external side-effects e.g.
#     writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.

_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False

def _python_exit():
    global _shutdown
    _shutdown = True
    items = list(_threads_queues.items())
    for t, q in items:
        q.put(None)
    for t, q in items:
        t.join()

atexit.register(_python_exit)

class _WorkItem(object):
    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    def run(self):
        if not self.future.set_running_or_notify_cancel():
            return

        try:
            result = self.fn(*self.args, **self.kwargs)
        except BaseException:
            e, tb = sys.exc_info()[1:]
            self.future.set_exception_info(e, tb)
        else:
            self.future.set_result(result)

def _worker(executor_reference, work_queue):
    try:
        while True:
            work_item = work_queue.get(block=True)
            if work_item is not None:
                work_item.run()
                continue
            executor = executor_reference()
            # Exit if:
            #   - The interpreter is shutting down OR
            #   - The executor that owns the worker has been collected OR
            #   - The executor that owns the worker has been shutdown.
            if _shutdown or executor is None or executor._shutdown:
                # Notice other workers
                work_queue.put(None)
                return
            del executor
    except BaseException:
        _base.LOGGER.critical('Exception in worker', exc_info=True)

class ThreadPoolExecutor(_base.Executor):
    def __init__(self, max_workers):
        """Initializes a new ThreadPoolExecutor instance.

        Args:
            max_workers: The maximum number of threads that can be used to
                execute the given calls.
        """
        self._max_workers = max_workers
        self._work_queue = queue.Queue()
        self._threads = set()
        self._shutdown = False
        self._shutdown_lock = threading.Lock()

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._work_queue.put(w)
            self._adjust_thread_count()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def _adjust_thread_count(self):
        # When the executor gets lost, the weakref callback will wake up
        # the worker threads.
        def weakref_cb(_, q=self._work_queue):
            q.put(None)
        # TODO(bquinlan): Should avoid creating new threads if there are more
        # idle threads than items in the work queue.
        if len(self._threads) < self._max_workers:
            t = threading.Thread(target=_worker,
                                 args=(weakref.ref(self, weakref_cb),
                                       self._work_queue))
            t.daemon = True
            t.start()
            self._threads.add(t)
            _threads_queues[t] = self._work_queue

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown = True
            self._work_queue.put(None)
        if wait:
            for t in self._threads:
                t.join()
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
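A short sketch (not in the diff) of the submit()/callback path above: submit() wraps the call in a _WorkItem, a daemon worker thread runs it, and done-callbacks fire once the future finishes:

    from concurrent.futures import ThreadPoolExecutor

    def work(x):
        return x + 1

    def report(future):
        print('result:', future.result())  # prints 'result: 42'

    pool = ThreadPoolExecutor(max_workers=1)
    future = pool.submit(work, 41)
    future.add_done_callback(report)
    pool.shutdown(wait=True)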
@@ -1,829 +0,0 @@
# Copyright 2001-2013 Python Software Foundation; All Rights Reserved
"""Function signature objects for callables

Back port of Python 3.3's function signature tools from the inspect module,
modified to be compatible with Python 2.6, 2.7 and 3.3+.
"""
from __future__ import absolute_import, division, print_function
import itertools
import functools
import re
import types

try:
    from collections import OrderedDict
except ImportError:
    from ordereddict import OrderedDict

from funcsigs.version import __version__

__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']


_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)

_NonUserDefinedCallables = (_WrapperDescriptor,
                            _MethodWrapper,
                            types.BuiltinFunctionType)


def formatannotation(annotation, base_module=None):
    if isinstance(annotation, type):
        if annotation.__module__ in ('builtins', '__builtin__', base_module):
            return annotation.__name__
        return annotation.__module__ + '.' + annotation.__name__
    return repr(annotation)


def _get_user_defined_method(cls, method_name, *nested):
    try:
        if cls is type:
            return
        meth = getattr(cls, method_name)
        for name in nested:
            meth = getattr(meth, name, meth)
    except AttributeError:
        return
    else:
        if not isinstance(meth, _NonUserDefinedCallables):
            # Once '__signature__' will be added to 'C'-level
            # callables, this check won't be necessary
            return meth


def signature(obj):
    '''Get a signature object for the passed callable.'''

    if not callable(obj):
        raise TypeError('{0!r} is not a callable object'.format(obj))

    if isinstance(obj, types.MethodType):
        sig = signature(obj.__func__)
        if obj.__self__ is None:
            # Unbound method - preserve as-is.
            return sig
        else:
            # Bound method. Eat self - if we can.
            params = tuple(sig.parameters.values())

            if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                raise ValueError('invalid method signature')

            kind = params[0].kind
            if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):
                # Drop first parameter:
                # '(p1, p2[, ...])' -> '(p2[, ...])'
                params = params[1:]
            else:
                if kind is not _VAR_POSITIONAL:
                    # Unless we add a new parameter type we never
                    # get here
                    raise ValueError('invalid argument type')
                # It's a var-positional parameter.
                # Do nothing. '(*args[, ...])' -> '(*args[, ...])'

            return sig.replace(parameters=params)

    try:
        sig = obj.__signature__
    except AttributeError:
        pass
    else:
        if sig is not None:
            return sig

    try:
        # Was this function wrapped by a decorator?
        wrapped = obj.__wrapped__
    except AttributeError:
        pass
    else:
        return signature(wrapped)

    if isinstance(obj, types.FunctionType):
        return Signature.from_function(obj)

    if isinstance(obj, functools.partial):
        sig = signature(obj.func)

        new_params = OrderedDict(sig.parameters.items())

        partial_args = obj.args or ()
        partial_keywords = obj.keywords or {}
        try:
            ba = sig.bind_partial(*partial_args, **partial_keywords)
        except TypeError as ex:
            msg = 'partial object {0!r} has incorrect arguments'.format(obj)
            raise ValueError(msg)

        for arg_name, arg_value in ba.arguments.items():
            param = new_params[arg_name]
            if arg_name in partial_keywords:
                # We set a new default value, because the following code
                # is correct:
                #
                #   >>> def foo(a): print(a)
                #   >>> print(partial(partial(foo, a=10), a=20)())
                #   20
                #   >>> print(partial(partial(foo, a=10), a=20)(a=30))
                #   30
                #
                # So, with 'partial' objects, passing a keyword argument is
                # like setting a new default value for the corresponding
                # parameter
                #
                # We also mark this parameter with '_partial_kwarg'
                # flag. Later, in '_bind', the 'default' value of this
                # parameter will be added to 'kwargs', to simulate
                # the 'functools.partial' real call.
                new_params[arg_name] = param.replace(default=arg_value,
                                                     _partial_kwarg=True)

            elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
                  not param._partial_kwarg):
                new_params.pop(arg_name)

        return sig.replace(parameters=new_params.values())

    sig = None
    if isinstance(obj, type):
        # obj is a class or a metaclass

        # First, let's see if it has an overloaded __call__ defined
        # in its metaclass
        call = _get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = signature(call)
        else:
            # Now we check if the 'obj' class has a '__new__' method
            new = _get_user_defined_method(obj, '__new__')
            if new is not None:
                sig = signature(new)
            else:
                # Finally, we should have at least __init__ implemented
                init = _get_user_defined_method(obj, '__init__')
                if init is not None:
                    sig = signature(init)
    elif not isinstance(obj, _NonUserDefinedCallables):
        # An object with __call__
        # We also check that the 'obj' is not an instance of
        # _WrapperDescriptor or _MethodWrapper to avoid
        # infinite recursion (and even potential segfault)
        call = _get_user_defined_method(type(obj), '__call__', 'im_func')
        if call is not None:
            sig = signature(call)

    if sig is not None:
        # For classes and objects we skip the first parameter of their
        # __call__, __new__, or __init__ methods
        return sig.replace(parameters=tuple(sig.parameters.values())[1:])

    if isinstance(obj, types.BuiltinFunctionType):
        # Raise a nicer error message for builtins
        msg = 'no signature found for builtin function {0!r}'.format(obj)
        raise ValueError(msg)

    raise ValueError('callable {0!r} is not supported by signature'.format(obj))


class _void(object):
    '''A private marker - used in Parameter & Signature'''


class _empty(object):
    pass


class _ParameterKind(int):
    def __new__(self, *args, **kwargs):
        obj = int.__new__(self, *args)
        obj._name = kwargs['name']
        return obj

    def __str__(self):
        return self._name

    def __repr__(self):
        return '<_ParameterKind: {0!r}>'.format(self._name)


_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')


class Parameter(object):
    '''Represents a parameter in a function signature.

    Has the following public attributes:

    * name : str
        The name of the parameter as a string.
    * default : object
        The default value for the parameter if specified. If the
        parameter has no default value, this attribute is not set.
    * annotation
        The annotation for the parameter if specified. If the
        parameter has no annotation, this attribute is not set.
    * kind : str
        Describes how argument values are bound to the parameter.
        Possible values: `Parameter.POSITIONAL_ONLY`,
        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
    '''

    __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')

    POSITIONAL_ONLY = _POSITIONAL_ONLY
    POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
    VAR_POSITIONAL = _VAR_POSITIONAL
    KEYWORD_ONLY = _KEYWORD_ONLY
    VAR_KEYWORD = _VAR_KEYWORD

    empty = _empty

    def __init__(self, name, kind, default=_empty, annotation=_empty,
                 _partial_kwarg=False):

        if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
                        _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
            raise ValueError("invalid value for 'Parameter.kind' attribute")
        self._kind = kind

        if default is not _empty:
            if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
                msg = '{0} parameters cannot have default values'.format(kind)
                raise ValueError(msg)
        self._default = default
        self._annotation = annotation

        if name is None:
            if kind != _POSITIONAL_ONLY:
                raise ValueError("None is not a valid name for a "
                                 "non-positional-only parameter")
            self._name = name
        else:
            name = str(name)
            if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I):
                msg = '{0!r} is not a valid parameter name'.format(name)
                raise ValueError(msg)
            self._name = name

        self._partial_kwarg = _partial_kwarg

    @property
    def name(self):
        return self._name

    @property
    def default(self):
        return self._default

    @property
    def annotation(self):
        return self._annotation

    @property
    def kind(self):
        return self._kind

    def replace(self, name=_void, kind=_void, annotation=_void,
                default=_void, _partial_kwarg=_void):
        '''Creates a customized copy of the Parameter.'''

        if name is _void:
            name = self._name

        if kind is _void:
            kind = self._kind

        if annotation is _void:
            annotation = self._annotation

        if default is _void:
            default = self._default

        if _partial_kwarg is _void:
            _partial_kwarg = self._partial_kwarg

        return type(self)(name, kind, default=default, annotation=annotation,
                          _partial_kwarg=_partial_kwarg)

    def __str__(self):
        kind = self.kind

        formatted = self._name
        if kind == _POSITIONAL_ONLY:
            if formatted is None:
                formatted = ''
            formatted = '<{0}>'.format(formatted)

        # Add annotation and default value
        if self._annotation is not _empty:
            formatted = '{0}:{1}'.format(formatted,
                                         formatannotation(self._annotation))

        if self._default is not _empty:
            formatted = '{0}={1}'.format(formatted, repr(self._default))

        if kind == _VAR_POSITIONAL:
            formatted = '*' + formatted
        elif kind == _VAR_KEYWORD:
            formatted = '**' + formatted

        return formatted

    def __repr__(self):
        return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__,
                                              id(self), self.name)

    def __hash__(self):
        msg = "unhashable type: '{0}'".format(self.__class__.__name__)
        raise TypeError(msg)

    def __eq__(self, other):
        return (issubclass(other.__class__, Parameter) and
                self._name == other._name and
                self._kind == other._kind and
                self._default == other._default and
                self._annotation == other._annotation)

    def __ne__(self, other):
        return not self.__eq__(other)


class BoundArguments(object):
    '''Result of `Signature.bind` call. Holds the mapping of arguments
    to the function's parameters.

    Has the following public attributes:

    * arguments : OrderedDict
        An ordered mutable mapping of parameters' names to arguments' values.
        Does not contain arguments' default values.
    * signature : Signature
        The Signature object that created this instance.
    * args : tuple
        Tuple of positional arguments values.
    * kwargs : dict
        Dict of keyword arguments values.
    '''

    def __init__(self, signature, arguments):
        self.arguments = arguments
        self._signature = signature

    @property
    def signature(self):
        return self._signature

    @property
    def args(self):
        args = []
        for param_name, param in self._signature.parameters.items():
            if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                    param._partial_kwarg):
                # Keyword arguments mapped by 'functools.partial'
                # (Parameter._partial_kwarg is True) are mapped
                # in 'BoundArguments.kwargs', along with VAR_KEYWORD &
                # KEYWORD_ONLY
                break

            try:
                arg = self.arguments[param_name]
            except KeyError:
                # We're done here. Other arguments
                # will be mapped in 'BoundArguments.kwargs'
                break
            else:
                if param.kind == _VAR_POSITIONAL:
                    # *args
                    args.extend(arg)
                else:
                    # plain argument
                    args.append(arg)

        return tuple(args)

    @property
    def kwargs(self):
        kwargs = {}
        kwargs_started = False
        for param_name, param in self._signature.parameters.items():
            if not kwargs_started:
                if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                        param._partial_kwarg):
                    kwargs_started = True
                else:
|
||||
if param_name not in self.arguments:
|
||||
kwargs_started = True
|
||||
continue
|
||||
|
||||
if not kwargs_started:
|
||||
continue
|
||||
|
||||
try:
|
||||
arg = self.arguments[param_name]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
if param.kind == _VAR_KEYWORD:
|
||||
# **kwargs
|
||||
kwargs.update(arg)
|
||||
else:
|
||||
# plain keyword argument
|
||||
kwargs[param_name] = arg
|
||||
|
||||
return kwargs
|
||||
|
||||
def __hash__(self):
|
||||
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
|
||||
raise TypeError(msg)
|
||||
|
||||
def __eq__(self, other):
|
||||
return (issubclass(other.__class__, BoundArguments) and
|
||||
self.signature == other.signature and
|
||||
self.arguments == other.arguments)
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self.__eq__(other)
|
||||
|
||||
|
||||
class Signature(object):
|
||||
'''A Signature object represents the overall signature of a function.
|
||||
It stores a Parameter object for each parameter accepted by the
|
||||
function, as well as information specific to the function itself.
|
||||
|
||||
A Signature object has the following public attributes and methods:
|
||||
|
||||
* parameters : OrderedDict
|
||||
An ordered mapping of parameters' names to the corresponding
|
||||
Parameter objects (keyword-only arguments are in the same order
|
||||
as listed in `code.co_varnames`).
|
||||
* return_annotation : object
|
||||
The annotation for the return type of the function if specified.
|
||||
If the function has no annotation for its return type, this
|
||||
attribute is not set.
|
||||
* bind(*args, **kwargs) -> BoundArguments
|
||||
Creates a mapping from positional and keyword arguments to
|
||||
parameters.
|
||||
* bind_partial(*args, **kwargs) -> BoundArguments
|
||||
Creates a partial mapping from positional and keyword arguments
|
||||
to parameters (simulating 'functools.partial' behavior.)
|
||||
'''
|
||||
|
||||
__slots__ = ('_return_annotation', '_parameters')
|
||||
|
||||
_parameter_cls = Parameter
|
||||
_bound_arguments_cls = BoundArguments
|
||||
|
||||
empty = _empty
|
||||
|
||||
def __init__(self, parameters=None, return_annotation=_empty,
|
||||
__validate_parameters__=True):
|
||||
'''Constructs Signature from the given list of Parameter
|
||||
objects and 'return_annotation'. All arguments are optional.
|
||||
'''
|
||||
|
||||
if parameters is None:
|
||||
params = OrderedDict()
|
||||
else:
|
||||
if __validate_parameters__:
|
||||
params = OrderedDict()
|
||||
top_kind = _POSITIONAL_ONLY
|
||||
|
||||
for idx, param in enumerate(parameters):
|
||||
kind = param.kind
|
||||
if kind < top_kind:
|
||||
msg = 'wrong parameter order: {0} before {1}'
|
||||
msg = msg.format(top_kind, param.kind)
|
||||
raise ValueError(msg)
|
||||
else:
|
||||
top_kind = kind
|
||||
|
||||
name = param.name
|
||||
if name is None:
|
||||
name = str(idx)
|
||||
param = param.replace(name=name)
|
||||
|
||||
if name in params:
|
||||
msg = 'duplicate parameter name: {0!r}'.format(name)
|
||||
raise ValueError(msg)
|
||||
params[name] = param
|
||||
else:
|
||||
params = OrderedDict(((param.name, param)
|
||||
for param in parameters))
|
||||
|
||||
self._parameters = params
|
||||
self._return_annotation = return_annotation
|
||||
|
||||
@classmethod
|
||||
def from_function(cls, func):
|
||||
'''Constructs Signature for the given python function'''
|
||||
|
||||
if not isinstance(func, types.FunctionType):
|
||||
raise TypeError('{0!r} is not a Python function'.format(func))
|
||||
|
||||
Parameter = cls._parameter_cls
|
||||
|
||||
# Parameter information.
|
||||
func_code = func.__code__
|
||||
pos_count = func_code.co_argcount
|
||||
arg_names = func_code.co_varnames
|
||||
positional = tuple(arg_names[:pos_count])
|
||||
keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0)
|
||||
keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
|
||||
annotations = getattr(func, '__annotations__', {})
|
||||
defaults = func.__defaults__
|
||||
kwdefaults = getattr(func, '__kwdefaults__', None)
|
||||
|
||||
if defaults:
|
||||
pos_default_count = len(defaults)
|
||||
else:
|
||||
pos_default_count = 0
|
||||
|
||||
parameters = []
|
||||
|
||||
# Non-keyword-only parameters w/o defaults.
|
||||
non_default_count = pos_count - pos_default_count
|
||||
for name in positional[:non_default_count]:
|
||||
annotation = annotations.get(name, _empty)
|
||||
parameters.append(Parameter(name, annotation=annotation,
|
||||
kind=_POSITIONAL_OR_KEYWORD))
|
||||
|
||||
# ... w/ defaults.
|
||||
for offset, name in enumerate(positional[non_default_count:]):
|
||||
annotation = annotations.get(name, _empty)
|
||||
parameters.append(Parameter(name, annotation=annotation,
|
||||
kind=_POSITIONAL_OR_KEYWORD,
|
||||
default=defaults[offset]))
|
||||
|
||||
# *args
|
||||
if func_code.co_flags & 0x04:
|
||||
name = arg_names[pos_count + keyword_only_count]
|
||||
annotation = annotations.get(name, _empty)
|
||||
parameters.append(Parameter(name, annotation=annotation,
|
||||
kind=_VAR_POSITIONAL))
|
||||
|
||||
# Keyword-only parameters.
|
||||
for name in keyword_only:
|
||||
default = _empty
|
||||
if kwdefaults is not None:
|
||||
default = kwdefaults.get(name, _empty)
|
||||
|
||||
annotation = annotations.get(name, _empty)
|
||||
parameters.append(Parameter(name, annotation=annotation,
|
||||
kind=_KEYWORD_ONLY,
|
||||
default=default))
|
||||
# **kwargs
|
||||
if func_code.co_flags & 0x08:
|
||||
index = pos_count + keyword_only_count
|
||||
if func_code.co_flags & 0x04:
|
||||
index += 1
|
||||
|
||||
name = arg_names[index]
|
||||
annotation = annotations.get(name, _empty)
|
||||
parameters.append(Parameter(name, annotation=annotation,
|
||||
kind=_VAR_KEYWORD))
|
||||
|
||||
return cls(parameters,
|
||||
return_annotation=annotations.get('return', _empty),
|
||||
__validate_parameters__=False)
|
||||
|
||||
@property
|
||||
def parameters(self):
|
||||
try:
|
||||
return types.MappingProxyType(self._parameters)
|
||||
except AttributeError:
|
||||
return OrderedDict(self._parameters.items())
|
||||
|
||||
@property
|
||||
def return_annotation(self):
|
||||
return self._return_annotation
|
||||
|
||||
def replace(self, parameters=_void, return_annotation=_void):
|
||||
'''Creates a customized copy of the Signature.
|
||||
Pass 'parameters' and/or 'return_annotation' arguments
|
||||
to override them in the new copy.
|
||||
'''
|
||||
|
||||
if parameters is _void:
|
||||
parameters = self.parameters.values()
|
||||
|
||||
if return_annotation is _void:
|
||||
return_annotation = self._return_annotation
|
||||
|
||||
return type(self)(parameters,
|
||||
return_annotation=return_annotation)
|
||||
|
||||
def __hash__(self):
|
||||
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
|
||||
raise TypeError(msg)
|
||||
|
||||
def __eq__(self, other):
|
||||
if (not issubclass(type(other), Signature) or
|
||||
self.return_annotation != other.return_annotation or
|
||||
len(self.parameters) != len(other.parameters)):
|
||||
return False
|
||||
|
||||
other_positions = dict((param, idx)
|
||||
for idx, param in enumerate(other.parameters.keys()))
|
||||
|
||||
for idx, (param_name, param) in enumerate(self.parameters.items()):
|
||||
if param.kind == _KEYWORD_ONLY:
|
||||
try:
|
||||
other_param = other.parameters[param_name]
|
||||
except KeyError:
|
||||
return False
|
||||
else:
|
||||
if param != other_param:
|
||||
return False
|
||||
else:
|
||||
try:
|
||||
other_idx = other_positions[param_name]
|
||||
except KeyError:
|
||||
return False
|
||||
else:
|
||||
if (idx != other_idx or
|
||||
param != other.parameters[param_name]):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self.__eq__(other)
|
||||
|
||||
def _bind(self, args, kwargs, partial=False):
|
||||
'''Private method. Don't use directly.'''
|
||||
|
||||
arguments = OrderedDict()
|
||||
|
||||
parameters = iter(self.parameters.values())
|
||||
parameters_ex = ()
|
||||
arg_vals = iter(args)
|
||||
|
||||
if partial:
|
||||
# Support for binding arguments to 'functools.partial' objects.
|
||||
# See 'functools.partial' case in 'signature()' implementation
|
||||
# for details.
|
||||
for param_name, param in self.parameters.items():
|
||||
if (param._partial_kwarg and param_name not in kwargs):
|
||||
# Simulating 'functools.partial' behavior
|
||||
kwargs[param_name] = param.default
|
||||
|
||||
while True:
|
||||
# Let's iterate through the positional arguments and corresponding
|
||||
# parameters
|
||||
try:
|
||||
arg_val = next(arg_vals)
|
||||
except StopIteration:
|
||||
# No more positional arguments
|
||||
try:
|
||||
param = next(parameters)
|
||||
except StopIteration:
|
||||
# No more parameters. That's it. Just need to check that
|
||||
# we have no `kwargs` after this while loop
|
||||
break
|
||||
else:
|
||||
if param.kind == _VAR_POSITIONAL:
|
||||
# That's OK, just empty *args. Let's start parsing
|
||||
# kwargs
|
||||
break
|
||||
elif param.name in kwargs:
|
||||
if param.kind == _POSITIONAL_ONLY:
|
||||
msg = '{arg!r} parameter is positional only, ' \
|
||||
'but was passed as a keyword'
|
||||
msg = msg.format(arg=param.name)
|
||||
raise TypeError(msg)
|
||||
parameters_ex = (param,)
|
||||
break
|
||||
elif (param.kind == _VAR_KEYWORD or
|
||||
param.default is not _empty):
|
||||
# That's fine too - we have a default value for this
|
||||
# parameter. So, lets start parsing `kwargs`, starting
|
||||
# with the current parameter
|
||||
parameters_ex = (param,)
|
||||
break
|
||||
else:
|
||||
if partial:
|
||||
parameters_ex = (param,)
|
||||
break
|
||||
else:
|
||||
msg = '{arg!r} parameter lacking default value'
|
||||
msg = msg.format(arg=param.name)
|
||||
raise TypeError(msg)
|
||||
else:
|
||||
# We have a positional argument to process
|
||||
try:
|
||||
param = next(parameters)
|
||||
except StopIteration:
|
||||
raise TypeError('too many positional arguments')
|
||||
else:
|
||||
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
|
||||
# Looks like we have no parameter for this positional
|
||||
# argument
|
||||
raise TypeError('too many positional arguments')
|
||||
|
||||
if param.kind == _VAR_POSITIONAL:
|
||||
# We have an '*args'-like argument, let's fill it with
|
||||
# all positional arguments we have left and move on to
|
||||
# the next phase
|
||||
values = [arg_val]
|
||||
values.extend(arg_vals)
|
||||
arguments[param.name] = tuple(values)
|
||||
break
|
||||
|
||||
if param.name in kwargs:
|
||||
raise TypeError('multiple values for argument '
|
||||
'{arg!r}'.format(arg=param.name))
|
||||
|
||||
arguments[param.name] = arg_val
|
||||
|
||||
# Now, we iterate through the remaining parameters to process
|
||||
# keyword arguments
|
||||
kwargs_param = None
|
||||
for param in itertools.chain(parameters_ex, parameters):
|
||||
if param.kind == _POSITIONAL_ONLY:
|
||||
# This should never happen in case of a properly built
|
||||
# Signature object (but let's have this check here
|
||||
# to ensure correct behaviour just in case)
|
||||
raise TypeError('{arg!r} parameter is positional only, '
|
||||
'but was passed as a keyword'. \
|
||||
format(arg=param.name))
|
||||
|
||||
if param.kind == _VAR_KEYWORD:
|
||||
# Memorize that we have a '**kwargs'-like parameter
|
||||
kwargs_param = param
|
||||
continue
|
||||
|
||||
param_name = param.name
|
||||
try:
|
||||
arg_val = kwargs.pop(param_name)
|
||||
except KeyError:
|
||||
# We have no value for this parameter. It's fine though,
|
||||
# if it has a default value, or it is an '*args'-like
|
||||
# parameter, left alone by the processing of positional
|
||||
# arguments.
|
||||
if (not partial and param.kind != _VAR_POSITIONAL and
|
||||
param.default is _empty):
|
||||
raise TypeError('{arg!r} parameter lacking default value'. \
|
||||
format(arg=param_name))
|
||||
|
||||
else:
|
||||
arguments[param_name] = arg_val
|
||||
|
||||
if kwargs:
|
||||
if kwargs_param is not None:
|
||||
# Process our '**kwargs'-like parameter
|
||||
arguments[kwargs_param.name] = kwargs
|
||||
else:
|
||||
raise TypeError('too many keyword arguments %r' % kwargs)
|
||||
|
||||
return self._bound_arguments_cls(self, arguments)
|
||||
|
||||
def bind(*args, **kwargs):
|
||||
'''Get a BoundArguments object, that maps the passed `args`
|
||||
and `kwargs` to the function's signature. Raises `TypeError`
|
||||
if the passed arguments can not be bound.
|
||||
'''
|
||||
return args[0]._bind(args[1:], kwargs)
|
||||
|
||||
def bind_partial(self, *args, **kwargs):
|
||||
'''Get a BoundArguments object, that partially maps the
|
||||
passed `args` and `kwargs` to the function's signature.
|
||||
Raises `TypeError` if the passed arguments can not be bound.
|
||||
'''
|
||||
return self._bind(args, kwargs, partial=True)
|
||||
|
||||
def __str__(self):
|
||||
result = []
|
||||
render_kw_only_separator = True
|
||||
for idx, param in enumerate(self.parameters.values()):
|
||||
formatted = str(param)
|
||||
|
||||
kind = param.kind
|
||||
if kind == _VAR_POSITIONAL:
|
||||
# OK, we have an '*args'-like parameter, so we won't need
|
||||
# a '*' to separate keyword-only arguments
|
||||
render_kw_only_separator = False
|
||||
elif kind == _KEYWORD_ONLY and render_kw_only_separator:
|
||||
# We have a keyword-only parameter to render and we haven't
|
||||
# rendered an '*args'-like parameter before, so add a '*'
|
||||
# separator to the parameters list ("foo(arg1, *, arg2)" case)
|
||||
result.append('*')
|
||||
# This condition should be only triggered once, so
|
||||
# reset the flag
|
||||
render_kw_only_separator = False
|
||||
|
||||
result.append(formatted)
|
||||
|
||||
rendered = '({0})'.format(', '.join(result))
|
||||
|
||||
if self.return_annotation is not _empty:
|
||||
anno = formatannotation(self.return_annotation)
|
||||
rendered += ' -> {0}'.format(anno)
|
||||
|
||||
return rendered
|
|
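
For reference, a minimal usage sketch of the Signature backport removed above, exercising only the methods defined in this file; my_func is a hypothetical example function, not part of the diff:

def my_func(a, b=10, *args, **kwargs):
    pass

sig = Signature.from_function(my_func)   # build a Signature from a plain function
print(str(sig))                          # -> "(a, b=10, *args, **kwargs)"

bound = sig.bind(1, b=2)                 # map call arguments onto the parameters
print(bound.args)                        # -> (1, 2)
print(bound.kwargs)                      # -> {}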
@@ -1 +0,0 @@
__version__ = "1.0.2"
@@ -1,297 +0,0 @@
from __future__ import absolute_import
from __future__ import unicode_literals

import argparse
import codecs
import encodings
import io
import sys


utf_8 = encodings.search_function('utf8')


class TokenSyntaxError(SyntaxError):
    def __init__(self, e, token):
        super(TokenSyntaxError, self).__init__(e)
        self.e = e
        self.token = token


def _find_literal(s, start, level, parts, exprs):
    """Roughly Python/ast.c:fstring_find_literal"""
    i = start
    parse_expr = True

    while i < len(s):
        ch = s[i]

        if ch in ('{', '}'):
            if level == 0:
                if i + 1 < len(s) and s[i + 1] == ch:
                    i += 2
                    parse_expr = False
                    break
                elif ch == '}':
                    raise SyntaxError("f-string: single '}' is not allowed")
            break

        i += 1

    parts.append(s[start:i])
    return i, parse_expr and i < len(s)


def _find_expr(s, start, level, parts, exprs):
    """Roughly Python/ast.c:fstring_find_expr"""
    i = start
    nested_depth = 0
    quote_char = None
    triple_quoted = None

    def _check_end():
        if i == len(s):
            raise SyntaxError("f-string: expecting '}'")

    if level >= 2:
        raise SyntaxError("f-string: expressions nested too deeply")

    parts.append(s[i])
    i += 1

    while i < len(s):
        ch = s[i]

        if ch == '\\':
            raise SyntaxError(
                'f-string expression part cannot include a backslash',
            )
        if quote_char is not None:
            if ch == quote_char:
                if triple_quoted:
                    if i + 2 < len(s) and s[i + 1] == ch and s[i + 2] == ch:
                        i += 2
                        quote_char = None
                        triple_quoted = None
                else:
                    quote_char = None
                    triple_quoted = None
        elif ch in ('"', "'"):
            quote_char = ch
            if i + 2 < len(s) and s[i + 1] == ch and s[i + 2] == ch:
                triple_quoted = True
                i += 2
            else:
                triple_quoted = False
        elif ch in ('[', '{', '('):
            nested_depth += 1
        elif nested_depth and ch in (']', '}', ')'):
            nested_depth -= 1
        elif ch == '#':
            raise SyntaxError("f-string expression cannot include '#'")
        elif nested_depth == 0 and ch in ('!', ':', '}'):
            if ch == '!' and i + 1 < len(s) and s[i + 1] == '=':
                # Allow != at top level as `=` isn't a valid conversion
                pass
            else:
                break
        i += 1

    if quote_char is not None:
        raise SyntaxError('f-string: unterminated string')
    elif nested_depth:
        raise SyntaxError("f-string: mismatched '(', '{', or '['")
    _check_end()

    exprs.append(s[start + 1:i])

    if s[i] == '!':
        parts.append(s[i])
        i += 1
        _check_end()
        parts.append(s[i])
        i += 1

    _check_end()

    if s[i] == ':':
        parts.append(s[i])
        i += 1
        _check_end()
        i = _fstring_parse(s, i, level + 1, parts, exprs)

    _check_end()
    if s[i] != '}':
        raise SyntaxError("f-string: expecting '}'")

    parts.append(s[i])
    i += 1
    return i


def _fstring_parse(s, i, level, parts, exprs):
    """Roughly Python/ast.c:fstring_find_literal_and_expr"""
    while True:
        i, parse_expr = _find_literal(s, i, level, parts, exprs)
        if i == len(s) or s[i] == '}':
            return i
        if parse_expr:
            i = _find_expr(s, i, level, parts, exprs)


def _fstring_parse_outer(s, i, level, parts, exprs):
    for q in ('"' * 3, "'" * 3, '"', "'"):
        if s.startswith(q):
            s = s[len(q):len(s) - len(q)]
            break
    else:
        raise AssertionError('unreachable')
    parts.append(q)
    ret = _fstring_parse(s, i, level, parts, exprs)
    parts.append(q)
    return ret


def _is_f(token):
    import tokenize_rt

    prefix, _ = tokenize_rt.parse_string_literal(token.src)
    return 'f' in prefix.lower()


def _make_fstring(tokens):
    import tokenize_rt

    new_tokens = []
    exprs = []

    for i, token in enumerate(tokens):
        if token.name == 'STRING' and _is_f(token):
            prefix, s = tokenize_rt.parse_string_literal(token.src)
            parts = []
            try:
                _fstring_parse_outer(s, 0, 0, parts, exprs)
            except SyntaxError as e:
                raise TokenSyntaxError(e, tokens[i - 1])
            if 'r' in prefix.lower():
                parts = [s.replace('\\', '\\\\') for s in parts]
            token = token._replace(src=''.join(parts))
        elif token.name == 'STRING':
            new_src = token.src.replace('{', '{{').replace('}', '}}')
            token = token._replace(src=new_src)
        new_tokens.append(token)

    exprs = ('({})'.format(expr) for expr in exprs)
    format_src = '.format({})'.format(', '.join(exprs))
    new_tokens.append(tokenize_rt.Token('FORMAT', src=format_src))

    return new_tokens


def decode(b, errors='strict'):
    import tokenize_rt  # pip install future-fstrings[rewrite]

    u, length = utf_8.decode(b, errors)
    tokens = tokenize_rt.src_to_tokens(u)

    to_replace = []
    start = end = seen_f = None

    for i, token in enumerate(tokens):
        if start is None:
            if token.name == 'STRING':
                start, end = i, i + 1
                seen_f = _is_f(token)
        elif token.name == 'STRING':
            end = i + 1
            seen_f |= _is_f(token)
        elif token.name not in tokenize_rt.NON_CODING_TOKENS:
            if seen_f:
                to_replace.append((start, end))
            start = end = seen_f = None

    for start, end in reversed(to_replace):
        try:
            tokens[start:end] = _make_fstring(tokens[start:end])
        except TokenSyntaxError as e:
            msg = str(e.e)
            line = u.splitlines()[e.token.line - 1]
            bts = line.encode('UTF-8')[:e.token.utf8_byte_offset]
            indent = len(bts.decode('UTF-8'))
            raise SyntaxError(msg + '\n\n' + line + '\n' + ' ' * indent + '^')
    return tokenize_rt.tokens_to_src(tokens), length


class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    def _buffer_decode(self, input, errors, final):  # pragma: no cover
        if final:
            return decode(input, errors)
        else:
            return '', 0


class StreamReader(utf_8.streamreader, object):
    """decode is deferred to support better error messages"""
    _stream = None
    _decoded = False

    @property
    def stream(self):
        if not self._decoded:
            text, _ = decode(self._stream.read())
            self._stream = io.BytesIO(text.encode('UTF-8'))
            self._decoded = True
        return self._stream

    @stream.setter
    def stream(self, stream):
        self._stream = stream
        self._decoded = False


def _natively_supports_fstrings():
    try:
        return eval('f"hi"') == 'hi'
    except SyntaxError:
        return False


fstring_decode = decode
SUPPORTS_FSTRINGS = _natively_supports_fstrings()
if SUPPORTS_FSTRINGS:  # pragma: no cover
    decode = utf_8.decode  # noqa
    IncrementalDecoder = utf_8.incrementaldecoder  # noqa
    StreamReader = utf_8.streamreader  # noqa

# codec api

codec_map = {
    name: codecs.CodecInfo(
        name=name,
        encode=utf_8.encode,
        decode=decode,
        incrementalencoder=utf_8.incrementalencoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=utf_8.streamwriter,
    )
    for name in ('future-fstrings', 'future_fstrings')
}


def register():  # pragma: no cover
    codecs.register(codec_map.get)


def main(argv=None):
    parser = argparse.ArgumentParser(description='Prints transformed source.')
    parser.add_argument('filename')
    args = parser.parse_args(argv)

    with open(args.filename, 'rb') as f:
        text, _ = fstring_decode(f.read())
    getattr(sys.stdout, 'buffer', sys.stdout).write(text.encode('UTF-8'))


if __name__ == '__main__':
    exit(main())
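
For context, a hedged sketch of the source-to-source rewrite the codec above performed; it assumes the optional tokenize_rt dependency is installed, and the exact printed output is illustrative:

# A Python 2 module opted in with a first-line coding declaration:
#     # -*- coding: future_fstrings -*-
# after which f-strings were rewritten into .format() calls at import time.
source = b"x = f'hello {name}'\n"
text, _ = fstring_decode(source)
print(text)   # -> x = 'hello {}'.format((name))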
1928 lib/ipaddr.py
File diff suppressed because it is too large
2417 lib/ipaddress.py
File diff suppressed because it is too large
@@ -1,195 +0,0 @@
#
# Copyright (C) 2010-2013 Vinay Sajip. See LICENSE.txt for details.
#
"""
The logutils package provides a set of handlers for the Python standard
library's logging package.

Some of these handlers are out-of-scope for the standard library, and
so they are packaged here. Others are updated versions which have
appeared in recent Python releases, but are usable with older versions
of Python, and so are packaged here.
"""
import logging
from string import Template

__version__ = '0.3.3'

class NullHandler(logging.Handler):
    """
    This handler does nothing. It's intended to be used to avoid the
    "No handlers could be found for logger XXX" one-off warning. This is
    important for library code, which may contain code to log events. If a user
    of the library does not configure logging, the one-off warning might be
    produced; to avoid this, the library developer simply needs to instantiate
    a NullHandler and add it to the top-level logger of the library module or
    package.
    """

    def handle(self, record):
        """
        Handle a record. Does nothing in this class, but in other
        handlers it typically filters and then emits the record in a
        thread-safe way.
        """
        pass

    def emit(self, record):
        """
        Emit a record. This does nothing and shouldn't be called during normal
        processing, unless you redefine :meth:`~logutils.NullHandler.handle`.
        """
        pass

    def createLock(self):
        """
        Since this handler does nothing, it has no underlying I/O to protect
        against multi-threaded access, so this method returns `None`.
        """
        self.lock = None

class PercentStyle(object):

    default_format = '%(message)s'
    asctime_format = '%(asctime)s'

    def __init__(self, fmt):
        self._fmt = fmt or self.default_format

    def usesTime(self):
        return self._fmt.find(self.asctime_format) >= 0

    def format(self, record):
        return self._fmt % record.__dict__

class StrFormatStyle(PercentStyle):
    default_format = '{message}'
    asctime_format = '{asctime}'

    def format(self, record):
        return self._fmt.format(**record.__dict__)


class StringTemplateStyle(PercentStyle):
    default_format = '${message}'
    asctime_format = '${asctime}'

    def __init__(self, fmt):
        self._fmt = fmt or self.default_format
        self._tpl = Template(self._fmt)

    def usesTime(self):
        fmt = self._fmt
        return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0

    def format(self, record):
        return self._tpl.substitute(**record.__dict__)

_STYLES = {
    '%': PercentStyle,
    '{': StrFormatStyle,
    '$': StringTemplateStyle
}

class Formatter(logging.Formatter):
    """
    Subclasses Formatter in Pythons earlier than 3.2 in order to give
    3.2 Formatter behaviour with respect to allowing %-, {} or $-
    formatting.
    """
    def __init__(self, fmt=None, datefmt=None, style='%'):
        """
        Initialize the formatter with specified format strings.

        Initialize the formatter either with the specified format string, or a
        default as described above. Allow for specialized date formatting with
        the optional datefmt argument (if omitted, you get the ISO8601 format).

        Use a style parameter of '%', '{' or '$' to specify that you want to
        use one of %-formatting, :meth:`str.format` (``{}``) formatting or
        :class:`string.Template` formatting in your format string.
        """
        if style not in _STYLES:
            raise ValueError('Style must be one of: %s' % ','.join(
                             _STYLES.keys()))
        self._style = _STYLES[style](fmt)
        self._fmt = self._style._fmt
        self.datefmt = datefmt

    def usesTime(self):
        """
        Check if the format uses the creation time of the record.
        """
        return self._style.usesTime()

    def formatMessage(self, record):
        return self._style.format(record)

    def format(self, record):
        """
        Format the specified record as text.

        The record's attribute dictionary is used as the operand to a
        string formatting operation which yields the returned string.
        Before formatting the dictionary, a couple of preparatory steps
        are carried out. The message attribute of the record is computed
        using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime()), formatTime() is
        called to format the event time. If there is exception information,
        it is formatted using formatException() and appended to the message.
        """
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        s = self.formatMessage(record)
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + record.exc_text
        return s


class BraceMessage(object):
    def __init__(self, fmt, *args, **kwargs):
        self.fmt = fmt
        self.args = args
        self.kwargs = kwargs
        self.str = None

    def __str__(self):
        if self.str is None:
            self.str = self.fmt.format(*self.args, **self.kwargs)
        return self.str

class DollarMessage(object):
    def __init__(self, fmt, **kwargs):
        self.fmt = fmt
        self.kwargs = kwargs
        self.str = None

    def __str__(self):
        if self.str is None:
            self.str = Template(self.fmt).substitute(**self.kwargs)
        return self.str


def hasHandlers(logger):
    """
    See if a logger has any handlers.
    """
    rv = False
    while logger:
        if logger.handlers:
            rv = True
            break
        elif not logger.propagate:
            break
        else:
            logger = logger.parent
    return rv
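
A brief sketch of what the Formatter above enabled on pre-3.2 Pythons, namely {}-style format strings; the logger name 'demo' is illustrative only:

import logging

handler = logging.StreamHandler()
handler.setFormatter(Formatter('{levelname}: {message}', style='{'))
logger = logging.getLogger('demo')
logger.addHandler(handler)
logger.error('disk %s is full', '/dev/sda1')   # -> "ERROR: disk /dev/sda1 is full"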
@@ -1,116 +0,0 @@
#
# Copyright (C) 2010-2013 Vinay Sajip. See LICENSE.txt for details.
#
import logging
import logutils

class LoggerAdapter(object):
    """
    An adapter for loggers which makes it easier to specify contextual
    information in logging output.
    """

    def __init__(self, logger, extra):
        """
        Initialize the adapter with a logger and a dict-like object which
        provides contextual information. This constructor signature allows
        easy stacking of LoggerAdapters, if so desired.

        You can effectively pass keyword arguments as shown in the
        following example:

        adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
        """
        self.logger = logger
        self.extra = extra

    def process(self, msg, kwargs):
        """
        Process the logging message and keyword arguments passed in to
        a logging call to insert contextual information. You can either
        manipulate the message itself, the keyword args or both. Return
        the message and kwargs modified (or not) to suit your needs.

        Normally, you'll only need to override this one method in a
        LoggerAdapter subclass for your specific needs.
        """
        kwargs["extra"] = self.extra
        return msg, kwargs

    #
    # Boilerplate convenience methods
    #
    def debug(self, msg, *args, **kwargs):
        """
        Delegate a debug call to the underlying logger.
        """
        self.log(logging.DEBUG, msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """
        Delegate an info call to the underlying logger.
        """
        self.log(logging.INFO, msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """
        Delegate a warning call to the underlying logger.
        """
        self.log(logging.WARNING, msg, *args, **kwargs)

    warn = warning

    def error(self, msg, *args, **kwargs):
        """
        Delegate an error call to the underlying logger.
        """
        self.log(logging.ERROR, msg, *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        """
        Delegate an exception call to the underlying logger.
        """
        kwargs["exc_info"] = 1
        self.log(logging.ERROR, msg, *args, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """
        Delegate a critical call to the underlying logger.
        """
        self.log(logging.CRITICAL, msg, *args, **kwargs)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegate a log call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        if self.isEnabledFor(level):
            msg, kwargs = self.process(msg, kwargs)
            self.logger._log(level, msg, args, **kwargs)

    def isEnabledFor(self, level):
        """
        Is this logger enabled for level 'level'?
        """
        if self.logger.manager.disable >= level:
            return False
        return level >= self.getEffectiveLevel()

    def setLevel(self, level):
        """
        Set the specified level on the underlying logger.
        """
        self.logger.setLevel(level)

    def getEffectiveLevel(self):
        """
        Get the effective level for the underlying logger.
        """
        return self.logger.getEffectiveLevel()

    def hasHandlers(self):
        """
        See if the underlying logger has any handlers.
        """
        return logutils.hasHandlers(self.logger)
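
An illustrative sketch of the adapter above, expanding the example in its own docstring; the client IP and logger name are hypothetical context, not part of the diff:

import logging

logging.basicConfig(format='%(clientip)s %(message)s')
adapter = LoggerAdapter(logging.getLogger('app'), {'clientip': '203.0.113.7'})
adapter.warning('login failed')   # -> "203.0.113.7 login failed"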
@@ -1,194 +0,0 @@
#
# Copyright (C) 2010-2013 Vinay Sajip. All rights reserved.
#
import ctypes
import logging
import os

try:
    unicode
except NameError:
    unicode = None

class ColorizingStreamHandler(logging.StreamHandler):
    """
    A stream handler which supports colorizing of console streams
    under Windows, Linux and Mac OS X.

    :param strm: The stream to colorize - typically ``sys.stdout``
                 or ``sys.stderr``.
    """

    # color names to indices
    color_map = {
        'black': 0,
        'red': 1,
        'green': 2,
        'yellow': 3,
        'blue': 4,
        'magenta': 5,
        'cyan': 6,
        'white': 7,
    }

    # levels to (background, foreground, bold/intense)
    if os.name == 'nt':
        level_map = {
            logging.DEBUG: (None, 'blue', True),
            logging.INFO: (None, 'white', False),
            logging.WARNING: (None, 'yellow', True),
            logging.ERROR: (None, 'red', True),
            logging.CRITICAL: ('red', 'white', True),
        }
    else:
        "Maps levels to colour/intensity settings."
        level_map = {
            logging.DEBUG: (None, 'blue', False),
            logging.INFO: (None, 'black', False),
            logging.WARNING: (None, 'yellow', False),
            logging.ERROR: (None, 'red', False),
            logging.CRITICAL: ('red', 'white', True),
        }

    csi = '\x1b['
    reset = '\x1b[0m'

    @property
    def is_tty(self):
        "Returns true if the handler's stream is a terminal."
        isatty = getattr(self.stream, 'isatty', None)
        return isatty and isatty()

    def emit(self, record):
        try:
            message = self.format(record)
            stream = self.stream
            if unicode and isinstance(message, unicode):
                enc = getattr(stream, 'encoding', 'utf-8')
                message = message.encode(enc, 'replace')
            if not self.is_tty:
                stream.write(message)
            else:
                self.output_colorized(message)
            stream.write(getattr(self, 'terminator', '\n'))
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    if os.name != 'nt':
        def output_colorized(self, message):
            """
            Output a colorized message.

            On Linux and Mac OS X, this method just writes the
            already-colorized message to the stream, since on these
            platforms console streams accept ANSI escape sequences
            for colorization. On Windows, this handler implements a
            subset of ANSI escape sequence handling by parsing the
            message, extracting the sequences and making Win32 API
            calls to colorize the output.

            :param message: The message to colorize and output.
            """
            self.stream.write(message)
    else:
        import re
        ansi_esc = re.compile(r'\x1b\[((?:\d+)(?:;(?:\d+))*)m')

        nt_color_map = {
            0: 0x00,    # black
            1: 0x04,    # red
            2: 0x02,    # green
            3: 0x06,    # yellow
            4: 0x01,    # blue
            5: 0x05,    # magenta
            6: 0x03,    # cyan
            7: 0x07,    # white
        }

        def output_colorized(self, message):
            """
            Output a colorized message.

            On Linux and Mac OS X, this method just writes the
            already-colorized message to the stream, since on these
            platforms console streams accept ANSI escape sequences
            for colorization. On Windows, this handler implements a
            subset of ANSI escape sequence handling by parsing the
            message, extracting the sequences and making Win32 API
            calls to colorize the output.

            :param message: The message to colorize and output.
            """
            parts = self.ansi_esc.split(message)
            write = self.stream.write
            h = None
            fd = getattr(self.stream, 'fileno', None)
            if fd is not None:
                fd = fd()
                if fd in (1, 2):  # stdout or stderr
                    h = ctypes.windll.kernel32.GetStdHandle(-10 - fd)
            while parts:
                text = parts.pop(0)
                if text:
                    write(text)
                if parts:
                    params = parts.pop(0)
                    if h is not None:
                        params = [int(p) for p in params.split(';')]
                        color = 0
                        for p in params:
                            if 40 <= p <= 47:
                                color |= self.nt_color_map[p - 40] << 4
                            elif 30 <= p <= 37:
                                color |= self.nt_color_map[p - 30]
                            elif p == 1:
                                color |= 0x08  # foreground intensity on
                            elif p == 0:  # reset to default color
                                color = 0x07
                            else:
                                pass  # error condition ignored
                        ctypes.windll.kernel32.SetConsoleTextAttribute(h, color)

    def colorize(self, message, record):
        """
        Colorize a message for a logging event.

        This implementation uses the ``level_map`` class attribute to
        map the LogRecord's level to a colour/intensity setting, which is
        then applied to the whole message.

        :param message: The message to colorize.
        :param record: The ``LogRecord`` for the message.
        """
        if record.levelno in self.level_map:
            bg, fg, bold = self.level_map[record.levelno]
            params = []
            if bg in self.color_map:
                params.append(str(self.color_map[bg] + 40))
            if fg in self.color_map:
                params.append(str(self.color_map[fg] + 30))
            if bold:
                params.append('1')
            if params:
                message = ''.join((self.csi, ';'.join(params),
                                   'm', message, self.reset))
        return message

    def format(self, record):
        """
        Formats a record for output.

        This implementation colorizes the message line, but leaves
        any traceback uncolorized.
        """
        message = logging.StreamHandler.format(self, record)
        if self.is_tty:
            # Don't colorize any traceback
            parts = message.split('\n', 1)
            parts[0] = self.colorize(parts[0], record)
            message = '\n'.join(parts)
        return message
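
A minimal sketch of wiring up the handler above; colour only appears when the stream is a terminal, and the message text is illustrative:

import logging
import sys

root = logging.getLogger()
root.addHandler(ColorizingStreamHandler(sys.stderr))
root.warning('rendered in yellow on a tty, plain otherwise')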
@@ -1,573 +0,0 @@
#
# Copyright (C) 2009-2013 Vinay Sajip. See LICENSE.txt for details.
#
import logging.handlers
import re
import sys
import types

try:
    basestring
except NameError:
    basestring = str
try:
    StandardError
except NameError:
    StandardError = Exception

IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)

def valid_ident(s):
    m = IDENTIFIER.match(s)
    if not m:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True

#
# This function is defined in logging only in recent versions of Python
#
try:
    from logging import _checkLevel
except ImportError:
    def _checkLevel(level):
        if isinstance(level, int):
            rv = level
        elif str(level) == level:
            if level not in logging._levelNames:
                raise ValueError('Unknown level: %r' % level)
            rv = logging._levelNames[level]
        else:
            raise TypeError('Level not an integer or a '
                            'valid string: %r' % level)
        return rv

# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.

class ConvertingDict(dict):
    """A converting dictionary wrapper."""

    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def get(self, key, default=None):
        value = dict.get(self, key, default)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def pop(self, key, default=None):
        value = dict.pop(self, key, default)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

class ConvertingList(list):
    """A converting list wrapper."""
    def __getitem__(self, key):
        value = list.__getitem__(self, key)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def pop(self, idx=-1):
        value = list.pop(self, idx)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
        return result

class ConvertingTuple(tuple):
    """A converting tuple wrapper."""
    def __getitem__(self, key):
        value = tuple.__getitem__(self, key)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.
    """

    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')

    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')

    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }

    # We might want to use a different one, e.g. importlib
    importer = __import__
    "Allows the importer to be redefined."

    def __init__(self, config):
        """
        Initialise an instance with the specified configuration
        dictionary.
        """
        self.config = ConvertingDict(config)
        self.config.configurator = self

    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v

    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)

    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol."""
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            # print d, rest
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx)  # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        # rest should be empty
        return d

    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                 isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, basestring):
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value

    def configure_custom(self, config):
        """Configure an object with a user-supplied factory."""
        c = config.pop('()')
        if isinstance(c, basestring):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        result = c(**kwargs)
        if props:
            for name, value in props.items():
                setattr(result, name, value)
        return result

    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value

def named_handlers_supported():
    major, minor = sys.version_info[:2]
    if major == 2:
        result = minor >= 7
    elif major == 3:
        result = minor >= 2
    else:
        result = (major > 3)
    return result

class DictConfigurator(BaseConfigurator):
    """
    Configure logging using a dictionary-like object to describe the
    configuration.
    """

    def configure(self):
        """Do the configuration."""

        config = self.config
        if 'version' not in config:
            raise ValueError("dictionary doesn't specify a version")
        if config['version'] != 1:
            raise ValueError("Unsupported version: %s" % config['version'])
        incremental = config.pop('incremental', False)
        EMPTY_DICT = {}
        logging._acquireLock()
        try:
            if incremental:
                handlers = config.get('handlers', EMPTY_DICT)
                # incremental handler config only if handler name
                # ties in to logging._handlers (Python 2.7, 3.2+)
                if named_handlers_supported():
                    for name in handlers:
                        if name not in logging._handlers:
                            raise ValueError('No handler found with '
                                             'name %r' % name)
                        else:
                            try:
                                handler = logging._handlers[name]
                                handler_config = handlers[name]
                                level = handler_config.get('level', None)
                                if level:
                                    handler.setLevel(_checkLevel(level))
                            except StandardError:
                                e = sys.exc_info()[1]
                                raise ValueError('Unable to configure handler '
                                                 '%r: %s' % (name, e))
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    try:
                        self.configure_logger(name, loggers[name], True)
                    except StandardError:
                        e = sys.exc_info()[1]
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root, True)
                    except StandardError:
                        e = sys.exc_info()[1]
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
            else:
                disable_existing = config.pop('disable_existing_loggers', True)

                logging._handlers.clear()
                del logging._handlerList[:]

                # Do formatters first - they don't refer to anything else
                formatters = config.get('formatters', EMPTY_DICT)
                for name in formatters:
                    try:
                        formatters[name] = self.configure_formatter(
                            formatters[name])
                    except StandardError:
                        e = sys.exc_info()[1]
                        raise ValueError('Unable to configure '
                                         'formatter %r: %s' % (name, e))
                # Next, do filters - they don't refer to anything else, either
                filters = config.get('filters', EMPTY_DICT)
                for name in filters:
                    try:
                        filters[name] = self.configure_filter(filters[name])
                    except StandardError:
                        e = sys.exc_info()[1]
                        raise ValueError('Unable to configure '
                                         'filter %r: %s' % (name, e))

                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = config.get('handlers', EMPTY_DICT)
                for name in sorted(handlers):
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except StandardError:
                        e = sys.exc_info()[1]
                        raise ValueError('Unable to configure handler '
                                         '%r: %s' % (name, e))
                # Next, do loggers - they refer to handlers and filters

                # we don't want to lose the existing loggers,
                # since other threads may have pointers to them.
                # existing is set to contain all existing loggers,
                # and as we go through the new configuration we
                # remove any which are configured. At the end,
                # what's left in existing is the set of loggers
                # which were in the previous configuration but
                # which are not in the new configuration.
                root = logging.root
                existing = sorted(root.manager.loggerDict.keys())
                # The list needs to be sorted so that we can
                # avoid disabling child loggers of explicitly
                # named loggers. With a sorted list it is easier
                # to find the child loggers.
                # We'll keep the list of existing loggers
                # which are children of named loggers here...
                child_loggers = []
                # now set up the new ones...
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    if name in existing:
                        i = existing.index(name)
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        i = i + 1  # look at the entry after name
                        while (i < num_existing) and\
                              (existing[i][:pflen] == prefixed):
                            child_loggers.append(existing[i])
                            i = i + 1
                        existing.remove(name)
                    try:
                        self.configure_logger(name, loggers[name])
                    except StandardError:
                        e = sys.exc_info()[1]
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))

                # Disable any old loggers. There's no point deleting
                # them as other threads may continue to hold references
                # and by disabling them, you stop them doing any logging.
                # However, don't disable children of named loggers, as that's
                # probably not what was intended by the user.
                for log in existing:
                    logger = root.manager.loggerDict[log]
                    if log in child_loggers:
                        logger.level = logging.NOTSET
                        logger.handlers = []
                        logger.propagate = True
                    elif disable_existing:
                        logger.disabled = True

                # And finally, do the root logger
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root)
                    except StandardError:
                        e = sys.exc_info()[1]
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
        finally:
            logging._releaseLock()

    def configure_formatter(self, config):
        """Configure a formatter from a dictionary."""
        if '()' in config:
            factory = config['()']  # for use in exception handler
            try:
                result = self.configure_custom(config)
            except TypeError:
                te = sys.exc_info()[1]
                if "'format'" not in str(te):
                    raise
                # Name of parameter changed from fmt to format.
                # Retry with old name.
                # This is so that code can be used with older Python versions
                # (e.g. by Django)
                config['fmt'] = config.pop('format')
                config['()'] = factory
                result = self.configure_custom(config)
        else:
            fmt = config.get('format', None)
            dfmt = config.get('datefmt', None)
            result = logging.Formatter(fmt, dfmt)
        return result

    def configure_filter(self, config):
        """Configure a filter from a dictionary."""
        if '()' in config:
            result = self.configure_custom(config)
        else:
            name = config.get('name', '')
            result = logging.Filter(name)
        return result

    def add_filters(self, filterer, filters):
        """Add filters to a filterer from a list of names."""
        for f in filters:
            try:
                filterer.addFilter(self.config['filters'][f])
            except StandardError:
                e = sys.exc_info()[1]
                raise ValueError('Unable to add filter %r: %s' % (f, e))

    def configure_handler(self, config):
        """Configure a handler from a dictionary."""
        formatter = config.pop('formatter', None)
        if formatter:
            try:
                formatter = self.config['formatters'][formatter]
            except StandardError:
                e = sys.exc_info()[1]
                raise ValueError('Unable to set formatter '
                                 '%r: %s' % (formatter, e))
        level = config.pop('level', None)
        filters = config.pop('filters', None)
        if '()' in config:
            c = config.pop('()')
            if isinstance(c, basestring):
                c = self.resolve(c)
            factory = c
        else:
            klass = self.resolve(config.pop('class'))
            # Special case for handler which refers to another handler
            if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
                try:
                    config['target'] = self.config['handlers'][config['target']]
                except StandardError:
                    e = sys.exc_info()[1]
                    raise ValueError('Unable to set target handler '
                                     '%r: %s' % (config['target'], e))
            elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
                config['mailhost'] = self.as_tuple(config['mailhost'])
            elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
                config['address'] = self.as_tuple(config['address'])
            factory = klass
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        try:
            result = factory(**kwargs)
        except TypeError:
            te = sys.exc_info()[1]
            if "'stream'" not in str(te):
                raise
            # The argument name changed from strm to stream
            # Retry with old name.
            # This is so that code can be used with older Python versions
            # (e.g. by Django)
            kwargs['strm'] = kwargs.pop('stream')
            result = factory(**kwargs)
        if formatter:
            result.setFormatter(formatter)
        if level is not None:
|
||||
result.setLevel(_checkLevel(level))
|
||||
if filters:
|
||||
self.add_filters(result, filters)
|
||||
return result
|
||||
|
||||
def add_handlers(self, logger, handlers):
|
||||
"""Add handlers to a logger from a list of names."""
|
||||
for h in handlers:
|
||||
try:
|
||||
logger.addHandler(self.config['handlers'][h])
|
||||
except StandardError:
|
||||
e = sys.exc_info()[1]
|
||||
raise ValueError('Unable to add handler %r: %s' % (h, e))
|
||||
|
||||
def common_logger_config(self, logger, config, incremental=False):
|
||||
"""
|
||||
Perform configuration which is common to root and non-root loggers.
|
||||
"""
|
||||
level = config.get('level', None)
|
||||
if level is not None:
|
||||
logger.setLevel(_checkLevel(level))
|
||||
if not incremental:
|
||||
#Remove any existing handlers
|
||||
for h in logger.handlers[:]:
|
||||
logger.removeHandler(h)
|
||||
handlers = config.get('handlers', None)
|
||||
if handlers:
|
||||
self.add_handlers(logger, handlers)
|
||||
filters = config.get('filters', None)
|
||||
if filters:
|
||||
self.add_filters(logger, filters)
|
||||
|
||||
def configure_logger(self, name, config, incremental=False):
|
||||
"""Configure a non-root logger from a dictionary."""
|
||||
logger = logging.getLogger(name)
|
||||
self.common_logger_config(logger, config, incremental)
|
||||
propagate = config.get('propagate', None)
|
||||
if propagate is not None:
|
||||
logger.propagate = propagate
|
||||
|
||||
def configure_root(self, config, incremental=False):
|
||||
"""Configure a root logger from a dictionary."""
|
||||
root = logging.getLogger()
|
||||
self.common_logger_config(root, config, incremental)
|
||||
|
||||
dictConfigClass = DictConfigurator
|
||||
|
||||
def dictConfig(config):
|
||||
"""Configure logging using a dictionary."""
|
||||
dictConfigClass(config).configure()
|
|
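This backport mirrors the `logging.config.dictConfig` API that has shipped in the standard library since Python 2.7/3.2, which is why it can be deleted. A minimal sketch of the equivalent stdlib usage (the config dict is illustrative, not Tautulli's own logging setup):

import logging
import logging.config

# Formatters and filters are resolved first, then handlers (which reference
# them by name), then named loggers, then the root logger - the same ordering
# as DictConfigurator.configure() above.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {'format': '%(asctime)s %(levelname)s %(name)s: %(message)s'},
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
            'level': 'DEBUG',
        },
    },
    'root': {'handlers': ['console'], 'level': 'INFO'},
}

logging.config.dictConfig(LOGGING)
logging.getLogger(__name__).info('dictConfig applied')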
@@ -1,90 +0,0 @@
#
# Copyright (C) 2010-2013 Vinay Sajip. See LICENSE.txt for details.
#
import logging

class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.

    :param host: The Web server to connect to.
    :param url: The URL to use for the connection.
    :param method: The HTTP method to use. GET and POST are supported.
    :param secure: set to True if HTTPS is to be used.
    :param credentials: Set to a username/password tuple if desired. If
                        set, a Basic authentication header is sent. WARNING:
                        if using credentials, make sure `secure` is `True`
                        to avoid sending usernames and passwords in
                        cleartext over the wire.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None):
        """
        Initialize an instance.
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.

        :param record: The record to be mapped.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a percent-encoded dictionary.

        :param record: The record to be emitted.
        """
        try:
            import http.client, urllib.parse
            host = self.host
            if self.secure:
                h = http.client.HTTPSConnection(host)
            else:
                h = http.client.HTTPConnection(host)
            url = self.url
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                if (url.find('?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                s = ('%s:%s' % self.credentials).encode('utf-8')
                s = 'Basic ' + base64.b64encode(s).strip()
                h.putheader('Authorization', s)
            h.endheaders(data if self.method == "POST" else None)
            h.getresponse()  # can't do anything with the result
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
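The deleted class mirrors the stdlib `logging.handlers.HTTPHandler` (Python 3), which takes the same arguments, so equivalent usage looks like the sketch below. The host and path are placeholders, not values from Tautulli:

import logging
import logging.handlers

logger = logging.getLogger('webdemo')
# POST each record to http://logs.example.com/log as form-encoded data.
handler = logging.handlers.HTTPHandler('logs.example.com:80', '/log', method='POST')
logger.addHandler(handler)
logger.warning('disk usage at %d%%', 91)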
@@ -1,225 +0,0 @@
#
# Copyright (C) 2010-2013 Vinay Sajip. See LICENSE.txt for details.
#
"""
This module contains classes which help you work with queues. A typical
application is when you want to log from performance-critical threads, but
where the handlers you want to use are slow (for example,
:class:`~logging.handlers.SMTPHandler`). In that case, you can create a queue,
pass it to a :class:`QueueHandler` instance and use that instance with your
loggers. Elsewhere, you can instantiate a :class:`QueueListener` with the same
queue and some slow handlers, and call :meth:`~QueueListener.start` on it.
This will start monitoring the queue on a separate thread and call all the
configured handlers *on that thread*, so that your logging thread is not held
up by the slow handlers.

Note that as well as in-process queues, you can use these classes with queues
from the :mod:`multiprocessing` module.

**N.B.** This is part of the standard library since Python 3.2, so the
version here is for use with earlier Python versions.
"""
import logging
try:
    import Queue as queue
except ImportError:
    import queue
import threading

class QueueHandler(logging.Handler):
    """
    This handler sends events to a queue. Typically, it would be used together
    with a multiprocessing Queue to centralise logging to file in one process
    (in a multi-process application), so as to avoid file write contention
    between processes.

    :param queue: The queue to send `LogRecords` to.
    """

    def __init__(self, queue):
        """
        Initialise an instance, using the passed queue.
        """
        logging.Handler.__init__(self)
        self.queue = queue

    def enqueue(self, record):
        """
        Enqueue a record.

        The base implementation uses :meth:`~queue.Queue.put_nowait`. You may
        want to override this method if you want to use blocking, timeouts or
        custom queue implementations.

        :param record: The record to enqueue.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Prepares a record for queuing. The object returned by this method is
        enqueued.

        The base implementation formats the record to merge the message
        and arguments, and removes unpickleable items from the record
        in-place.

        You might want to override this method if you want to convert
        the record to a dict or JSON string, or send a modified copy
        of the record while leaving the original intact.

        :param record: The record to prepare.
        """
        # The format operation gets traceback text into record.exc_text
        # (if there's exception data), and also puts the message into
        # record.message. We can then use this to replace the original
        # msg + args, as these might be unpickleable. We also zap the
        # exc_info attribute, as it's no longer needed and, if not None,
        # will typically not be pickleable.
        self.format(record)
        record.msg = record.message
        record.args = None
        record.exc_info = None
        return record

    def emit(self, record):
        """
        Emit a record.

        Writes the LogRecord to the queue, preparing it for pickling first.

        :param record: The record to emit.
        """
        try:
            self.enqueue(self.prepare(record))
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

class QueueListener(object):
    """
    This class implements an internal threaded listener which watches for
    LogRecords being added to a queue, removes them and passes them to a
    list of handlers for processing.

    :param queue: The queue to listen to.
    :param handlers: The handlers to invoke on everything received from
                     the queue.
    """
    _sentinel = None

    def __init__(self, queue, *handlers):
        """
        Initialise an instance with the specified queue and
        handlers.
        """
        self.queue = queue
        self.handlers = handlers
        self._stop = threading.Event()
        self._thread = None

    def dequeue(self, block):
        """
        Dequeue a record and return it, optionally blocking.

        The base implementation uses :meth:`~queue.Queue.get`. You may want to
        override this method if you want to use timeouts or work with custom
        queue implementations.

        :param block: Whether to block if the queue is empty. If `False` and
                      the queue is empty, an :class:`~queue.Empty` exception
                      will be thrown.
        """
        return self.queue.get(block)

    def start(self):
        """
        Start the listener.

        This starts up a background thread to monitor the queue for
        LogRecords to process.
        """
        self._thread = t = threading.Thread(target=self._monitor)
        t.setDaemon(True)
        t.start()

    def prepare(self, record):
        """
        Prepare a record for handling.

        This method just returns the passed-in record. You may want to
        override this method if you need to do any custom marshalling or
        manipulation of the record before passing it to the handlers.

        :param record: The record to prepare.
        """
        return record

    def handle(self, record):
        """
        Handle a record.

        This just loops through the handlers offering them the record
        to handle.

        :param record: The record to handle.
        """
        record = self.prepare(record)
        for handler in self.handlers:
            handler.handle(record)

    def _monitor(self):
        """
        Monitor the queue for records, and ask the handler
        to deal with them.

        This method runs on a separate, internal thread.
        The thread will terminate if it sees a sentinel object in the queue.
        """
        q = self.queue
        has_task_done = hasattr(q, 'task_done')
        while not self._stop.isSet():
            try:
                record = self.dequeue(True)
                if record is self._sentinel:
                    break
                self.handle(record)
                if has_task_done:
                    q.task_done()
            except queue.Empty:
                pass
        # There might still be records in the queue.
        while True:
            try:
                record = self.dequeue(False)
                if record is self._sentinel:
                    break
                self.handle(record)
                if has_task_done:
                    q.task_done()
            except queue.Empty:
                break

    def enqueue_sentinel(self):
        """
        Writes a sentinel to the queue to tell the listener to quit. This
        implementation uses ``put_nowait()``. You may want to override this
        method if you want to use timeouts or work with custom queue
        implementations.
        """
        self.queue.put_nowait(self._sentinel)

    def stop(self):
        """
        Stop the listener.

        This asks the thread to terminate, and then waits for it to do so.
        Note that if you don't call this before your application exits, there
        may be some records still left on the queue, which won't be processed.
        """
        self._stop.set()
        self.enqueue_sentinel()
        self._thread.join()
        self._thread = None
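As the module docstring notes, this pair is part of the standard library since Python 3.2 (`logging.handlers.QueueHandler`/`QueueListener`), which is what remains available once the backport is deleted. A minimal sketch of the pattern:

import logging
import logging.handlers
import queue

q = queue.Queue(-1)  # unbounded
slow_handler = logging.StreamHandler()  # stand-in for a genuinely slow handler such as SMTPHandler

# The listener drains the queue on its own thread and fans records out to
# the real handlers, so producers never block on slow I/O.
listener = logging.handlers.QueueListener(q, slow_handler)
listener.start()

logger = logging.getLogger('producer')
logger.addHandler(logging.handlers.QueueHandler(q))
logger.error('logged without blocking on the slow handler')

listener.stop()  # enqueues the sentinel, processes remaining records, joins the thread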
@@ -1,75 +0,0 @@
#
# Copyright (C) 2011-2013 Vinay Sajip. See LICENSE.txt for details.
#
"""
This module contains classes which help you work with Redis queues.
"""

from logutils.queue import QueueHandler, QueueListener
try:
    import cPickle as pickle
except ImportError:
    import pickle

class RedisQueueHandler(QueueHandler):
    """
    A QueueHandler implementation which pushes pickled
    records to a Redis queue using a specified key.

    :param key: The key to use for the queue. Defaults to
                "python.logging".
    :param redis: If specified, this instance is used to
                  communicate with a Redis instance.
    :param limit: If specified, the queue is restricted to
                  have only this many elements.
    """
    def __init__(self, key='python.logging', redis=None, limit=0):
        if redis is None:
            from redis import Redis
            redis = Redis()
        self.key = key
        assert limit >= 0
        self.limit = limit
        QueueHandler.__init__(self, redis)

    def enqueue(self, record):
        s = pickle.dumps(vars(record))
        self.queue.rpush(self.key, s)
        if self.limit:
            self.queue.ltrim(self.key, -self.limit, -1)

class RedisQueueListener(QueueListener):
    """
    A QueueListener implementation which fetches pickled
    records from a Redis queue using a specified key.

    :param key: The key to use for the queue. Defaults to
                "python.logging".
    :param redis: If specified, this instance is used to
                  communicate with a Redis instance.
    """
    def __init__(self, *handlers, **kwargs):
        redis = kwargs.get('redis')
        if redis is None:
            from redis import Redis
            redis = Redis()
        self.key = kwargs.get('key', 'python.logging')
        QueueListener.__init__(self, redis, *handlers)

    def dequeue(self, block):
        """
        Dequeue and return a record.
        """
        if block:
            s = self.queue.blpop(self.key)[1]
        else:
            s = self.queue.lpop(self.key)
        if not s:
            record = None
        else:
            record = pickle.loads(s)
        return record

    def enqueue_sentinel(self):
        self.queue.rpush(self.key, '')
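A sketch of how the pair was meant to be combined, assuming a Redis server on localhost, the `redis` client package, and the `logutils.redis` import path (none of which Tautulli itself uses). Note that `dequeue()` above returns the unpickled attribute dict rather than a `LogRecord`, so the `prepare()` override here, our addition, rebuilds the record before handing it to the handlers:

import logging
from redis import Redis
from logutils.redis import RedisQueueHandler, RedisQueueListener

class RecordListener(RedisQueueListener):
    def prepare(self, record):
        # dequeue() yields the dict produced by vars(record) on the
        # producer side; turn it back into a LogRecord for the handlers.
        return logging.makeLogRecord(record)

r = Redis()  # localhost:6379 by default

# Producer side: pickled record dicts are RPUSHed to the "python.logging" key.
logger = logging.getLogger('redisdemo')
logger.addHandler(RedisQueueHandler(redis=r))

# Consumer side (possibly a different process): BLPOP the same key.
listener = RecordListener(logging.StreamHandler(), redis=r)
listener.start()

logger.warning('shipped through Redis')
listener.stop()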
@@ -1,156 +0,0 @@
#
# Copyright (C) 2010-2013 Vinay Sajip. See LICENSE.txt for details.
#
import logging
from logging.handlers import BufferingHandler

class TestHandler(BufferingHandler):
    """
    This handler collects records in a buffer for later inspection by
    your unit test code.

    :param matcher: The :class:`~logutils.testing.Matcher` instance to
                    use for matching.
    """
    def __init__(self, matcher):
        # BufferingHandler takes a "capacity" argument
        # so as to know when to flush. As we're overriding
        # shouldFlush anyway, we can set a capacity of zero.
        # You can call flush() manually to clear out the
        # buffer.
        BufferingHandler.__init__(self, 0)
        self.formatted = []
        self.matcher = matcher

    def shouldFlush(self):
        """
        Should the buffer be flushed?

        This returns `False` - you'll need to flush manually, usually after
        your unit test code checks the buffer contents against your
        expectations.
        """
        return False

    def emit(self, record):
        """
        Saves the `__dict__` of the record in the `buffer` attribute,
        and the formatted records in the `formatted` attribute.

        :param record: The record to emit.
        """
        self.formatted.append(self.format(record))
        self.buffer.append(record.__dict__)

    def flush(self):
        """
        Clears out the `buffer` and `formatted` attributes.
        """
        BufferingHandler.flush(self)
        self.formatted = []

    def matches(self, **kwargs):
        """
        Look for a saved dict whose keys/values match the supplied arguments.

        Return `True` if found, else `False`.

        :param kwargs: A set of keyword arguments whose names are LogRecord
                       attributes and whose values are what you want to
                       match in a stored LogRecord.
        """
        result = False
        for d in self.buffer:
            if self.matcher.matches(d, **kwargs):
                result = True
                break
        #if not result:
        #    print('*** matcher failed completely on %d records' % len(self.buffer))
        return result

    def matchall(self, kwarglist):
        """
        Accept a list of keyword argument values and ensure that the handler's
        buffer of stored records matches the list one-for-one.

        Return `True` if exactly matched, else `False`.

        :param kwarglist: A list of keyword-argument dictionaries, each of
                          which will be passed to :meth:`matches` with the
                          corresponding record from the buffer.
        """
        if self.count != len(kwarglist):
            result = False
        else:
            result = True
            for d, kwargs in zip(self.buffer, kwarglist):
                if not self.matcher.matches(d, **kwargs):
                    result = False
                    break
        return result

    @property
    def count(self):
        """
        The number of records in the buffer.
        """
        return len(self.buffer)

class Matcher(object):
    """
    This utility class matches a stored dictionary of
    :class:`logging.LogRecord` attributes with keyword arguments
    passed to its :meth:`~logutils.testing.Matcher.matches` method.
    """

    _partial_matches = ('msg', 'message')
    """
    A list of :class:`logging.LogRecord` attribute names which
    will be checked for partial matches. If not in this list,
    an exact match will be attempted.
    """

    def matches(self, d, **kwargs):
        """
        Try to match a single dict with the supplied arguments.

        Keys whose values are strings and which are in self._partial_matches
        will be checked for partial (i.e. substring) matches. You can extend
        this scheme to (for example) do regular expression matching, etc.

        Return `True` if found, else `False`.

        :param kwargs: A set of keyword arguments whose names are LogRecord
                       attributes and whose values are what you want to
                       match in a stored LogRecord.
        """
        result = True
        for k in kwargs:
            v = kwargs[k]
            dv = d.get(k)
            if not self.match_value(k, dv, v):
                #print('*** matcher failed: %s, %r, %r' % (k, dv, v))
                result = False
                break
        return result

    def match_value(self, k, dv, v):
        """
        Try to match a single stored value (dv) with a supplied value (v).

        Return `True` if found, else `False`.

        :param k: The key value (LogRecord attribute name).
        :param dv: The stored value to match against.
        :param v: The value to compare with the stored value.
        """
        if type(v) != type(dv):
            result = False
        elif type(dv) is not str or k not in self._partial_matches:
            result = (v == dv)
        else:
            result = dv.find(v) >= 0
        #if not result:
        #    print('*** matcher failed on %s: %r vs. %r' % (k, dv, v))
        return result
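A sketch of the intended unit-test usage of the pair above (the logger name and log message are illustrative):

import logging
import unittest
from logutils.testing import Matcher, TestHandler

class LoggingTest(unittest.TestCase):
    def setUp(self):
        self.handler = TestHandler(Matcher())
        self.logger = logging.getLogger('under-test')
        self.logger.addHandler(self.handler)

    def test_warning_captured(self):
        self.logger.warning('disk %s is full', 'sda1')
        # 'msg' is in Matcher._partial_matches, so a substring suffices;
        # 'levelname' must match exactly.
        self.assertTrue(self.handler.matches(levelname='WARNING', msg='full'))
        self.handler.flush()  # shouldFlush() returns False, so clear manually
        self.assertEqual(self.handler.count, 0)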