Mirror of https://github.com/Tautulli/Tautulli.git (synced 2025-08-19 21:03:21 -07:00)

Initial Commit
commit 88daa3fb91
1311 changed files with 256240 additions and 0 deletions
5  lib/apscheduler/__init__.py  Normal file
@@ -0,0 +1,5 @@
version_info = (3, 0, 1)
version = '3.0.1'
release = '3.0.1'

__version__ = release  # PEP 396

73  lib/apscheduler/events.py  Normal file
@@ -0,0 +1,73 @@
__all__ = ('EVENT_SCHEDULER_START', 'EVENT_SCHEDULER_SHUTDOWN', 'EVENT_EXECUTOR_ADDED', 'EVENT_EXECUTOR_REMOVED',
           'EVENT_JOBSTORE_ADDED', 'EVENT_JOBSTORE_REMOVED', 'EVENT_ALL_JOBS_REMOVED', 'EVENT_JOB_ADDED',
           'EVENT_JOB_REMOVED', 'EVENT_JOB_MODIFIED', 'EVENT_JOB_EXECUTED', 'EVENT_JOB_ERROR', 'EVENT_JOB_MISSED',
           'SchedulerEvent', 'JobEvent', 'JobExecutionEvent')


EVENT_SCHEDULER_START = 1
EVENT_SCHEDULER_SHUTDOWN = 2
EVENT_EXECUTOR_ADDED = 4
EVENT_EXECUTOR_REMOVED = 8
EVENT_JOBSTORE_ADDED = 16
EVENT_JOBSTORE_REMOVED = 32
EVENT_ALL_JOBS_REMOVED = 64
EVENT_JOB_ADDED = 128
EVENT_JOB_REMOVED = 256
EVENT_JOB_MODIFIED = 512
EVENT_JOB_EXECUTED = 1024
EVENT_JOB_ERROR = 2048
EVENT_JOB_MISSED = 4096
EVENT_ALL = (EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN | EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED |
             EVENT_JOB_ADDED | EVENT_JOB_REMOVED | EVENT_JOB_MODIFIED | EVENT_JOB_EXECUTED |
             EVENT_JOB_ERROR | EVENT_JOB_MISSED)


class SchedulerEvent(object):
    """
    An event that concerns the scheduler itself.

    :ivar code: the type code of this event
    :ivar alias: alias of the job store or executor that was added or removed (if applicable)
    """

    def __init__(self, code, alias=None):
        super(SchedulerEvent, self).__init__()
        self.code = code
        self.alias = alias

    def __repr__(self):
        return '<%s (code=%d)>' % (self.__class__.__name__, self.code)


class JobEvent(SchedulerEvent):
    """
    An event that concerns a job.

    :ivar code: the type code of this event
    :ivar job_id: identifier of the job in question
    :ivar jobstore: alias of the job store containing the job in question
    """

    def __init__(self, code, job_id, jobstore):
        super(JobEvent, self).__init__(code)
        self.code = code
        self.job_id = job_id
        self.jobstore = jobstore


class JobExecutionEvent(JobEvent):
    """
    An event that concerns the execution of individual jobs.

    :ivar scheduled_run_time: the time when the job was scheduled to be run
    :ivar retval: the return value of the successfully executed job
    :ivar exception: the exception raised by the job
    :ivar traceback: a formatted traceback for the exception
    """

    def __init__(self, code, job_id, jobstore, scheduled_run_time, retval=None, exception=None, traceback=None):
        super(JobExecutionEvent, self).__init__(code, job_id, jobstore)
        self.scheduled_run_time = scheduled_run_time
        self.retval = retval
        self.exception = exception
        self.traceback = traceback

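The event codes above are distinct powers of two, so they compose into bitmasks (``EVENT_ALL`` is simply the OR of the job and scheduler events). A minimal sketch of how a listener mask works; ``add_listener`` itself belongs to the scheduler API and is not shown in this excerpt:

from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED

# A listener mask is a bitwise OR of the event constants of interest
mask = EVENT_JOB_EXECUTED | EVENT_JOB_ERROR  # 1024 | 2048 == 3072

def listener(event):
    # JobExecutionEvent carries either retval (success) or exception (failure)
    if event.exception:
        print('job %s failed: %r' % (event.job_id, event.exception))
    else:
        print('job %s returned %r' % (event.job_id, event.retval))

# Dispatch-side test: an event is delivered when its code is in the mask
assert EVENT_JOB_EXECUTED & mask
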
0  lib/apscheduler/executors/__init__.py  Normal file

28  lib/apscheduler/executors/asyncio.py  Normal file
@@ -0,0 +1,28 @@
from __future__ import absolute_import
import sys

from apscheduler.executors.base import BaseExecutor, run_job


class AsyncIOExecutor(BaseExecutor):
    """
    Runs jobs in the default executor of the event loop.

    Plugin alias: ``asyncio``
    """

    def start(self, scheduler, alias):
        super(AsyncIOExecutor, self).start(scheduler, alias)
        self._eventloop = scheduler._eventloop

    def _do_submit_job(self, job, run_times):
        def callback(f):
            try:
                events = f.result()
            except:
                self._run_job_error(job.id, *sys.exc_info()[1:])
            else:
                self._run_job_success(job.id, events)

        f = self._eventloop.run_in_executor(None, run_job, job, job._jobstore_alias, run_times, self._logger.name)
        f.add_done_callback(callback)

119  lib/apscheduler/executors/base.py  Normal file
@@ -0,0 +1,119 @@
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from traceback import format_tb
import logging
import sys

from pytz import utc
import six

from apscheduler.events import JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED


class MaxInstancesReachedError(Exception):
    def __init__(self, job):
        super(MaxInstancesReachedError, self).__init__(
            'Job "%s" has already reached its maximum number of instances (%d)' % (job.id, job.max_instances))


class BaseExecutor(six.with_metaclass(ABCMeta, object)):
    """Abstract base class that defines the interface that every executor must implement."""

    _scheduler = None
    _lock = None
    _logger = logging.getLogger('apscheduler.executors')

    def __init__(self):
        super(BaseExecutor, self).__init__()
        self._instances = defaultdict(lambda: 0)

    def start(self, scheduler, alias):
        """
        Called by the scheduler when the scheduler is being started or when the executor is being added to an already
        running scheduler.

        :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting this executor
        :param str|unicode alias: alias of this executor as it was assigned to the scheduler
        """

        self._scheduler = scheduler
        self._lock = scheduler._create_lock()
        self._logger = logging.getLogger('apscheduler.executors.%s' % alias)

    def shutdown(self, wait=True):
        """
        Shuts down this executor.

        :param bool wait: ``True`` to wait until all submitted jobs have been executed
        """

    def submit_job(self, job, run_times):
        """
        Submits job for execution.

        :param Job job: job to execute
        :param list[datetime] run_times: list of datetimes specifying when the job should have been run
        :raises MaxInstancesReachedError: if the maximum number of allowed instances for this job has been reached
        """

        assert self._lock is not None, 'This executor has not been started yet'
        with self._lock:
            if self._instances[job.id] >= job.max_instances:
                raise MaxInstancesReachedError(job)

            self._do_submit_job(job, run_times)
            self._instances[job.id] += 1

    @abstractmethod
    def _do_submit_job(self, job, run_times):
        """Performs the actual task of scheduling `run_job` to be called."""

    def _run_job_success(self, job_id, events):
        """Called by the executor with the list of generated events when `run_job` has been successfully called."""

        with self._lock:
            self._instances[job_id] -= 1

        for event in events:
            self._scheduler._dispatch_event(event)

    def _run_job_error(self, job_id, exc, traceback=None):
        """Called by the executor with the exception if there is an error calling `run_job`."""

        with self._lock:
            self._instances[job_id] -= 1

        exc_info = (exc.__class__, exc, traceback)
        self._logger.error('Error running job %s', job_id, exc_info=exc_info)


def run_job(job, jobstore_alias, run_times, logger_name):
    """Called by executors to run the job. Returns a list of scheduler events to be dispatched by the scheduler."""

    events = []
    logger = logging.getLogger(logger_name)
    for run_time in run_times:
        # See if the job missed its run time window, and handle possible misfires accordingly
        if job.misfire_grace_time is not None:
            difference = datetime.now(utc) - run_time
            grace_time = timedelta(seconds=job.misfire_grace_time)
            if difference > grace_time:
                events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias, run_time))
                logger.warning('Run time of job "%s" was missed by %s', job, difference)
                continue

        logger.info('Running job "%s" (scheduled at %s)', job, run_time)
        try:
            retval = job.func(*job.args, **job.kwargs)
        except:
            exc, tb = sys.exc_info()[1:]
            formatted_tb = ''.join(format_tb(tb))
            events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time, exception=exc,
                                            traceback=formatted_tb))
            logger.exception('Job "%s" raised an exception', job)
        else:
            events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval))
            logger.info('Job "%s" executed successfully', job)

    return events

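To make the misfire check in ``run_job`` concrete, here is the same arithmetic in isolation (the times are illustrative):

from datetime import datetime, timedelta
from pytz import utc

# run_job flags a run as missed when (now - scheduled run time) exceeds the grace period
scheduled_run_time = datetime(2014, 1, 1, 12, 0, 0, tzinfo=utc)
now = datetime(2014, 1, 1, 12, 0, 45, tzinfo=utc)
grace_time = timedelta(seconds=30)  # i.e. job.misfire_grace_time == 30

print(now - scheduled_run_time > grace_time)  # True -> EVENT_JOB_MISSED, the run is skipped
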
19  lib/apscheduler/executors/debug.py  Normal file
@@ -0,0 +1,19 @@
import sys

from apscheduler.executors.base import BaseExecutor, run_job


class DebugExecutor(BaseExecutor):
    """
    A special executor that executes the target callable directly instead of deferring it to a thread or process.

    Plugin alias: ``debug``
    """

    def _do_submit_job(self, job, run_times):
        try:
            events = run_job(job, job._jobstore_alias, run_times, self._logger.name)
        except:
            self._run_job_error(job.id, *sys.exc_info()[1:])
        else:
            self._run_job_success(job.id, events)

29  lib/apscheduler/executors/gevent.py  Normal file
@@ -0,0 +1,29 @@
from __future__ import absolute_import
import sys

from apscheduler.executors.base import BaseExecutor, run_job


try:
    import gevent
except ImportError:  # pragma: nocover
    raise ImportError('GeventExecutor requires gevent installed')


class GeventExecutor(BaseExecutor):
    """
    Runs jobs as greenlets.

    Plugin alias: ``gevent``
    """

    def _do_submit_job(self, job, run_times):
        def callback(greenlet):
            try:
                events = greenlet.get()
            except:
                self._run_job_error(job.id, *sys.exc_info()[1:])
            else:
                self._run_job_success(job.id, events)

        gevent.spawn(run_job, job, job._jobstore_alias, run_times, self._logger.name).link(callback)

54  lib/apscheduler/executors/pool.py  Normal file
@@ -0,0 +1,54 @@
from abc import abstractmethod
import concurrent.futures

from apscheduler.executors.base import BaseExecutor, run_job


class BasePoolExecutor(BaseExecutor):
    @abstractmethod
    def __init__(self, pool):
        super(BasePoolExecutor, self).__init__()
        self._pool = pool

    def _do_submit_job(self, job, run_times):
        def callback(f):
            exc, tb = (f.exception_info() if hasattr(f, 'exception_info') else
                       (f.exception(), getattr(f.exception(), '__traceback__', None)))
            if exc:
                self._run_job_error(job.id, exc, tb)
            else:
                self._run_job_success(job.id, f.result())

        f = self._pool.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name)
        f.add_done_callback(callback)

    def shutdown(self, wait=True):
        self._pool.shutdown(wait)


class ThreadPoolExecutor(BasePoolExecutor):
    """
    An executor that runs jobs in a concurrent.futures thread pool.

    Plugin alias: ``threadpool``

    :param max_workers: the maximum number of spawned threads.
    """

    def __init__(self, max_workers=10):
        pool = concurrent.futures.ThreadPoolExecutor(int(max_workers))
        super(ThreadPoolExecutor, self).__init__(pool)


class ProcessPoolExecutor(BasePoolExecutor):
    """
    An executor that runs jobs in a concurrent.futures process pool.

    Plugin alias: ``processpool``

    :param max_workers: the maximum number of spawned processes.
    """

    def __init__(self, max_workers=10):
        pool = concurrent.futures.ProcessPoolExecutor(int(max_workers))
        super(ProcessPoolExecutor, self).__init__(pool)

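A hedged usage sketch, assuming the ``executors`` keyword documented on ``BaseScheduler`` later in this commit: a thread pool suits I/O-bound jobs, while a process pool sidesteps the GIL for CPU-bound ones.

from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler

executors = {
    'default': ThreadPoolExecutor(max_workers=10),      # I/O-bound jobs
    'processpool': ProcessPoolExecutor(max_workers=4),  # CPU-bound jobs
}
scheduler = BackgroundScheduler(executors=executors)
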
25  lib/apscheduler/executors/twisted.py  Normal file
@@ -0,0 +1,25 @@
from __future__ import absolute_import

from apscheduler.executors.base import BaseExecutor, run_job


class TwistedExecutor(BaseExecutor):
    """
    Runs jobs in the reactor's thread pool.

    Plugin alias: ``twisted``
    """

    def start(self, scheduler, alias):
        super(TwistedExecutor, self).start(scheduler, alias)
        self._reactor = scheduler._reactor

    def _do_submit_job(self, job, run_times):
        def callback(success, result):
            if success:
                self._run_job_success(job.id, result)
            else:
                self._run_job_error(job.id, result.value, result.tb)

        self._reactor.getThreadPool().callInThreadWithCallback(callback, run_job, job, job._jobstore_alias, run_times,
                                                               self._logger.name)

252  lib/apscheduler/job.py  Normal file
@@ -0,0 +1,252 @@
from collections import Iterable, Mapping
from uuid import uuid4

import six

from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import ref_to_obj, obj_to_ref, datetime_repr, repr_escape, get_callable_name, check_callable_args, \
    convert_to_datetime


class Job(object):
    """
    Contains the options given when scheduling a callable, along with the job's current schedule and other state.
    This class should never be instantiated by the user.

    :var str id: the unique identifier of this job
    :var str name: the description of this job
    :var func: the callable to execute
    :var tuple|list args: positional arguments to the callable
    :var dict kwargs: keyword arguments to the callable
    :var bool coalesce: whether to only run the job once when several run times are due
    :var trigger: the trigger object that controls the schedule of this job
    :var str executor: the name of the executor that will run this job
    :var int misfire_grace_time: the time (in seconds) by which this job's execution is allowed to be late
    :var int max_instances: the maximum number of concurrently executing instances allowed for this job
    :var datetime.datetime next_run_time: the next scheduled run time of this job
    """

    __slots__ = ('_scheduler', '_jobstore_alias', 'id', 'trigger', 'executor', 'func', 'func_ref', 'args', 'kwargs',
                 'name', 'misfire_grace_time', 'coalesce', 'max_instances', 'next_run_time')

    def __init__(self, scheduler, id=None, **kwargs):
        super(Job, self).__init__()
        self._scheduler = scheduler
        self._jobstore_alias = None
        self._modify(id=id or uuid4().hex, **kwargs)

    def modify(self, **changes):
        """
        Makes the given changes to this job and saves it in the associated job store.
        Accepted keyword arguments are the same as the variables on this class.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.modify_job`
        """

        self._scheduler.modify_job(self.id, self._jobstore_alias, **changes)

    def reschedule(self, trigger, **trigger_args):
        """
        Shortcut for switching the trigger on this job.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.reschedule_job`
        """

        self._scheduler.reschedule_job(self.id, self._jobstore_alias, trigger, **trigger_args)

    def pause(self):
        """
        Temporarily suspend the execution of this job.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.pause_job`
        """

        self._scheduler.pause_job(self.id, self._jobstore_alias)

    def resume(self):
        """
        Resume the schedule of this job if previously paused.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.resume_job`
        """

        self._scheduler.resume_job(self.id, self._jobstore_alias)

    def remove(self):
        """
        Unschedules this job and removes it from its associated job store.

        .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.remove_job`
        """

        self._scheduler.remove_job(self.id, self._jobstore_alias)

    @property
    def pending(self):
        """Returns ``True`` if the referenced job is still waiting to be added to its designated job store."""

        return self._jobstore_alias is None

    #
    # Private API
    #

    def _get_run_times(self, now):
        """
        Computes the scheduled run times between ``next_run_time`` and ``now`` (inclusive).

        :type now: datetime.datetime
        :rtype: list[datetime.datetime]
        """

        run_times = []
        next_run_time = self.next_run_time
        while next_run_time and next_run_time <= now:
            run_times.append(next_run_time)
            next_run_time = self.trigger.get_next_fire_time(next_run_time, now)

        return run_times

    def _modify(self, **changes):
        """Validates the changes to the Job and makes the modifications if and only if all of them validate."""

        approved = {}

        if 'id' in changes:
            value = changes.pop('id')
            if not isinstance(value, six.string_types):
                raise TypeError("id must be a nonempty string")
            if hasattr(self, 'id'):
                raise ValueError('The job ID may not be changed')
            approved['id'] = value

        if 'func' in changes or 'args' in changes or 'kwargs' in changes:
            func = changes.pop('func') if 'func' in changes else self.func
            args = changes.pop('args') if 'args' in changes else self.args
            kwargs = changes.pop('kwargs') if 'kwargs' in changes else self.kwargs

            if isinstance(func, str):
                func_ref = func
                func = ref_to_obj(func)
            elif callable(func):
                try:
                    func_ref = obj_to_ref(func)
                except ValueError:
                    # If this happens, this Job won't be serializable
                    func_ref = None
            else:
                raise TypeError('func must be a callable or a textual reference to one')

            if not hasattr(self, 'name') and changes.get('name', None) is None:
                changes['name'] = get_callable_name(func)

            if isinstance(args, six.string_types) or not isinstance(args, Iterable):
                raise TypeError('args must be a non-string iterable')
            if isinstance(kwargs, six.string_types) or not isinstance(kwargs, Mapping):
                raise TypeError('kwargs must be a dict-like object')

            check_callable_args(func, args, kwargs)

            approved['func'] = func
            approved['func_ref'] = func_ref
            approved['args'] = args
            approved['kwargs'] = kwargs

        if 'name' in changes:
            value = changes.pop('name')
            if not value or not isinstance(value, six.string_types):
                raise TypeError("name must be a nonempty string")
            approved['name'] = value

        if 'misfire_grace_time' in changes:
            value = changes.pop('misfire_grace_time')
            if value is not None and (not isinstance(value, six.integer_types) or value <= 0):
                raise TypeError('misfire_grace_time must be either None or a positive integer')
            approved['misfire_grace_time'] = value

        if 'coalesce' in changes:
            value = bool(changes.pop('coalesce'))
            approved['coalesce'] = value

        if 'max_instances' in changes:
            value = changes.pop('max_instances')
            if not isinstance(value, six.integer_types) or value <= 0:
                raise TypeError('max_instances must be a positive integer')
            approved['max_instances'] = value

        if 'trigger' in changes:
            trigger = changes.pop('trigger')
            if not isinstance(trigger, BaseTrigger):
                raise TypeError('Expected a trigger instance, got %s instead' % trigger.__class__.__name__)

            approved['trigger'] = trigger

        if 'executor' in changes:
            value = changes.pop('executor')
            if not isinstance(value, six.string_types):
                raise TypeError('executor must be a string')
            approved['executor'] = value

        if 'next_run_time' in changes:
            value = changes.pop('next_run_time')
            approved['next_run_time'] = convert_to_datetime(value, self._scheduler.timezone, 'next_run_time')

        if changes:
            raise AttributeError('The following are not modifiable attributes of Job: %s' % ', '.join(changes))

        for key, value in six.iteritems(approved):
            setattr(self, key, value)

    def __getstate__(self):
        # Don't allow this Job to be serialized if the function reference could not be determined
        if not self.func_ref:
            raise ValueError('This Job cannot be serialized since the reference to its callable (%r) could not be '
                             'determined. Consider giving a textual reference (module:function name) instead.' %
                             (self.func,))

        return {
            'version': 1,
            'id': self.id,
            'func': self.func_ref,
            'trigger': self.trigger,
            'executor': self.executor,
            'args': self.args,
            'kwargs': self.kwargs,
            'name': self.name,
            'misfire_grace_time': self.misfire_grace_time,
            'coalesce': self.coalesce,
            'max_instances': self.max_instances,
            'next_run_time': self.next_run_time
        }

    def __setstate__(self, state):
        if state.get('version', 1) > 1:
            raise ValueError('Job has version %s, but only version 1 can be handled' % state['version'])

        self.id = state['id']
        self.func_ref = state['func']
        self.func = ref_to_obj(self.func_ref)
        self.trigger = state['trigger']
        self.executor = state['executor']
        self.args = state['args']
        self.kwargs = state['kwargs']
        self.name = state['name']
        self.misfire_grace_time = state['misfire_grace_time']
        self.coalesce = state['coalesce']
        self.max_instances = state['max_instances']
        self.next_run_time = state['next_run_time']

    def __eq__(self, other):
        if isinstance(other, Job):
            return self.id == other.id
        return NotImplemented

    def __repr__(self):
        return '<Job (id=%s name=%s)>' % (repr_escape(self.id), repr_escape(self.name))

    def __str__(self):
        return '%s (trigger: %s, next run at: %s)' % (repr_escape(self.name), repr_escape(str(self.trigger)),
                                                      datetime_repr(self.next_run_time))

    def __unicode__(self):
        return six.u('%s (trigger: %s, next run at: %s)') % (self.name, self.trigger, datetime_repr(self.next_run_time))

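A short illustration of the ``func_ref`` rule enforced by ``__getstate__`` above: only callables with an importable textual reference can be pickled, which is what the persistent job stores later in this commit rely on. The ``tick`` function is a hypothetical example.

from apscheduler.util import obj_to_ref

def tick():
    print('tick')

print(obj_to_ref(tick))    # e.g. '__main__:tick' -- stored on the Job as func_ref
# For a lambda, obj_to_ref() raises ValueError, so _modify() leaves func_ref as
# None and __getstate__ refuses to serialize the Job for a persistent store.
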
0  lib/apscheduler/jobstores/__init__.py  Normal file

127  lib/apscheduler/jobstores/base.py  Normal file
@@ -0,0 +1,127 @@
from abc import ABCMeta, abstractmethod
import logging

import six


class JobLookupError(KeyError):
    """Raised when the job store cannot find a job for update or removal."""

    def __init__(self, job_id):
        super(JobLookupError, self).__init__(six.u('No job by the id of %s was found') % job_id)


class ConflictingIdError(KeyError):
    """Raised when the uniqueness of job IDs is being violated."""

    def __init__(self, job_id):
        super(ConflictingIdError, self).__init__(six.u('Job identifier (%s) conflicts with an existing job') % job_id)


class TransientJobError(ValueError):
    """Raised when an attempt is detected to add a transient job (one with no func_ref) to a persistent job store."""

    def __init__(self, job_id):
        super(TransientJobError, self).__init__(
            six.u('Job (%s) cannot be added to this job store because a reference to the callable could not be '
                  'determined.') % job_id)


class BaseJobStore(six.with_metaclass(ABCMeta)):
    """Abstract base class that defines the interface that every job store must implement."""

    _scheduler = None
    _alias = None
    _logger = logging.getLogger('apscheduler.jobstores')

    def start(self, scheduler, alias):
        """
        Called by the scheduler when the scheduler is being started or when the job store is being added to an already
        running scheduler.

        :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting this job store
        :param str|unicode alias: alias of this job store as it was assigned to the scheduler
        """

        self._scheduler = scheduler
        self._alias = alias
        self._logger = logging.getLogger('apscheduler.jobstores.%s' % alias)

    def shutdown(self):
        """Frees any resources still bound to this job store."""

    @abstractmethod
    def lookup_job(self, job_id):
        """
        Returns a specific job, or ``None`` if it isn't found.

        The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of the returned job to
        point to the scheduler and itself, respectively.

        :param str|unicode job_id: identifier of the job
        :rtype: Job
        """

    @abstractmethod
    def get_due_jobs(self, now):
        """
        Returns the list of jobs that have ``next_run_time`` earlier or equal to ``now``.
        The returned jobs must be sorted by next run time (ascending).

        :param datetime.datetime now: the current (timezone aware) datetime
        :rtype: list[Job]
        """

    @abstractmethod
    def get_next_run_time(self):
        """
        Returns the earliest run time of all the jobs stored in this job store, or ``None`` if there are no active jobs.

        :rtype: datetime.datetime
        """

    @abstractmethod
    def get_all_jobs(self):
        """
        Returns a list of all jobs in this job store. The returned jobs should be sorted by next run time (ascending).
        Paused jobs (next_run_time is None) should be sorted last.

        The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of the returned jobs to
        point to the scheduler and itself, respectively.

        :rtype: list[Job]
        """

    @abstractmethod
    def add_job(self, job):
        """
        Adds the given job to this store.

        :param Job job: the job to add
        :raises ConflictingIdError: if there is another job in this store with the same ID
        """

    @abstractmethod
    def update_job(self, job):
        """
        Replaces the job in the store with the given newer version.

        :param Job job: the job to update
        :raises JobLookupError: if the job does not exist
        """

    @abstractmethod
    def remove_job(self, job_id):
        """
        Removes the given job from this store.

        :param str|unicode job_id: identifier of the job
        :raises JobLookupError: if the job does not exist
        """

    @abstractmethod
    def remove_all_jobs(self):
        """Removes all jobs from this store."""

    def __repr__(self):
        return '<%s>' % self.__class__.__name__

107  lib/apscheduler/jobstores/memory.py  Normal file
@@ -0,0 +1,107 @@
from __future__ import absolute_import

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import datetime_to_utc_timestamp


class MemoryJobStore(BaseJobStore):
    """
    Stores jobs in an array in RAM. Provides no persistence support.

    Plugin alias: ``memory``
    """

    def __init__(self):
        super(MemoryJobStore, self).__init__()
        self._jobs = []  # list of (job, timestamp), sorted by next_run_time and job id (ascending)
        self._jobs_index = {}  # id -> (job, timestamp) lookup table

    def lookup_job(self, job_id):
        return self._jobs_index.get(job_id, (None, None))[0]

    def get_due_jobs(self, now):
        now_timestamp = datetime_to_utc_timestamp(now)
        pending = []
        for job, timestamp in self._jobs:
            if timestamp is None or timestamp > now_timestamp:
                break
            pending.append(job)

        return pending

    def get_next_run_time(self):
        return self._jobs[0][0].next_run_time if self._jobs else None

    def get_all_jobs(self):
        return [j[0] for j in self._jobs]

    def add_job(self, job):
        if job.id in self._jobs_index:
            raise ConflictingIdError(job.id)

        timestamp = datetime_to_utc_timestamp(job.next_run_time)
        index = self._get_job_index(timestamp, job.id)
        self._jobs.insert(index, (job, timestamp))
        self._jobs_index[job.id] = (job, timestamp)

    def update_job(self, job):
        old_job, old_timestamp = self._jobs_index.get(job.id, (None, None))
        if old_job is None:
            raise JobLookupError(job.id)

        # If the next run time has not changed, simply replace the job in its present index.
        # Otherwise, reinsert the job to the list to preserve the ordering.
        old_index = self._get_job_index(old_timestamp, old_job.id)
        new_timestamp = datetime_to_utc_timestamp(job.next_run_time)
        if old_timestamp == new_timestamp:
            self._jobs[old_index] = (job, new_timestamp)
        else:
            del self._jobs[old_index]
            new_index = self._get_job_index(new_timestamp, job.id)
            self._jobs.insert(new_index, (job, new_timestamp))

        self._jobs_index[old_job.id] = (job, new_timestamp)

    def remove_job(self, job_id):
        job, timestamp = self._jobs_index.get(job_id, (None, None))
        if job is None:
            raise JobLookupError(job_id)

        index = self._get_job_index(timestamp, job_id)
        del self._jobs[index]
        del self._jobs_index[job.id]

    def remove_all_jobs(self):
        self._jobs = []
        self._jobs_index = {}

    def shutdown(self):
        self.remove_all_jobs()

    def _get_job_index(self, timestamp, job_id):
        """
        Returns the index of the given job, or if it's not found, the index where the job should be inserted based on
        the given timestamp.

        :type timestamp: int
        :type job_id: str
        """

        lo, hi = 0, len(self._jobs)
        timestamp = float('inf') if timestamp is None else timestamp
        while lo < hi:
            mid = (lo + hi) // 2
            mid_job, mid_timestamp = self._jobs[mid]
            mid_timestamp = float('inf') if mid_timestamp is None else mid_timestamp
            if mid_timestamp > timestamp:
                hi = mid
            elif mid_timestamp < timestamp:
                lo = mid + 1
            elif mid_job.id > job_id:
                hi = mid
            elif mid_job.id < job_id:
                lo = mid + 1
            else:
                return mid

        return lo

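The bisection in ``_get_job_index`` keeps ``_jobs`` ordered by ``(timestamp, job id)``, with a ``None`` timestamp treated as positive infinity so that paused jobs sort last. The same ordering expressed directly, with illustrative entries:

def sort_key(timestamp, job_id):
    # None (a paused job) is treated as +inf, exactly as in _get_job_index
    return (float('inf') if timestamp is None else timestamp, job_id)

entries = [(5.0, 'b'), (None, 'paused'), (1.0, 'a'), (5.0, 'a')]
print(sorted(entries, key=lambda e: sort_key(*e)))
# [(1.0, 'a'), (5.0, 'a'), (5.0, 'b'), (None, 'paused')]
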
124  lib/apscheduler/jobstores/mongodb.py  Normal file
@@ -0,0 +1,124 @@
from __future__ import absolute_import

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    from bson.binary import Binary
    from pymongo.errors import DuplicateKeyError
    from pymongo import MongoClient, ASCENDING
except ImportError:  # pragma: nocover
    raise ImportError('MongoDBJobStore requires PyMongo installed')


class MongoDBJobStore(BaseJobStore):
    """
    Stores jobs in a MongoDB database. Any leftover keyword arguments are directly passed to pymongo's `MongoClient
    <http://api.mongodb.org/python/current/api/pymongo/mongo_client.html#pymongo.mongo_client.MongoClient>`_.

    Plugin alias: ``mongodb``

    :param str database: database to store jobs in
    :param str collection: collection to store jobs in
    :param client: a :class:`~pymongo.mongo_client.MongoClient` instance to use instead of providing connection
        arguments
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available
    """

    def __init__(self, database='apscheduler', collection='jobs', client=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(MongoDBJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol

        if not database:
            raise ValueError('The "database" parameter must not be empty')
        if not collection:
            raise ValueError('The "collection" parameter must not be empty')

        if client:
            self.connection = maybe_ref(client)
        else:
            connect_args.setdefault('w', 1)
            self.connection = MongoClient(**connect_args)

        self.collection = self.connection[database][collection]
        self.collection.ensure_index('next_run_time', sparse=True)

    def lookup_job(self, job_id):
        document = self.collection.find_one(job_id, ['job_state'])
        return self._reconstitute_job(document['job_state']) if document else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        return self._get_jobs({'next_run_time': {'$lte': timestamp}})

    def get_next_run_time(self):
        document = self.collection.find_one({'next_run_time': {'$ne': None}}, fields=['next_run_time'],
                                            sort=[('next_run_time', ASCENDING)])
        return utc_timestamp_to_datetime(document['next_run_time']) if document else None

    def get_all_jobs(self):
        return self._get_jobs({})

    def add_job(self, job):
        try:
            self.collection.insert({
                '_id': job.id,
                'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
                'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
            })
        except DuplicateKeyError:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        changes = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
        }
        result = self.collection.update({'_id': job.id}, {'$set': changes})
        if result and result['n'] == 0:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        result = self.collection.remove(job_id)
        if result and result['n'] == 0:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        self.collection.remove()

    def shutdown(self):
        self.connection.disconnect()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self, conditions):
        jobs = []
        failed_job_ids = []
        for document in self.collection.find(conditions, ['_id', 'job_state'], sort=[('next_run_time', ASCENDING)]):
            try:
                jobs.append(self._reconstitute_job(document['job_state']))
            except:
                self._logger.exception('Unable to restore job "%s" -- removing it', document['_id'])
                failed_job_ids.append(document['_id'])

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            self.collection.remove({'_id': {'$in': failed_job_ids}})

        return jobs

    def __repr__(self):
        return '<%s (client=%s)>' % (self.__class__.__name__, self.connection)

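A hedged usage sketch wiring the store into a scheduler via the ``jobstores`` keyword documented on ``BaseScheduler`` below; the connection details are illustrative (extra keyword arguments such as ``host`` are forwarded to ``MongoClient``):

from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.schedulers.background import BackgroundScheduler

jobstores = {'default': MongoDBJobStore(database='apscheduler', collection='jobs', host='localhost')}
scheduler = BackgroundScheduler(jobstores=jobstores)
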
138  lib/apscheduler/jobstores/redis.py  Normal file
@@ -0,0 +1,138 @@
from __future__ import absolute_import

import six

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    from redis import StrictRedis
except ImportError:  # pragma: nocover
    raise ImportError('RedisJobStore requires redis installed')


class RedisJobStore(BaseJobStore):
    """
    Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's StrictRedis.

    Plugin alias: ``redis``

    :param int db: the database number to store jobs in
    :param str jobs_key: key to store jobs in
    :param str run_times_key: key to store the jobs' run times in
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available
    """

    def __init__(self, db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times',
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(RedisJobStore, self).__init__()

        if db is None:
            raise ValueError('The "db" parameter must not be empty')
        if not jobs_key:
            raise ValueError('The "jobs_key" parameter must not be empty')
        if not run_times_key:
            raise ValueError('The "run_times_key" parameter must not be empty')

        self.pickle_protocol = pickle_protocol
        self.jobs_key = jobs_key
        self.run_times_key = run_times_key
        self.redis = StrictRedis(db=int(db), **connect_args)

    def lookup_job(self, job_id):
        job_state = self.redis.hget(self.jobs_key, job_id)
        return self._reconstitute_job(job_state) if job_state else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
        if job_ids:
            job_states = self.redis.hmget(self.jobs_key, *job_ids)
            return self._reconstitute_jobs(six.moves.zip(job_ids, job_states))
        return []

    def get_next_run_time(self):
        next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True)
        if next_run_time:
            return utc_timestamp_to_datetime(next_run_time[0][1])

    def get_all_jobs(self):
        job_states = self.redis.hgetall(self.jobs_key)
        jobs = self._reconstitute_jobs(six.iteritems(job_states))
        return sorted(jobs, key=lambda job: job.next_run_time)

    def add_job(self, job):
        if self.redis.hexists(self.jobs_key, job.id):
            raise ConflictingIdError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol))
            pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            pipe.execute()

    def update_job(self, job):
        if not self.redis.hexists(self.jobs_key, job.id):
            raise JobLookupError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol))
            if job.next_run_time:
                pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            else:
                pipe.zrem(self.run_times_key, job.id)
            pipe.execute()

    def remove_job(self, job_id):
        if not self.redis.hexists(self.jobs_key, job_id):
            raise JobLookupError(job_id)

        with self.redis.pipeline() as pipe:
            pipe.hdel(self.jobs_key, job_id)
            pipe.zrem(self.run_times_key, job_id)
            pipe.execute()

    def remove_all_jobs(self):
        with self.redis.pipeline() as pipe:
            pipe.delete(self.jobs_key)
            pipe.delete(self.run_times_key)
            pipe.execute()

    def shutdown(self):
        self.redis.connection_pool.disconnect()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _reconstitute_jobs(self, job_states):
        jobs = []
        failed_job_ids = []
        for job_id, job_state in job_states:
            try:
                jobs.append(self._reconstitute_job(job_state))
            except:
                self._logger.exception('Unable to restore job "%s" -- removing it', job_id)
                failed_job_ids.append(job_id)

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            with self.redis.pipeline() as pipe:
                pipe.hdel(self.jobs_key, *failed_job_ids)
                pipe.zrem(self.run_times_key, *failed_job_ids)
                pipe.execute()

        return jobs

    def __repr__(self):
        return '<%s>' % self.__class__.__name__

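The store splits each job across two Redis structures: a hash (``jobs_key``) mapping job id to pickled state, and a sorted set (``run_times_key``) scored by UTC timestamp, so ``get_due_jobs`` is a single range query. Roughly, the commands issued above (key names are the defaults):

# HSET apscheduler.jobs <job_id> <pickled state>          -- add_job / update_job
# ZADD apscheduler.run_times <utc_timestamp> <job_id>     -- index by next run time
# ZRANGEBYSCORE apscheduler.run_times 0 <now_timestamp>   -- get_due_jobs
# ZRANGE apscheduler.run_times 0 0 WITHSCORES             -- get_next_run_time
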
137  lib/apscheduler/jobstores/sqlalchemy.py  Normal file
@@ -0,0 +1,137 @@
from __future__ import absolute_import

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    from sqlalchemy import create_engine, Table, Column, MetaData, Unicode, Float, LargeBinary, select
    from sqlalchemy.exc import IntegrityError
except ImportError:  # pragma: nocover
    raise ImportError('SQLAlchemyJobStore requires SQLAlchemy installed')


class SQLAlchemyJobStore(BaseJobStore):
    """
    Stores jobs in a database table using SQLAlchemy. The table will be created if it doesn't exist in the database.

    Plugin alias: ``sqlalchemy``

    :param str url: connection string (see `SQLAlchemy documentation
        <http://docs.sqlalchemy.org/en/latest/core/engines.html?highlight=create_engine#database-urls>`_
        on this)
    :param engine: an SQLAlchemy Engine to use instead of creating a new one based on ``url``
    :param str tablename: name of the table to store jobs in
    :param metadata: a :class:`~sqlalchemy.MetaData` instance to use instead of creating a new one
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available
    """

    def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL):
        super(SQLAlchemyJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol
        metadata = maybe_ref(metadata) or MetaData()

        if engine:
            self.engine = maybe_ref(engine)
        elif url:
            self.engine = create_engine(url)
        else:
            raise ValueError('Need either "engine" or "url" defined')

        # 191 = max key length in MySQL for InnoDB/utf8mb4 tables, 25 = precision that translates to an 8-byte float
        self.jobs_t = Table(
            tablename, metadata,
            Column('id', Unicode(191, _warn_on_bytestring=False), primary_key=True),
            Column('next_run_time', Float(25), index=True),
            Column('job_state', LargeBinary, nullable=False)
        )

        self.jobs_t.create(self.engine, True)

    def lookup_job(self, job_id):
        selectable = select([self.jobs_t.c.job_state]).where(self.jobs_t.c.id == job_id)
        job_state = self.engine.execute(selectable).scalar()
        return self._reconstitute_job(job_state) if job_state else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp)

    def get_next_run_time(self):
        selectable = select([self.jobs_t.c.next_run_time]).where(self.jobs_t.c.next_run_time != None).\
            order_by(self.jobs_t.c.next_run_time).limit(1)
        next_run_time = self.engine.execute(selectable).scalar()
        return utc_timestamp_to_datetime(next_run_time)

    def get_all_jobs(self):
        return self._get_jobs()

    def add_job(self, job):
        insert = self.jobs_t.insert().values(**{
            'id': job.id,
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
        })
        try:
            self.engine.execute(insert)
        except IntegrityError:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        update = self.jobs_t.update().values(**{
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
        }).where(self.jobs_t.c.id == job.id)
        result = self.engine.execute(update)
        if result.rowcount == 0:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id)
        result = self.engine.execute(delete)
        if result.rowcount == 0:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        delete = self.jobs_t.delete()
        self.engine.execute(delete)

    def shutdown(self):
        self.engine.dispose()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job_state['jobstore'] = self
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self, *conditions):
        jobs = []
        selectable = select([self.jobs_t.c.id, self.jobs_t.c.job_state]).order_by(self.jobs_t.c.next_run_time)
        selectable = selectable.where(*conditions) if conditions else selectable
        failed_job_ids = set()
        for row in self.engine.execute(selectable):
            try:
                jobs.append(self._reconstitute_job(row.job_state))
            except:
                self._logger.exception('Unable to restore job "%s" -- removing it', row.id)
                failed_job_ids.add(row.id)

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            delete = self.jobs_t.delete().where(self.jobs_t.c.id.in_(failed_job_ids))
            self.engine.execute(delete)

        return jobs

    def __repr__(self):
        return '<%s (url=%s)>' % (self.__class__.__name__, self.engine.url)

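A minimal usage sketch, assuming a local SQLite file (the file name is illustrative); the ``apscheduler_jobs`` table is created on startup if missing:

from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.background import BackgroundScheduler

jobstores = {'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')}
scheduler = BackgroundScheduler(jobstores=jobstores)
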
12  lib/apscheduler/schedulers/__init__.py  Normal file
@@ -0,0 +1,12 @@
class SchedulerAlreadyRunningError(Exception):
    """Raised when attempting to start or configure the scheduler when it's already running."""

    def __str__(self):
        return 'Scheduler is already running'


class SchedulerNotRunningError(Exception):
    """Raised when attempting to shut down the scheduler when it's not running."""

    def __str__(self):
        return 'Scheduler is not running'

68  lib/apscheduler/schedulers/asyncio.py  Normal file
@@ -0,0 +1,68 @@
from __future__ import absolute_import
from functools import wraps

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref

try:
    import asyncio
except ImportError:  # pragma: nocover
    try:
        import trollius as asyncio
    except ImportError:
        raise ImportError('AsyncIOScheduler requires either Python 3.4 or the asyncio package installed')


def run_in_event_loop(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        self._eventloop.call_soon_threadsafe(func, self, *args, **kwargs)
    return wrapper


class AsyncIOScheduler(BaseScheduler):
    """
    A scheduler that runs on an asyncio (:pep:`3156`) event loop.

    Extra options:

    ============== =============================================================
    ``event_loop`` AsyncIO event loop to use (defaults to the global event loop)
    ============== =============================================================
    """

    _eventloop = None
    _timeout = None

    def start(self):
        super(AsyncIOScheduler, self).start()
        self.wakeup()

    @run_in_event_loop
    def shutdown(self, wait=True):
        super(AsyncIOScheduler, self).shutdown(wait)
        self._stop_timer()

    def _configure(self, config):
        self._eventloop = maybe_ref(config.pop('event_loop', None)) or asyncio.get_event_loop()
        super(AsyncIOScheduler, self)._configure(config)

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            self._timeout = self._eventloop.call_later(wait_seconds, self.wakeup)

    def _stop_timer(self):
        if self._timeout:
            self._timeout.cancel()
            del self._timeout

    @run_in_event_loop
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)

    def _create_default_executor(self):
        from apscheduler.executors.asyncio import AsyncIOExecutor
        return AsyncIOExecutor()

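The ``run_in_event_loop`` decorator above makes ``wakeup()`` and ``shutdown()`` safe to call from other threads by marshaling them onto the loop with ``call_soon_threadsafe``. A minimal usage sketch under those assumptions (Python 3.4-era loop handling):

import asyncio
from apscheduler.schedulers.asyncio import AsyncIOScheduler

scheduler = AsyncIOScheduler()
scheduler.start()  # queues the initial wakeup() onto the event loop
asyncio.get_event_loop().run_forever()  # jobs fire once the loop is running
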
39  lib/apscheduler/schedulers/background.py  Normal file
@@ -0,0 +1,39 @@
from __future__ import absolute_import
from threading import Thread, Event

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.util import asbool


class BackgroundScheduler(BlockingScheduler):
    """
    A scheduler that runs in the background using a separate thread
    (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will return immediately).

    Extra options:

    ========== ============================================================================================
    ``daemon`` Set the ``daemon`` option in the background thread (defaults to ``True``,
               see `the documentation <https://docs.python.org/3.4/library/threading.html#thread-objects>`_
               for further details)
    ========== ============================================================================================
    """

    _thread = None

    def _configure(self, config):
        self._daemon = asbool(config.pop('daemon', True))
        super(BackgroundScheduler, self)._configure(config)

    def start(self):
        BaseScheduler.start(self)
        self._event = Event()
        self._thread = Thread(target=self._main_loop, name='APScheduler')
        self._thread.daemon = self._daemon
        self._thread.start()

    def shutdown(self, wait=True):
        super(BackgroundScheduler, self).shutdown(wait)
        self._thread.join()
        del self._thread

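A minimal usage sketch; note that with the default ``daemon=True`` the 'APScheduler' thread will not keep the interpreter alive on its own, so the main thread must stay busy:

import time
from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler(daemon=True)  # 'daemon' is popped in _configure above
scheduler.start()   # returns immediately; _main_loop runs in its own thread
try:
    time.sleep(60)  # keep the main thread alive while jobs run
finally:
    scheduler.shutdown()
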
845
lib/apscheduler/schedulers/base.py
Normal file
845
lib/apscheduler/schedulers/base.py
Normal file
|
@ -0,0 +1,845 @@
|
|||
from __future__ import print_function
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from collections import MutableMapping
|
||||
from threading import RLock
|
||||
from datetime import datetime
|
||||
from logging import getLogger
|
||||
import sys
|
||||
|
||||
from pkg_resources import iter_entry_points
from tzlocal import get_localzone
import six

from apscheduler.schedulers import SchedulerAlreadyRunningError, SchedulerNotRunningError
from apscheduler.executors.base import MaxInstancesReachedError, BaseExecutor
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.base import ConflictingIdError, JobLookupError, BaseJobStore
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.job import Job
from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import asbool, asint, astimezone, maybe_ref, timedelta_seconds, undefined
from apscheduler.events import (
    SchedulerEvent, JobEvent, EVENT_SCHEDULER_START, EVENT_SCHEDULER_SHUTDOWN, EVENT_JOBSTORE_ADDED,
    EVENT_JOBSTORE_REMOVED, EVENT_ALL, EVENT_JOB_MODIFIED, EVENT_JOB_REMOVED, EVENT_JOB_ADDED, EVENT_EXECUTOR_ADDED,
    EVENT_EXECUTOR_REMOVED, EVENT_ALL_JOBS_REMOVED)


class BaseScheduler(six.with_metaclass(ABCMeta)):
    """
    Abstract base class for all schedulers. Takes the following keyword arguments:

    :param str|logging.Logger logger: logger to use for the scheduler's logging (defaults to apscheduler.scheduler)
    :param str|datetime.tzinfo timezone: the default time zone (defaults to the local timezone)
    :param dict job_defaults: default values for newly added jobs
    :param dict jobstores: a dictionary of job store alias -> job store instance or configuration dict
    :param dict executors: a dictionary of executor alias -> executor instance or configuration dict

    .. seealso:: :ref:`scheduler-config`
    """

    _trigger_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.triggers'))
    _trigger_classes = {}
    _executor_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.executors'))
    _executor_classes = {}
    _jobstore_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.jobstores'))
    _jobstore_classes = {}
    _stopped = True

    #
    # Public API
    #

    def __init__(self, gconfig={}, **options):
        super(BaseScheduler, self).__init__()
        self._executors = {}
        self._executors_lock = self._create_lock()
        self._jobstores = {}
        self._jobstores_lock = self._create_lock()
        self._listeners = []
        self._listeners_lock = self._create_lock()
        self._pending_jobs = []
        self.configure(gconfig, **options)

    def configure(self, gconfig={}, prefix='apscheduler.', **options):
        """
        Reconfigures the scheduler with the given options. Can only be done when the scheduler isn't running.

        :param dict gconfig: a "global" configuration dictionary whose values can be overridden by keyword arguments to
            this method
        :param str|unicode prefix: pick only those keys from ``gconfig`` that are prefixed with this string
            (pass an empty string or ``None`` to use all keys)
        :raises SchedulerAlreadyRunningError: if the scheduler is already running
        """

        if self.running:
            raise SchedulerAlreadyRunningError

        # If a non-empty prefix was given, strip it from the keys in the global configuration dict
        if prefix:
            prefixlen = len(prefix)
            gconfig = dict((key[prefixlen:], value) for key, value in six.iteritems(gconfig) if key.startswith(prefix))

        # Create a structure from the dotted options (e.g. "a.b.c = d" -> {'a': {'b': {'c': 'd'}}})
        config = {}
        for key, value in six.iteritems(gconfig):
            parts = key.split('.')
            parent = config
            key = parts.pop(0)
            while parts:
                parent = parent.setdefault(key, {})
                key = parts.pop(0)
            parent[key] = value

        # Override any options with explicit keyword arguments
        config.update(options)
        self._configure(config)
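The dotted-option expansion in configure() above is what lets a flat, prefixed dictionary drive the whole scheduler. A minimal sketch, assuming apscheduler is importable and using BlockingScheduler (defined later in this commit) as the concrete class; the option values are illustrative:

from apscheduler.schedulers.blocking import BlockingScheduler

# Keys carrying the 'apscheduler.' prefix are kept, the prefix is stripped,
# and dotted keys are expanded into nested dicts, so
# 'apscheduler.job_defaults.coalesce' becomes config['job_defaults']['coalesce']
# by the time _configure() sees it.
gconfig = {
    'apscheduler.timezone': 'UTC',
    'apscheduler.job_defaults.coalesce': 'false',
    'apscheduler.job_defaults.max_instances': '3',
}
scheduler = BlockingScheduler(gconfig)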
    @abstractmethod
    def start(self):
        """
        Starts the scheduler. The details of this process depend on the implementation.

        :raises SchedulerAlreadyRunningError: if the scheduler is already running
        """

        if self.running:
            raise SchedulerAlreadyRunningError

        with self._executors_lock:
            # Create a default executor if nothing else is configured
            if 'default' not in self._executors:
                self.add_executor(self._create_default_executor(), 'default')

            # Start all the executors
            for alias, executor in six.iteritems(self._executors):
                executor.start(self, alias)

        with self._jobstores_lock:
            # Create a default job store if nothing else is configured
            if 'default' not in self._jobstores:
                self.add_jobstore(self._create_default_jobstore(), 'default')

            # Start all the job stores
            for alias, store in six.iteritems(self._jobstores):
                store.start(self, alias)

            # Schedule all pending jobs
            for job, jobstore_alias, replace_existing in self._pending_jobs:
                self._real_add_job(job, jobstore_alias, replace_existing, False)
            del self._pending_jobs[:]

        self._stopped = False
        self._logger.info('Scheduler started')

        # Notify listeners that the scheduler has been started
        self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_START))

    @abstractmethod
    def shutdown(self, wait=True):
        """
        Shuts down the scheduler. Does not interrupt any currently running jobs.

        :param bool wait: ``True`` to wait until all currently executing jobs have finished
        :raises SchedulerNotRunningError: if the scheduler has not been started yet
        """

        if not self.running:
            raise SchedulerNotRunningError

        self._stopped = True

        # Shut down all executors
        for executor in six.itervalues(self._executors):
            executor.shutdown(wait)

        # Shut down all job stores
        for jobstore in six.itervalues(self._jobstores):
            jobstore.shutdown()

        self._logger.info('Scheduler has been shut down')
        self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN))

    @property
    def running(self):
        return not self._stopped
    def add_executor(self, executor, alias='default', **executor_opts):
        """
        Adds an executor to this scheduler. Any extra keyword arguments will be passed to the executor plugin's
        constructor, assuming that the first argument is the name of an executor plugin.

        :param str|unicode|apscheduler.executors.base.BaseExecutor executor: either an executor instance or the name of
            an executor plugin
        :param str|unicode alias: alias for the executor
        :raises ValueError: if there is already an executor by the given alias
        """

        with self._executors_lock:
            if alias in self._executors:
                raise ValueError('This scheduler already has an executor by the alias of "%s"' % alias)

            if isinstance(executor, BaseExecutor):
                self._executors[alias] = executor
            elif isinstance(executor, six.string_types):
                self._executors[alias] = executor = self._create_plugin_instance('executor', executor, executor_opts)
            else:
                raise TypeError('Expected an executor instance or a string, got %s instead' %
                                executor.__class__.__name__)

            # Start the executor right away if the scheduler is running
            if self.running:
                executor.start(self, alias)

        self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_ADDED, alias))

    def remove_executor(self, alias, shutdown=True):
        """
        Removes the executor by the given alias from this scheduler.

        :param str|unicode alias: alias of the executor
        :param bool shutdown: ``True`` to shut down the executor after removing it
        """

        with self._executors_lock:
            executor = self._lookup_executor(alias)
            del self._executors[alias]

        if shutdown:
            executor.shutdown()

        self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_REMOVED, alias))
    def add_jobstore(self, jobstore, alias='default', **jobstore_opts):
        """
        Adds a job store to this scheduler. Any extra keyword arguments will be passed to the job store plugin's
        constructor, assuming that the first argument is the name of a job store plugin.

        :param str|unicode|apscheduler.jobstores.base.BaseJobStore jobstore: job store to be added
        :param str|unicode alias: alias for the job store
        :raises ValueError: if there is already a job store by the given alias
        """

        with self._jobstores_lock:
            if alias in self._jobstores:
                raise ValueError('This scheduler already has a job store by the alias of "%s"' % alias)

            if isinstance(jobstore, BaseJobStore):
                self._jobstores[alias] = jobstore
            elif isinstance(jobstore, six.string_types):
                self._jobstores[alias] = jobstore = self._create_plugin_instance('jobstore', jobstore, jobstore_opts)
            else:
                raise TypeError('Expected a job store instance or a string, got %s instead' %
                                jobstore.__class__.__name__)

            # Start the job store right away if the scheduler is running
            if self.running:
                jobstore.start(self, alias)

        # Notify listeners that a new job store has been added
        self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_ADDED, alias))

        # Notify the scheduler so it can scan the new job store for jobs
        if self.running:
            self.wakeup()

    def remove_jobstore(self, alias, shutdown=True):
        """
        Removes the job store by the given alias from this scheduler.

        :param str|unicode alias: alias of the job store
        :param bool shutdown: ``True`` to shut down the job store after removing it
        """

        with self._jobstores_lock:
            jobstore = self._lookup_jobstore(alias)
            del self._jobstores[alias]

        if shutdown:
            jobstore.shutdown()

        self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_REMOVED, alias))

    def add_listener(self, callback, mask=EVENT_ALL):
        """
        add_listener(callback, mask=EVENT_ALL)

        Adds a listener for scheduler events. When a matching event occurs, ``callback`` is executed with the event
        object as its sole argument. If the ``mask`` parameter is not provided, the callback will receive events of all
        types.

        :param callback: any callable that takes one argument
        :param int mask: bitmask that indicates which events should be listened to

        .. seealso:: :mod:`apscheduler.events`
        .. seealso:: :ref:`scheduler-events`
        """

        with self._listeners_lock:
            self._listeners.append((callback, mask))

    def remove_listener(self, callback):
        """Removes a previously added event listener."""

        with self._listeners_lock:
            for i, (cb, _) in enumerate(self._listeners):
                if callback == cb:
                    del self._listeners[i]
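A short sketch of the listener API above, continuing with the scheduler instance from the earlier sketch; the callback body and mask choice are illustrative:

from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED

def job_outcome_listener(event):
    # JobExecutionEvent carries retval on success and exception on failure
    if event.exception:
        print('Job %s raised %r' % (event.job_id, event.exception))
    else:
        print('Job %s returned %r' % (event.job_id, event.retval))

# Only events whose code passes the bitmask test in _dispatch_event
# (event.code & mask) reach the callback.
scheduler.add_listener(job_outcome_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)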
    def add_job(self, func, trigger=None, args=None, kwargs=None, id=None, name=None, misfire_grace_time=undefined,
                coalesce=undefined, max_instances=undefined, next_run_time=undefined, jobstore='default',
                executor='default', replace_existing=False, **trigger_args):
        """
        add_job(func, trigger=None, args=None, kwargs=None, id=None, name=None, misfire_grace_time=undefined, \
            coalesce=undefined, max_instances=undefined, next_run_time=undefined, jobstore='default', \
            executor='default', replace_existing=False, **trigger_args)

        Adds the given job to the job list and wakes up the scheduler if it's already running.

        Any option that defaults to ``undefined`` will be replaced with the corresponding default value when the job is
        scheduled (which happens when the scheduler is started, or immediately if the scheduler is already running).

        The ``func`` argument can be given either as a callable object or a textual reference in the
        ``package.module:some.object`` format, where the first half (separated by ``:``) is an importable module and the
        second half is a reference to the callable object, relative to the module.

        The ``trigger`` argument can either be:
          #. the alias name of the trigger (e.g. ``date``, ``interval`` or ``cron``), in which case any extra keyword
             arguments to this method are passed on to the trigger's constructor
          #. an instance of a trigger class

        :param func: callable (or a textual reference to one) to run at the given time
        :param str|apscheduler.triggers.base.BaseTrigger trigger: trigger that determines when ``func`` is called
        :param list|tuple args: list of positional arguments to call func with
        :param dict kwargs: dict of keyword arguments to call func with
        :param str|unicode id: explicit identifier for the job (for modifying it later)
        :param str|unicode name: textual description of the job
        :param int misfire_grace_time: seconds after the designated run time that the job is still allowed to be run
        :param bool coalesce: run once instead of many times if the scheduler determines that the job should be run more
            than once in succession
        :param int max_instances: maximum number of concurrently running instances allowed for this job
        :param datetime next_run_time: when to first run the job, regardless of the trigger (pass ``None`` to add the
            job as paused)
        :param str|unicode jobstore: alias of the job store to store the job in
        :param str|unicode executor: alias of the executor to run the job with
        :param bool replace_existing: ``True`` to replace an existing job with the same ``id`` (but retain the
            number of runs from the existing one)
        :rtype: Job
        """

        job_kwargs = {
            'trigger': self._create_trigger(trigger, trigger_args),
            'executor': executor,
            'func': func,
            'args': tuple(args) if args is not None else (),
            'kwargs': dict(kwargs) if kwargs is not None else {},
            'id': id,
            'name': name,
            'misfire_grace_time': misfire_grace_time,
            'coalesce': coalesce,
            'max_instances': max_instances,
            'next_run_time': next_run_time
        }
        job_kwargs = dict((key, value) for key, value in six.iteritems(job_kwargs) if value is not undefined)
        job = Job(self, **job_kwargs)

        # Don't really add jobs to job stores before the scheduler is up and running
        with self._jobstores_lock:
            if not self.running:
                self._pending_jobs.append((job, jobstore, replace_existing))
                self._logger.info('Adding job tentatively -- it will be properly scheduled when the scheduler starts')
            else:
                self._real_add_job(job, jobstore, replace_existing, True)

        return job

    def scheduled_job(self, trigger, args=None, kwargs=None, id=None, name=None, misfire_grace_time=undefined,
                      coalesce=undefined, max_instances=undefined, next_run_time=undefined, jobstore='default',
                      executor='default', **trigger_args):
        """
        scheduled_job(trigger, args=None, kwargs=None, id=None, name=None, misfire_grace_time=undefined, \
            coalesce=undefined, max_instances=undefined, next_run_time=undefined, jobstore='default', \
            executor='default', **trigger_args)

        A decorator version of :meth:`add_job`, except that ``replace_existing`` is always ``True``.

        .. important:: The ``id`` argument must be given if scheduling a job in a persistent job store. The scheduler
            cannot, however, enforce this requirement.
        """

        def inner(func):
            self.add_job(func, trigger, args, kwargs, id, name, misfire_grace_time, coalesce, max_instances,
                         next_run_time, jobstore, executor, True, **trigger_args)
            return func
        return inner
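A brief usage sketch for the two entry points above; the job functions are illustrative and the 'interval' alias resolves through the trigger entry points loaded at class definition time:

def ping(url):
    print('pinging %s' % url)

# Positional args for the job plus trigger kwargs: 'seconds' travels through
# **trigger_args into the interval trigger's constructor.
scheduler.add_job(ping, 'interval', args=['http://example.org'], id='ping', seconds=30)

# Decorator form; replace_existing is implicitly True, so a job with the same
# id left over in a persistent store gets overwritten on restart.
@scheduler.scheduled_job('interval', id='heartbeat', minutes=1)
def heartbeat():
    print('still alive')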
    def modify_job(self, job_id, jobstore=None, **changes):
        """
        Modifies the properties of a single job. Modifications are passed to this method as extra keyword arguments.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        """

        with self._jobstores_lock:
            job, jobstore = self._lookup_job(job_id, jobstore)
            job._modify(**changes)
            if jobstore:
                self._lookup_jobstore(jobstore).update_job(job)

        self._dispatch_event(JobEvent(EVENT_JOB_MODIFIED, job_id, jobstore))

        # Wake up the scheduler since the job's next run time may have been changed
        self.wakeup()

    def reschedule_job(self, job_id, jobstore=None, trigger=None, **trigger_args):
        """
        Constructs a new trigger for a job and updates its next run time.
        Extra keyword arguments are passed directly to the trigger's constructor.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        :param trigger: alias of the trigger type or a trigger instance
        """

        trigger = self._create_trigger(trigger, trigger_args)
        now = datetime.now(self.timezone)
        next_run_time = trigger.get_next_fire_time(None, now)
        self.modify_job(job_id, jobstore, trigger=trigger, next_run_time=next_run_time)

    def pause_job(self, job_id, jobstore=None):
        """
        Causes the given job not to be executed until it is explicitly resumed.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        """

        self.modify_job(job_id, jobstore, next_run_time=None)

    def resume_job(self, job_id, jobstore=None):
        """
        Resumes the schedule of the given job, or removes the job if its schedule is finished.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        """

        with self._jobstores_lock:
            job, jobstore = self._lookup_job(job_id, jobstore)
            now = datetime.now(self.timezone)
            next_run_time = job.trigger.get_next_fire_time(None, now)
            if next_run_time:
                self.modify_job(job_id, jobstore, next_run_time=next_run_time)
            else:
                self.remove_job(job.id, jobstore)
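A sketch of the lifecycle helpers above, reusing the hypothetical 'ping' job from the previous example:

scheduler.pause_job('ping')    # stores next_run_time=None, marking the job paused
scheduler.resume_job('ping')   # recomputed from the trigger, or removed if exhausted

# Swap the interval trigger for a cron one; extra kwargs go to the new
# trigger's constructor via _create_trigger().
scheduler.reschedule_job('ping', trigger='cron', minute='*/15')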
    def get_jobs(self, jobstore=None, pending=None):
        """
        Returns a list of pending jobs (if the scheduler hasn't been started yet) and scheduled jobs, either from a
        specific job store or from all of them.

        :param str|unicode jobstore: alias of the job store
        :param bool pending: ``False`` to leave out pending jobs (jobs that are waiting for the scheduler start to be
            added to their respective job stores), ``True`` to only include pending jobs, anything else
            to return both
        :rtype: list[Job]
        """

        with self._jobstores_lock:
            jobs = []

            if pending is not False:
                for job, alias, replace_existing in self._pending_jobs:
                    if jobstore is None or alias == jobstore:
                        jobs.append(job)

            if pending is not True:
                for alias, store in six.iteritems(self._jobstores):
                    if jobstore is None or alias == jobstore:
                        jobs.extend(store.get_all_jobs())

            return jobs

    def get_job(self, job_id, jobstore=None):
        """
        Returns the Job that matches the given ``job_id``.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that most likely contains the job
        :return: the Job by the given ID, or ``None`` if it wasn't found
        :rtype: Job
        """

        with self._jobstores_lock:
            try:
                return self._lookup_job(job_id, jobstore)[0]
            except JobLookupError:
                return

    def remove_job(self, job_id, jobstore=None):
        """
        Removes a job, preventing it from being run any more.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        :raises JobLookupError: if the job was not found
        """

        with self._jobstores_lock:
            # Check if the job is among the pending jobs
            for i, (job, jobstore_alias, replace_existing) in enumerate(self._pending_jobs):
                if job.id == job_id:
                    del self._pending_jobs[i]
                    jobstore = jobstore_alias
                    break
            else:
                # Otherwise, try to remove it from each store until it succeeds or we run out of stores to check
                for alias, store in six.iteritems(self._jobstores):
                    if jobstore in (None, alias):
                        try:
                            store.remove_job(job_id)
                        except JobLookupError:
                            continue

                        jobstore = alias
                        break

        if jobstore is None:
            raise JobLookupError(job_id)

        # Notify listeners that a job has been removed
        event = JobEvent(EVENT_JOB_REMOVED, job_id, jobstore)
        self._dispatch_event(event)

        self._logger.info('Removed job %s', job_id)

    def remove_all_jobs(self, jobstore=None):
        """
        Removes all jobs from the specified job store, or all job stores if none is given.

        :param str|unicode jobstore: alias of the job store
        """

        with self._jobstores_lock:
            if jobstore:
                self._pending_jobs = [pending for pending in self._pending_jobs if pending[1] != jobstore]
            else:
                self._pending_jobs = []

            for alias, store in six.iteritems(self._jobstores):
                if jobstore in (None, alias):
                    store.remove_all_jobs()

        self._dispatch_event(SchedulerEvent(EVENT_ALL_JOBS_REMOVED, jobstore))
    def print_jobs(self, jobstore=None, out=None):
        """
        print_jobs(jobstore=None, out=sys.stdout)

        Prints out a textual listing of all jobs currently scheduled on either all job stores or just a specific one.

        :param str|unicode jobstore: alias of the job store, ``None`` to list jobs from all stores
        :param file out: a file-like object to print to (defaults to **sys.stdout** if nothing is given)
        """

        out = out or sys.stdout
        with self._jobstores_lock:
            if self._pending_jobs:
                print(six.u('Pending jobs:'), file=out)
                for job, jobstore_alias, replace_existing in self._pending_jobs:
                    if jobstore in (None, jobstore_alias):
                        print(six.u('    %s') % job, file=out)

            for alias, store in six.iteritems(self._jobstores):
                if jobstore in (None, alias):
                    print(six.u('Jobstore %s:') % alias, file=out)
                    jobs = store.get_all_jobs()
                    if jobs:
                        for job in jobs:
                            print(six.u('    %s') % job, file=out)
                    else:
                        print(six.u('    No scheduled jobs'), file=out)

    @abstractmethod
    def wakeup(self):
        """
        Notifies the scheduler that there may be jobs due for execution.
        Triggers :meth:`_process_jobs` to be run in an implementation specific manner.
        """
    #
    # Private API
    #

    def _configure(self, config):
        # Set general options
        self._logger = maybe_ref(config.pop('logger', None)) or getLogger('apscheduler.scheduler')
        self.timezone = astimezone(config.pop('timezone', None)) or get_localzone()

        # Set the job defaults
        job_defaults = config.get('job_defaults', {})
        self._job_defaults = {
            'misfire_grace_time': asint(job_defaults.get('misfire_grace_time', 1)),
            'coalesce': asbool(job_defaults.get('coalesce', True)),
            'max_instances': asint(job_defaults.get('max_instances', 1))
        }

        # Configure executors
        self._executors.clear()
        for alias, value in six.iteritems(config.get('executors', {})):
            if isinstance(value, BaseExecutor):
                self.add_executor(value, alias)
            elif isinstance(value, MutableMapping):
                executor_class = value.pop('class', None)
                plugin = value.pop('type', None)
                if plugin:
                    executor = self._create_plugin_instance('executor', plugin, value)
                elif executor_class:
                    cls = maybe_ref(executor_class)
                    executor = cls(**value)
                else:
                    raise ValueError('Cannot create executor "%s" -- either "type" or "class" must be defined' % alias)

                self.add_executor(executor, alias)
            else:
                raise TypeError("Expected executor instance or dict for executors['%s'], got %s instead" % (
                    alias, value.__class__.__name__))

        # Configure job stores
        self._jobstores.clear()
        for alias, value in six.iteritems(config.get('jobstores', {})):
            if isinstance(value, BaseJobStore):
                self.add_jobstore(value, alias)
            elif isinstance(value, MutableMapping):
                jobstore_class = value.pop('class', None)
                plugin = value.pop('type', None)
                if plugin:
                    jobstore = self._create_plugin_instance('jobstore', plugin, value)
                elif jobstore_class:
                    cls = maybe_ref(jobstore_class)
                    jobstore = cls(**value)
                else:
                    raise ValueError('Cannot create job store "%s" -- either "type" or "class" must be defined' % alias)

                self.add_jobstore(jobstore, alias)
            else:
                raise TypeError("Expected job store instance or dict for jobstores['%s'], got %s instead" % (
                    alias, value.__class__.__name__))
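A sketch of the two configuration spellings _configure() accepts for executors and job stores; 'threadpool' is assumed here to be the entry-point name registered for the pool executor, so treat the exact alias as an assumption:

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.jobstores.memory import MemoryJobStore

scheduler = BlockingScheduler(
    # 'type' selects a plugin by entry-point name; the remaining keys become
    # constructor kwargs (here roughly ThreadPoolExecutor(max_workers=10)).
    executors={'default': {'type': 'threadpool', 'max_workers': 10}},
    # Alternatively, a ready-made instance can be passed directly.
    jobstores={'default': MemoryJobStore()},
)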
    def _create_default_executor(self):
        """Creates a default executor, specific to the particular scheduler type."""

        return ThreadPoolExecutor()

    def _create_default_jobstore(self):
        """Creates a default job store, specific to the particular scheduler type."""

        return MemoryJobStore()

    def _lookup_executor(self, alias):
        """
        Returns the executor instance by the given name from the list of executors that were added to this scheduler.

        :type alias: str
        :raises KeyError: if no executor by the given alias is found
        """

        try:
            return self._executors[alias]
        except KeyError:
            raise KeyError('No such executor: %s' % alias)

    def _lookup_jobstore(self, alias):
        """
        Returns the job store instance by the given name from the list of job stores that were added to this scheduler.

        :type alias: str
        :raises KeyError: if no job store by the given alias is found
        """

        try:
            return self._jobstores[alias]
        except KeyError:
            raise KeyError('No such job store: %s' % alias)

    def _lookup_job(self, job_id, jobstore_alias):
        """
        Finds a job by its ID.

        :type job_id: str
        :param str jobstore_alias: alias of a job store to look in
        :return tuple[Job, str]: a tuple of job, jobstore alias (jobstore alias is None in case of a pending job)
        :raises JobLookupError: if no job by the given ID is found.
        """

        # Check if the job is among the pending jobs
        for job, alias, replace_existing in self._pending_jobs:
            if job.id == job_id:
                return job, None

        # Look in all job stores
        for alias, store in six.iteritems(self._jobstores):
            if jobstore_alias in (None, alias):
                job = store.lookup_job(job_id)
                if job is not None:
                    return job, alias

        raise JobLookupError(job_id)
    def _dispatch_event(self, event):
        """
        Dispatches the given event to interested listeners.

        :param SchedulerEvent event: the event to send
        """

        with self._listeners_lock:
            listeners = tuple(self._listeners)

        for cb, mask in listeners:
            if event.code & mask:
                try:
                    cb(event)
                except:
                    self._logger.exception('Error notifying listener')

    def _real_add_job(self, job, jobstore_alias, replace_existing, wakeup):
        """
        :param Job job: the job to add
        :param bool replace_existing: ``True`` to use update_job() in case the job already exists in the store
        :param bool wakeup: ``True`` to wake up the scheduler after adding the job
        """

        # Fill in undefined values with defaults
        replacements = {}
        for key, value in six.iteritems(self._job_defaults):
            if not hasattr(job, key):
                replacements[key] = value

        # Calculate the next run time if there is none defined
        if not hasattr(job, 'next_run_time'):
            now = datetime.now(self.timezone)
            replacements['next_run_time'] = job.trigger.get_next_fire_time(None, now)

        # Apply any replacements
        job._modify(**replacements)

        # Add the job to the given job store
        store = self._lookup_jobstore(jobstore_alias)
        try:
            store.add_job(job)
        except ConflictingIdError:
            if replace_existing:
                store.update_job(job)
            else:
                raise

        # Mark the job as no longer pending
        job._jobstore_alias = jobstore_alias

        # Notify listeners that a new job has been added
        event = JobEvent(EVENT_JOB_ADDED, job.id, jobstore_alias)
        self._dispatch_event(event)

        self._logger.info('Added job "%s" to job store "%s"', job.name, jobstore_alias)

        # Notify the scheduler about the new job
        if wakeup:
            self.wakeup()

    def _create_plugin_instance(self, type_, alias, constructor_kwargs):
        """Creates an instance of the given plugin type, loading the plugin first if necessary."""

        plugin_container, class_container, base_class = {
            'trigger': (self._trigger_plugins, self._trigger_classes, BaseTrigger),
            'jobstore': (self._jobstore_plugins, self._jobstore_classes, BaseJobStore),
            'executor': (self._executor_plugins, self._executor_classes, BaseExecutor)
        }[type_]

        try:
            plugin_cls = class_container[alias]
        except KeyError:
            if alias in plugin_container:
                plugin_cls = class_container[alias] = plugin_container[alias].load()
                if not issubclass(plugin_cls, base_class):
                    raise TypeError('The {0} entry point does not point to a {0} class'.format(type_))
            else:
                raise LookupError('No {0} by the name "{1}" was found'.format(type_, alias))

        return plugin_cls(**constructor_kwargs)

    def _create_trigger(self, trigger, trigger_args):
        if isinstance(trigger, BaseTrigger):
            return trigger
        elif trigger is None:
            trigger = 'date'
        elif not isinstance(trigger, six.string_types):
            raise TypeError('Expected a trigger instance or string, got %s instead' % trigger.__class__.__name__)

        # Use the scheduler's time zone if nothing else is specified
        trigger_args.setdefault('timezone', self.timezone)

        # Instantiate the trigger class
        return self._create_plugin_instance('trigger', trigger, trigger_args)

    def _create_lock(self):
        """Creates a reentrant lock object."""

        return RLock()

    def _process_jobs(self):
        """
        Iterates through jobs in every jobstore, starts jobs that are due and figures out how long to wait for the next
        round.
        """

        self._logger.debug('Looking for jobs to run')
        now = datetime.now(self.timezone)
        next_wakeup_time = None

        with self._jobstores_lock:
            for jobstore_alias, jobstore in six.iteritems(self._jobstores):
                for job in jobstore.get_due_jobs(now):
                    # Look up the job's executor
                    try:
                        executor = self._lookup_executor(job.executor)
                    except:
                        self._logger.error(
                            'Executor lookup ("%s") failed for job "%s" -- removing it from the job store',
                            job.executor, job)
                        self.remove_job(job.id, jobstore_alias)
                        continue

                    run_times = job._get_run_times(now)
                    run_times = run_times[-1:] if run_times and job.coalesce else run_times
                    if run_times:
                        try:
                            executor.submit_job(job, run_times)
                        except MaxInstancesReachedError:
                            self._logger.warning(
                                'Execution of job "%s" skipped: maximum number of running instances reached (%d)',
                                job, job.max_instances)
                        except:
                            self._logger.exception('Error submitting job "%s" to executor "%s"', job, job.executor)

                        # Update the job if it has a next execution time. Otherwise remove it from the job store.
                        job_next_run = job.trigger.get_next_fire_time(run_times[-1], now)
                        if job_next_run:
                            job._modify(next_run_time=job_next_run)
                            jobstore.update_job(job)
                        else:
                            self.remove_job(job.id, jobstore_alias)

                # Set a new next wakeup time if there isn't one yet or the jobstore has an even earlier one
                jobstore_next_run_time = jobstore.get_next_run_time()
                if jobstore_next_run_time and (next_wakeup_time is None or jobstore_next_run_time < next_wakeup_time):
                    next_wakeup_time = jobstore_next_run_time

        # Determine the delay until this method should be called again
        if next_wakeup_time is not None:
            wait_seconds = max(timedelta_seconds(next_wakeup_time - now), 0)
            self._logger.debug('Next wakeup is due at %s (in %f seconds)', next_wakeup_time, wait_seconds)
        else:
            wait_seconds = None
            self._logger.debug('No jobs; waiting until a job is added')

        return wait_seconds
32
lib/apscheduler/schedulers/blocking.py
Normal file
@ -0,0 +1,32 @@
from __future__ import absolute_import
from threading import Event

from apscheduler.schedulers.base import BaseScheduler


class BlockingScheduler(BaseScheduler):
    """
    A scheduler that runs in the foreground (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will block).
    """

    MAX_WAIT_TIME = 4294967  # Maximum value accepted by Event.wait() on Windows

    _event = None

    def start(self):
        super(BlockingScheduler, self).start()
        self._event = Event()
        self._main_loop()

    def shutdown(self, wait=True):
        super(BlockingScheduler, self).shutdown(wait)
        self._event.set()

    def _main_loop(self):
        while self.running:
            wait_seconds = self._process_jobs()
            self._event.wait(wait_seconds if wait_seconds is not None else self.MAX_WAIT_TIME)
            self._event.clear()

    def wakeup(self):
        self._event.set()
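A minimal usage sketch for BlockingScheduler; the job is illustrative. start() blocks in _main_loop() until shutdown() is called, and wakeup() merely sets the Event so the loop re-runs _process_jobs():

from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler()

@scheduler.scheduled_job('interval', seconds=3)
def tick():
    print('Tick! The time is %s' % datetime.now())

try:
    scheduler.start()   # blocks here
except (KeyboardInterrupt, SystemExit):
    pass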
35
lib/apscheduler/schedulers/gevent.py
Normal file
@ -0,0 +1,35 @@
from __future__ import absolute_import

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.schedulers.base import BaseScheduler

try:
    from gevent.event import Event
    from gevent.lock import RLock
    import gevent
except ImportError:  # pragma: nocover
    raise ImportError('GeventScheduler requires gevent installed')


class GeventScheduler(BlockingScheduler):
    """A scheduler that runs as a Gevent greenlet."""

    _greenlet = None

    def start(self):
        BaseScheduler.start(self)
        self._event = Event()
        self._greenlet = gevent.spawn(self._main_loop)
        return self._greenlet

    def shutdown(self, wait=True):
        super(GeventScheduler, self).shutdown(wait)
        self._greenlet.join()
        del self._greenlet

    def _create_lock(self):
        return RLock()

    def _create_default_executor(self):
        from apscheduler.executors.gevent import GeventExecutor
        return GeventExecutor()
46
lib/apscheduler/schedulers/qt.py
Normal file
@ -0,0 +1,46 @@
from __future__ import absolute_import

from apscheduler.schedulers.base import BaseScheduler

try:
    from PyQt5.QtCore import QObject, QTimer
except ImportError:  # pragma: nocover
    try:
        from PyQt4.QtCore import QObject, QTimer
    except ImportError:
        try:
            from PySide.QtCore import QObject, QTimer  # flake8: noqa
        except ImportError:
            raise ImportError('QtScheduler requires either PyQt5, PyQt4 or PySide installed')


class QtScheduler(BaseScheduler):
    """A scheduler that runs in a Qt event loop."""

    _timer = None

    def start(self):
        super(QtScheduler, self).start()
        self.wakeup()

    def shutdown(self, wait=True):
        super(QtScheduler, self).shutdown(wait)
        self._stop_timer()

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            # Qt expects integer milliseconds; wait_seconds may be a float
            self._timer = QTimer.singleShot(int(wait_seconds * 1000), self._process_jobs)

    def _stop_timer(self):
        if self._timer:
            if self._timer.isActive():
                self._timer.stop()
            del self._timer

    def wakeup(self):
        self._start_timer(0)

    def _process_jobs(self):
        wait_seconds = super(QtScheduler, self)._process_jobs()
        self._start_timer(wait_seconds)
60
lib/apscheduler/schedulers/tornado.py
Normal file
@ -0,0 +1,60 @@
from __future__ import absolute_import
from datetime import timedelta
from functools import wraps

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref

try:
    from tornado.ioloop import IOLoop
except ImportError:  # pragma: nocover
    raise ImportError('TornadoScheduler requires tornado installed')


def run_in_ioloop(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        self._ioloop.add_callback(func, self, *args, **kwargs)
    return wrapper


class TornadoScheduler(BaseScheduler):
    """
    A scheduler that runs on a Tornado IOLoop.

    =========== ===============================================================
    ``io_loop`` Tornado IOLoop instance to use (defaults to the global IO loop)
    =========== ===============================================================
    """

    _ioloop = None
    _timeout = None

    def start(self):
        super(TornadoScheduler, self).start()
        self.wakeup()

    @run_in_ioloop
    def shutdown(self, wait=True):
        super(TornadoScheduler, self).shutdown(wait)
        self._stop_timer()

    def _configure(self, config):
        self._ioloop = maybe_ref(config.pop('io_loop', None)) or IOLoop.current()
        super(TornadoScheduler, self)._configure(config)

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            self._timeout = self._ioloop.add_timeout(timedelta(seconds=wait_seconds), self.wakeup)

    def _stop_timer(self):
        if self._timeout:
            self._ioloop.remove_timeout(self._timeout)
            del self._timeout

    @run_in_ioloop
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)
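A short sketch of running TornadoScheduler inside an existing IOLoop; the job and timings are illustrative. Note that shutdown() and wakeup() are marshalled onto the loop by run_in_ioloop, so they are safe to call from other threads:

from tornado.ioloop import IOLoop
from apscheduler.schedulers.tornado import TornadoScheduler

def tick():
    print('tick')

scheduler = TornadoScheduler()          # _configure() picks up IOLoop.current()
scheduler.add_job(tick, 'interval', seconds=5)
scheduler.start()                       # queues the first wakeup on the loop
IOLoop.current().start()                # the loop itself still has to run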
65
lib/apscheduler/schedulers/twisted.py
Normal file
@ -0,0 +1,65 @@
from __future__ import absolute_import
from functools import wraps

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref

try:
    from twisted.internet import reactor as default_reactor
except ImportError:  # pragma: nocover
    raise ImportError('TwistedScheduler requires Twisted installed')


def run_in_reactor(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        self._reactor.callFromThread(func, self, *args, **kwargs)
    return wrapper


class TwistedScheduler(BaseScheduler):
    """
    A scheduler that runs on a Twisted reactor.

    Extra options:

    =========== ========================================================
    ``reactor`` Reactor instance to use (defaults to the global reactor)
    =========== ========================================================
    """

    _reactor = None
    _delayedcall = None

    def _configure(self, config):
        self._reactor = maybe_ref(config.pop('reactor', default_reactor))
        super(TwistedScheduler, self)._configure(config)

    def start(self):
        super(TwistedScheduler, self).start()
        self.wakeup()

    @run_in_reactor
    def shutdown(self, wait=True):
        super(TwistedScheduler, self).shutdown(wait)
        self._stop_timer()

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            self._delayedcall = self._reactor.callLater(wait_seconds, self.wakeup)

    def _stop_timer(self):
        if self._delayedcall and self._delayedcall.active():
            self._delayedcall.cancel()
            del self._delayedcall

    @run_in_reactor
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)

    def _create_default_executor(self):
        from apscheduler.executors.twisted import TwistedExecutor
        return TwistedExecutor()
0
lib/apscheduler/triggers/__init__.py
Normal file
16
lib/apscheduler/triggers/base.py
Normal file
@ -0,0 +1,16 @@
from abc import ABCMeta, abstractmethod

import six


class BaseTrigger(six.with_metaclass(ABCMeta)):
    """Abstract base class that defines the interface that every trigger must implement."""

    @abstractmethod
    def get_next_fire_time(self, previous_fire_time, now):
        """
        Returns the next datetime to fire on. If no such datetime can be calculated, returns ``None``.

        :param datetime.datetime previous_fire_time: the previous time the trigger was fired
        :param datetime.datetime now: current datetime
        """
176
lib/apscheduler/triggers/cron/__init__.py
Normal file
@ -0,0 +1,176 @@
from datetime import datetime, timedelta

from tzlocal import get_localzone
import six

from apscheduler.triggers.base import BaseTrigger
from apscheduler.triggers.cron.fields import BaseField, WeekField, DayOfMonthField, DayOfWeekField, DEFAULT_VALUES
from apscheduler.util import datetime_ceil, convert_to_datetime, datetime_repr, astimezone


class CronTrigger(BaseTrigger):
    """
    Triggers when current time matches all specified time constraints, similarly to how the UNIX cron scheduler works.

    :param int|str year: 4-digit year
    :param int|str month: month (1-12)
    :param int|str day: day of the month (1-31)
    :param int|str week: ISO week (1-53)
    :param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
    :param int|str hour: hour (0-23)
    :param int|str minute: minute (0-59)
    :param int|str second: second (0-59)
    :param datetime|str start_date: earliest possible date/time to trigger on (inclusive)
    :param datetime|str end_date: latest possible date/time to trigger on (inclusive)
    :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations
        (defaults to scheduler timezone)

    .. note:: The first weekday is always **monday**.
    """

    FIELD_NAMES = ('year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute', 'second')
    FIELDS_MAP = {
        'year': BaseField,
        'month': BaseField,
        'week': WeekField,
        'day': DayOfMonthField,
        'day_of_week': DayOfWeekField,
        'hour': BaseField,
        'minute': BaseField,
        'second': BaseField
    }

    __slots__ = 'timezone', 'start_date', 'end_date', 'fields'

    def __init__(self, year=None, month=None, day=None, week=None, day_of_week=None, hour=None, minute=None,
                 second=None, start_date=None, end_date=None, timezone=None):
        if timezone:
            self.timezone = astimezone(timezone)
        elif start_date and start_date.tzinfo:
            self.timezone = start_date.tzinfo
        elif end_date and end_date.tzinfo:
            self.timezone = end_date.tzinfo
        else:
            self.timezone = get_localzone()

        self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date')
        self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date')

        values = dict((key, value) for (key, value) in six.iteritems(locals())
                      if key in self.FIELD_NAMES and value is not None)
        self.fields = []
        assign_defaults = False
        for field_name in self.FIELD_NAMES:
            if field_name in values:
                exprs = values.pop(field_name)
                is_default = False
                assign_defaults = not values
            elif assign_defaults:
                exprs = DEFAULT_VALUES[field_name]
                is_default = True
            else:
                exprs = '*'
                is_default = True

            field_class = self.FIELDS_MAP[field_name]
            field = field_class(field_name, exprs, is_default)
            self.fields.append(field)

    def _increment_field_value(self, dateval, fieldnum):
        """
        Increments the designated field and resets all less significant fields to their minimum values.

        :type dateval: datetime
        :type fieldnum: int
        :return: a tuple containing the new date, and the number of the field that was actually incremented
        :rtype: tuple
        """

        values = {}
        i = 0
        while i < len(self.fields):
            field = self.fields[i]
            if not field.REAL:
                if i == fieldnum:
                    fieldnum -= 1
                    i -= 1
                else:
                    i += 1
                continue

            if i < fieldnum:
                values[field.name] = field.get_value(dateval)
                i += 1
            elif i > fieldnum:
                values[field.name] = field.get_min(dateval)
                i += 1
            else:
                value = field.get_value(dateval)
                maxval = field.get_max(dateval)
                if value == maxval:
                    fieldnum -= 1
                    i -= 1
                else:
                    values[field.name] = value + 1
                    i += 1

        difference = datetime(**values) - dateval.replace(tzinfo=None)
        return self.timezone.normalize(dateval + difference), fieldnum

    def _set_field_value(self, dateval, fieldnum, new_value):
        values = {}
        for i, field in enumerate(self.fields):
            if field.REAL:
                if i < fieldnum:
                    values[field.name] = field.get_value(dateval)
                elif i > fieldnum:
                    values[field.name] = field.get_min(dateval)
                else:
                    values[field.name] = new_value

        difference = datetime(**values) - dateval.replace(tzinfo=None)
        return self.timezone.normalize(dateval + difference)

    def get_next_fire_time(self, previous_fire_time, now):
        if previous_fire_time:
            start_date = max(now, previous_fire_time + timedelta(microseconds=1))
        else:
            start_date = max(now, self.start_date) if self.start_date else now

        fieldnum = 0
        next_date = datetime_ceil(start_date).astimezone(self.timezone)
        while 0 <= fieldnum < len(self.fields):
            field = self.fields[fieldnum]
            curr_value = field.get_value(next_date)
            next_value = field.get_next_value(next_date)

            if next_value is None:
                # No valid value was found
                next_date, fieldnum = self._increment_field_value(next_date, fieldnum - 1)
            elif next_value > curr_value:
                # A valid value was found, but it is higher than the starting value
                if field.REAL:
                    next_date = self._set_field_value(next_date, fieldnum, next_value)
                    fieldnum += 1
                else:
                    next_date, fieldnum = self._increment_field_value(next_date, fieldnum)
            else:
                # A valid value was found, no changes necessary
                fieldnum += 1

            # Return if the date has rolled past the end date
            if self.end_date and next_date > self.end_date:
                return None

        if fieldnum >= 0:
            return next_date

    def __str__(self):
        options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default]
        return 'cron[%s]' % (', '.join(options))

    def __repr__(self):
        options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default]
        if self.start_date:
            options.append("start_date='%s'" % datetime_repr(self.start_date))
        return '<%s (%s)>' % (self.__class__.__name__, ', '.join(options))
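A quick sketch of the default-filling rule implemented in __init__ above: fields less significant than the least significant explicit field get their DEFAULT_VALUES, while more significant ones stay '*'. The values are illustrative:

from apscheduler.triggers.cron import CronTrigger

# hour=3 is the least significant explicit field, so minute and second
# default to 0 while year/month/day/week/day_of_week remain '*'.
trigger = CronTrigger(hour=3, timezone='UTC')
print(trigger)   # only non-default fields are shown: cron[hour='3']

# Equivalent spelling through the scheduler, using the 'cron' trigger alias:
# scheduler.add_job(nightly_task, 'cron', hour=3)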
188
lib/apscheduler/triggers/cron/expressions.py
Normal file
@ -0,0 +1,188 @@
"""
This module contains the expressions applicable for CronTrigger's fields.
"""

from calendar import monthrange
import re

from apscheduler.util import asint

__all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression', 'WeekdayPositionExpression',
           'LastDayOfMonthExpression')


WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']


class AllExpression(object):
    value_re = re.compile(r'\*(?:/(?P<step>\d+))?$')

    def __init__(self, step=None):
        self.step = asint(step)
        if self.step == 0:
            raise ValueError('Increment must be higher than 0')

    def get_next_value(self, date, field):
        start = field.get_value(date)
        minval = field.get_min(date)
        maxval = field.get_max(date)
        start = max(start, minval)

        if not self.step:
            next = start
        else:
            distance_to_next = (self.step - (start - minval)) % self.step
            next = start + distance_to_next

        if next <= maxval:
            return next

    def __str__(self):
        if self.step:
            return '*/%d' % self.step
        return '*'

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.step)


class RangeExpression(AllExpression):
    value_re = re.compile(
        r'(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$')

    def __init__(self, first, last=None, step=None):
        AllExpression.__init__(self, step)
        first = asint(first)
        last = asint(last)
        if last is None and step is None:
            last = first
        if last is not None and first > last:
            raise ValueError('The minimum value in a range must not be higher than the maximum')
        self.first = first
        self.last = last

    def get_next_value(self, date, field):
        start = field.get_value(date)
        minval = field.get_min(date)
        maxval = field.get_max(date)

        # Apply range limits
        minval = max(minval, self.first)
        if self.last is not None:
            maxval = min(maxval, self.last)
        start = max(start, minval)

        if not self.step:
            next = start
        else:
            distance_to_next = (self.step - (start - minval)) % self.step
            next = start + distance_to_next

        if next <= maxval:
            return next

    def __str__(self):
        if self.last != self.first and self.last is not None:
            range = '%d-%d' % (self.first, self.last)
        else:
            range = str(self.first)

        if self.step:
            return '%s/%d' % (range, self.step)
        return range

    def __repr__(self):
        args = [str(self.first)]
        if self.last != self.first and self.last is not None or self.step:
            args.append(str(self.last))
        if self.step:
            args.append(str(self.step))
        return "%s(%s)" % (self.__class__.__name__, ', '.join(args))


class WeekdayRangeExpression(RangeExpression):
    value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE)

    def __init__(self, first, last=None):
        try:
            first_num = WEEKDAYS.index(first.lower())
        except ValueError:
            raise ValueError('Invalid weekday name "%s"' % first)

        if last:
            try:
                last_num = WEEKDAYS.index(last.lower())
            except ValueError:
                raise ValueError('Invalid weekday name "%s"' % last)
        else:
            last_num = None

        RangeExpression.__init__(self, first_num, last_num)

    def __str__(self):
        if self.last != self.first and self.last is not None:
            return '%s-%s' % (WEEKDAYS[self.first], WEEKDAYS[self.last])
        return WEEKDAYS[self.first]

    def __repr__(self):
        args = ["'%s'" % WEEKDAYS[self.first]]
        if self.last != self.first and self.last is not None:
            args.append("'%s'" % WEEKDAYS[self.last])
        return "%s(%s)" % (self.__class__.__name__, ', '.join(args))


class WeekdayPositionExpression(AllExpression):
    options = ['1st', '2nd', '3rd', '4th', '5th', 'last']
    value_re = re.compile(r'(?P<option_name>%s) +(?P<weekday_name>(?:\d+|\w+))' % '|'.join(options), re.IGNORECASE)

    def __init__(self, option_name, weekday_name):
        try:
            self.option_num = self.options.index(option_name.lower())
        except ValueError:
            raise ValueError('Invalid weekday position "%s"' % option_name)

        try:
            self.weekday = WEEKDAYS.index(weekday_name.lower())
        except ValueError:
            raise ValueError('Invalid weekday name "%s"' % weekday_name)

    def get_next_value(self, date, field):
        # Figure out the weekday of the month's first day and the number
        # of days in that month
        first_day_wday, last_day = monthrange(date.year, date.month)

        # Calculate which day of the month is the first of the target weekdays
        first_hit_day = self.weekday - first_day_wday + 1
        if first_hit_day <= 0:
            first_hit_day += 7

        # Calculate what day of the month the target weekday would be
        if self.option_num < 5:
            target_day = first_hit_day + self.option_num * 7
        else:
            # Floor division keeps target_day an integer under Python 3 as well
            target_day = first_hit_day + ((last_day - first_hit_day) // 7) * 7

        if target_day <= last_day and target_day >= date.day:
            return target_day

    def __str__(self):
        return '%s %s' % (self.options[self.option_num], WEEKDAYS[self.weekday])

    def __repr__(self):
        return "%s('%s', '%s')" % (self.__class__.__name__, self.options[self.option_num], WEEKDAYS[self.weekday])


class LastDayOfMonthExpression(AllExpression):
    value_re = re.compile(r'last', re.IGNORECASE)

    def __init__(self):
        pass

    def get_next_value(self, date, field):
        return monthrange(date.year, date.month)[1]

    def __str__(self):
        return 'last'

    def __repr__(self):
        return "%s()" % self.__class__.__name__
97
lib/apscheduler/triggers/cron/fields.py
Normal file
@ -0,0 +1,97 @@
"""
Fields represent CronTrigger options which map to :class:`~datetime.datetime`
fields.
"""

from calendar import monthrange

from apscheduler.triggers.cron.expressions import (
    AllExpression, RangeExpression, WeekdayPositionExpression, LastDayOfMonthExpression, WeekdayRangeExpression)


__all__ = ('MIN_VALUES', 'MAX_VALUES', 'DEFAULT_VALUES', 'BaseField', 'WeekField', 'DayOfMonthField', 'DayOfWeekField')


MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1, 'day_of_week': 0, 'hour': 0, 'minute': 0, 'second': 0}
MAX_VALUES = {'year': 2 ** 63, 'month': 12, 'day': 31, 'week': 53, 'day_of_week': 6, 'hour': 23, 'minute': 59,
              'second': 59}
DEFAULT_VALUES = {'year': '*', 'month': 1, 'day': 1, 'week': '*', 'day_of_week': '*', 'hour': 0, 'minute': 0,
                  'second': 0}


class BaseField(object):
    REAL = True
    COMPILERS = [AllExpression, RangeExpression]

    def __init__(self, name, exprs, is_default=False):
        self.name = name
        self.is_default = is_default
        self.compile_expressions(exprs)

    def get_min(self, dateval):
        return MIN_VALUES[self.name]

    def get_max(self, dateval):
        return MAX_VALUES[self.name]

    def get_value(self, dateval):
        return getattr(dateval, self.name)

    def get_next_value(self, dateval):
        smallest = None
        for expr in self.expressions:
            value = expr.get_next_value(dateval, self)
            if smallest is None or (value is not None and value < smallest):
                smallest = value

        return smallest

    def compile_expressions(self, exprs):
        self.expressions = []

        # Split a comma-separated expression list, if any
        exprs = str(exprs).strip()
        if ',' in exprs:
            for expr in exprs.split(','):
                self.compile_expression(expr)
        else:
            self.compile_expression(exprs)

    def compile_expression(self, expr):
        for compiler in self.COMPILERS:
            match = compiler.value_re.match(expr)
            if match:
                compiled_expr = compiler(**match.groupdict())
                self.expressions.append(compiled_expr)
                return

        raise ValueError('Unrecognized expression "%s" for field "%s"' % (expr, self.name))

    def __str__(self):
        expr_strings = (str(e) for e in self.expressions)
        return ','.join(expr_strings)

    def __repr__(self):
        return "%s('%s', '%s')" % (self.__class__.__name__, self.name, self)


class WeekField(BaseField):
    REAL = False

    def get_value(self, dateval):
        return dateval.isocalendar()[1]


class DayOfMonthField(BaseField):
    COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression, LastDayOfMonthExpression]

    def get_max(self, dateval):
        return monthrange(dateval.year, dateval.month)[1]


class DayOfWeekField(BaseField):
    REAL = False
    COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression]

    def get_value(self, dateval):
        return dateval.weekday()
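A small sketch of how a field string travels through compile_expressions() above; the expression strings are illustrative:

from apscheduler.triggers.cron.fields import DayOfMonthField, DayOfWeekField

# Comma-separated parts are matched against each COMPILERS entry's value_re
# in order; the first pattern that matches builds the expression object.
day = DayOfMonthField('day', '1-15/2,last')
print(day)    # RangeExpression plus LastDayOfMonthExpression: 1-15/2,last

dow = DayOfWeekField('day_of_week', 'mon-fri')
print(dow)    # WeekdayRangeExpression: mon-fri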
30
lib/apscheduler/triggers/date.py
Normal file
30
lib/apscheduler/triggers/date.py
Normal file
|
@ -0,0 +1,30 @@
|
|||
from datetime import datetime

from tzlocal import get_localzone

from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import convert_to_datetime, datetime_repr, astimezone


class DateTrigger(BaseTrigger):
    """
    Triggers once on the given datetime. If ``run_date`` is left empty, current time is used.

    :param datetime|str run_date: the date/time to run the job at
    :param datetime.tzinfo|str timezone: time zone for ``run_date`` if it doesn't have one already
    """

    __slots__ = 'timezone', 'run_date'

    def __init__(self, run_date=None, timezone=None):
        timezone = astimezone(timezone) or get_localzone()
        self.run_date = convert_to_datetime(run_date or datetime.now(), timezone, 'run_date')

    def get_next_fire_time(self, previous_fire_time, now):
        return self.run_date if previous_fire_time is None else None

    def __str__(self):
        return 'date[%s]' % datetime_repr(self.run_date)

    def __repr__(self):
        return "<%s (run_date='%s')>" % (self.__class__.__name__, datetime_repr(self.run_date))
65
lib/apscheduler/triggers/interval.py
Normal file
@@ -0,0 +1,65 @@
from datetime import timedelta, datetime
from math import ceil

from tzlocal import get_localzone

from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import convert_to_datetime, timedelta_seconds, datetime_repr, astimezone


class IntervalTrigger(BaseTrigger):
    """
    Triggers on specified intervals, starting on ``start_date`` if specified, ``datetime.now()`` + interval
    otherwise.

    :param int weeks: number of weeks to wait
    :param int days: number of days to wait
    :param int hours: number of hours to wait
    :param int minutes: number of minutes to wait
    :param int seconds: number of seconds to wait
    :param datetime|str start_date: starting point for the interval calculation
    :param datetime|str end_date: latest possible date/time to trigger on
    :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations
    """

    __slots__ = 'timezone', 'start_date', 'end_date', 'interval', 'interval_length'

    def __init__(self, weeks=0, days=0, hours=0, minutes=0, seconds=0, start_date=None, end_date=None, timezone=None):
        self.interval = timedelta(weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds)
        self.interval_length = timedelta_seconds(self.interval)
        if self.interval_length == 0:
            self.interval = timedelta(seconds=1)
            self.interval_length = 1

        if timezone:
            self.timezone = astimezone(timezone)
        elif start_date and start_date.tzinfo:
            self.timezone = start_date.tzinfo
        elif end_date and end_date.tzinfo:
            self.timezone = end_date.tzinfo
        else:
            self.timezone = get_localzone()

        start_date = start_date or (datetime.now(self.timezone) + self.interval)
        self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date')
        self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date')

    def get_next_fire_time(self, previous_fire_time, now):
        if previous_fire_time:
            next_fire_time = previous_fire_time + self.interval
        elif self.start_date > now:
            next_fire_time = self.start_date
        else:
            timediff_seconds = timedelta_seconds(now - self.start_date)
            next_interval_num = int(ceil(timediff_seconds / self.interval_length))
            next_fire_time = self.start_date + self.interval * next_interval_num

        if not self.end_date or next_fire_time <= self.end_date:
            return self.timezone.normalize(next_fire_time)

    def __str__(self):
        return 'interval[%s]' % str(self.interval)

    def __repr__(self):
        return "<%s (interval=%r, start_date='%s')>" % (self.__class__.__name__, self.interval,
                                                        datetime_repr(self.start_date))
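

# Illustrative sketch, not part of the upstream module: with an aware start_date the
# trigger aligns fire times to whole intervals counted from that start. The
# _demo_interval_trigger name and the example dates are invented.
def _demo_interval_trigger():
    from pytz import utc
    trigger = IntervalTrigger(hours=1, start_date=datetime(2015, 1, 1, tzinfo=utc))
    now = datetime(2015, 1, 1, 0, 30, tzinfo=utc)
    print(trigger.get_next_fire_time(None, now))  # 2015-01-01 01:00:00+00:00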
385
lib/apscheduler/util.py
Normal file
@@ -0,0 +1,385 @@
"""This module contains several handy functions primarily meant for internal use."""
|
||||
|
||||
from __future__ import division
|
||||
from datetime import date, datetime, time, timedelta, tzinfo
|
||||
from inspect import isfunction, ismethod, getargspec
|
||||
from calendar import timegm
|
||||
import re
|
||||
|
||||
from pytz import timezone, utc
|
||||
import six
|
||||
|
||||
try:
|
||||
from inspect import signature
|
||||
except ImportError: # pragma: nocover
|
||||
try:
|
||||
from funcsigs import signature
|
||||
except ImportError:
|
||||
signature = None
|
||||
|
||||
__all__ = ('asint', 'asbool', 'astimezone', 'convert_to_datetime', 'datetime_to_utc_timestamp',
|
||||
'utc_timestamp_to_datetime', 'timedelta_seconds', 'datetime_ceil', 'get_callable_name', 'obj_to_ref',
|
||||
'ref_to_obj', 'maybe_ref', 'repr_escape', 'check_callable_args')
|
||||
|
||||
|
||||
class _Undefined(object):
|
||||
def __nonzero__(self):
|
||||
return False
|
||||
|
||||
def __bool__(self):
|
||||
return False
|
||||
|
||||
def __repr__(self):
|
||||
return '<undefined>'
|
||||
|
||||
undefined = _Undefined() #: a unique object that only signifies that no value is defined
|
||||
|
||||
|
||||
def asint(text):
|
||||
"""
|
||||
Safely converts a string to an integer, returning None if the string is None.
|
||||
|
||||
:type text: str
|
||||
:rtype: int
|
||||
"""
|
||||
|
||||
if text is not None:
|
||||
return int(text)
|
||||
|
||||
|
||||
def asbool(obj):
|
||||
"""
|
||||
Interprets an object as a boolean value.
|
||||
|
||||
:rtype: bool
|
||||
"""
|
||||
|
||||
if isinstance(obj, str):
|
||||
obj = obj.strip().lower()
|
||||
if obj in ('true', 'yes', 'on', 'y', 't', '1'):
|
||||
return True
|
||||
if obj in ('false', 'no', 'off', 'n', 'f', '0'):
|
||||
return False
|
||||
raise ValueError('Unable to interpret value "%s" as boolean' % obj)
|
||||
return bool(obj)
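

# Illustrative sketch, not part of the upstream module: asbool() accepts the usual
# truthy/falsy strings case-insensitively and falls back to bool() for non-strings.
# The _demo_asbool name is invented.
def _demo_asbool():
    print(asbool(' YES '))  # True
    print(asbool('0'))      # False
    print(asbool(1))        # True -- non-strings go through bool()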


def astimezone(obj):
    """
    Interprets an object as a timezone.

    :rtype: tzinfo
    """

    if isinstance(obj, six.string_types):
        return timezone(obj)
    if isinstance(obj, tzinfo):
        if not hasattr(obj, 'localize') or not hasattr(obj, 'normalize'):
            raise TypeError('Only timezones from the pytz library are supported')
        if obj.zone == 'local':
            raise ValueError('Unable to determine the name of the local timezone -- use an explicit timezone instead')
        return obj
    if obj is not None:
        raise TypeError('Expected tzinfo, got %s instead' % obj.__class__.__name__)


_DATE_REGEX = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
    r'(?: (?P<hour>\d{1,2}):(?P<minute>\d{1,2}):(?P<second>\d{1,2})'
    r'(?:\.(?P<microsecond>\d{1,6}))?)?')


def convert_to_datetime(input, tz, arg_name):
    """
    Converts the given object to a timezone aware datetime object.
    If a timezone aware datetime object is passed, it is returned unmodified.
    If a naive datetime object is passed, it is given the specified timezone.
    If the input is a string, it is parsed as a datetime with the given timezone.

    Date strings are accepted in three different forms: date only (Y-m-d),
    date with time (Y-m-d H:M:S) or date with time and microseconds
    (Y-m-d H:M:S.micro).

    :param str|datetime input: the datetime or string to convert to a timezone aware datetime
    :param datetime.tzinfo tz: timezone to interpret ``input`` in
    :param str arg_name: the name of the argument (used in an error message)
    :rtype: datetime
    """

    if input is None:
        return
    elif isinstance(input, datetime):
        datetime_ = input
    elif isinstance(input, date):
        datetime_ = datetime.combine(input, time())
    elif isinstance(input, six.string_types):
        m = _DATE_REGEX.match(input)
        if not m:
            raise ValueError('Invalid date string')
        values = [(k, int(v or 0)) for k, v in m.groupdict().items()]
        values = dict(values)
        datetime_ = datetime(**values)
    else:
        raise TypeError('Unsupported type for %s: %s' % (arg_name, input.__class__.__name__))

    if datetime_.tzinfo is not None:
        return datetime_
    if tz is None:
        raise ValueError('The "tz" argument must be specified if %s has no timezone information' % arg_name)
    if isinstance(tz, six.string_types):
        tz = timezone(tz)

    try:
        return tz.localize(datetime_, is_dst=None)
    except AttributeError:
        raise TypeError('Only pytz timezones are supported (need the localize() and normalize() methods)')
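

# Illustrative sketch, not part of the upstream module: strings are parsed and
# localized, while already-aware datetimes pass through untouched. The
# _demo_convert_to_datetime name and the example inputs are invented.
def _demo_convert_to_datetime():
    dt = convert_to_datetime('2015-06-01 08:30:00', utc, 'run_date')
    print(dt)                                               # 2015-06-01 08:30:00+00:00
    print(convert_to_datetime(dt, None, 'run_date') is dt)  # True -- aware input is returned as-is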


def datetime_to_utc_timestamp(timeval):
    """
    Converts a datetime instance to a timestamp.

    :type timeval: datetime
    :rtype: float
    """

    if timeval is not None:
        return timegm(timeval.utctimetuple()) + timeval.microsecond / 1000000


def utc_timestamp_to_datetime(timestamp):
    """
    Converts the given timestamp to a datetime instance.

    :type timestamp: float
    :rtype: datetime
    """

    if timestamp is not None:
        return datetime.fromtimestamp(timestamp, utc)
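

# Illustrative sketch, not part of the upstream module: the two timestamp helpers
# round-trip an aware datetime through a float epoch timestamp. The
# _demo_timestamp_roundtrip name and the example datetime are invented.
def _demo_timestamp_roundtrip():
    dt = datetime(2015, 1, 1, 12, 0, 30, 500000, tzinfo=utc)
    ts = datetime_to_utc_timestamp(dt)
    print(ts)                             # 1420113630.5
    print(utc_timestamp_to_datetime(ts))  # 2015-01-01 12:00:30.500000+00:00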


def timedelta_seconds(delta):
    """
    Converts the given timedelta to seconds.

    :type delta: timedelta
    :rtype: float
    """

    return delta.days * 24 * 60 * 60 + delta.seconds + \
        delta.microseconds / 1000000.0


def datetime_ceil(dateval):
    """
    Rounds the given datetime object upwards.

    :type dateval: datetime
    """

    if dateval.microsecond > 0:
        return dateval + timedelta(seconds=1, microseconds=-dateval.microsecond)
    return dateval
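

# Illustrative sketch, not part of the upstream module: any nonzero microsecond
# component rounds the datetime up to the next whole second. The _demo_datetime_ceil
# name is invented.
def _demo_datetime_ceil():
    print(datetime_ceil(datetime(2015, 1, 1, 0, 0, 0, 1)))  # 2015-01-01 00:00:01
    print(datetime_ceil(datetime(2015, 1, 1)))              # 2015-01-01 00:00:00 -- unchanged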


def datetime_repr(dateval):
    return dateval.strftime('%Y-%m-%d %H:%M:%S %Z') if dateval else 'None'


def get_callable_name(func):
    """
    Returns the best available display name for the given function/callable.

    :rtype: str
    """

    # the easy case (on Python 3.3+)
    if hasattr(func, '__qualname__'):
        return func.__qualname__

    # class methods, bound and unbound methods
    f_self = getattr(func, '__self__', None) or getattr(func, 'im_self', None)
    if f_self and hasattr(func, '__name__'):
        f_class = f_self if isinstance(f_self, type) else f_self.__class__
    else:
        f_class = getattr(func, 'im_class', None)

    if f_class and hasattr(func, '__name__'):
        return '%s.%s' % (f_class.__name__, func.__name__)

    # class or class instance
    if hasattr(func, '__call__'):
        # class
        if hasattr(func, '__name__'):
            return func.__name__

        # instance of a class with a __call__ method
        return func.__class__.__name__

    raise TypeError('Unable to determine a name for %r -- maybe it is not a callable?' % func)


def obj_to_ref(obj):
    """
    Returns the path to the given object.

    :rtype: str
    """

    try:
        ref = '%s:%s' % (obj.__module__, get_callable_name(obj))
        obj2 = ref_to_obj(ref)
        if obj != obj2:
            raise ValueError
    except Exception:
        raise ValueError('Cannot determine the reference to %r' % obj)

    return ref


def ref_to_obj(ref):
    """
    Returns the object pointed to by ``ref``.

    :type ref: str
    """

    if not isinstance(ref, six.string_types):
        raise TypeError('References must be strings')
    if ':' not in ref:
        raise ValueError('Invalid reference')

    modulename, rest = ref.split(':', 1)
    try:
        obj = __import__(modulename)
    except ImportError:
        raise LookupError('Error resolving reference %s: could not import module' % ref)

    try:
        for name in modulename.split('.')[1:] + rest.split('.'):
            obj = getattr(obj, name)
        return obj
    except Exception:
        raise LookupError('Error resolving reference %s: error looking up object' % ref)
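

# Illustrative sketch, not part of the upstream module: obj_to_ref() and ref_to_obj()
# round-trip a module-level callable through its 'module:qualname' string form. The
# _demo_ref_roundtrip name is invented.
def _demo_ref_roundtrip():
    ref = obj_to_ref(timedelta_seconds)
    print(ref)                                   # 'apscheduler.util:timedelta_seconds'
    print(ref_to_obj(ref) is timedelta_seconds)  # True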


def maybe_ref(ref):
    """
    Returns the object that the given reference points to, if it is indeed a reference.
    If it is not a reference, the object is returned as-is.
    """

    if not isinstance(ref, str):
        return ref
    return ref_to_obj(ref)


if six.PY2:
    def repr_escape(string):
        if isinstance(string, six.text_type):
            return string.encode('ascii', 'backslashreplace')
        return string
else:
    repr_escape = lambda string: string


def check_callable_args(func, args, kwargs):
    """
    Ensures that the given callable can be called with the given arguments.

    :type args: tuple
    :type kwargs: dict
    """

    pos_kwargs_conflicts = []  # parameters that have a match in both args and kwargs
    positional_only_kwargs = []  # positional-only parameters that have a match in kwargs
    unsatisfied_args = []  # parameters in signature that don't have a match in args or kwargs
    unsatisfied_kwargs = []  # keyword-only arguments that don't have a match in kwargs
    unmatched_args = list(args)  # args that didn't match any of the parameters in the signature
    unmatched_kwargs = list(kwargs)  # kwargs that didn't match any of the parameters in the signature
    has_varargs = has_var_kwargs = False  # indicates if the signature defines *args and **kwargs respectively

    if signature:
        try:
            sig = signature(func)
        except ValueError:
            return  # signature() doesn't work against every kind of callable

        for param in six.itervalues(sig.parameters):
            if param.kind == param.POSITIONAL_OR_KEYWORD:
                if param.name in unmatched_kwargs and unmatched_args:
                    pos_kwargs_conflicts.append(param.name)
                elif unmatched_args:
                    del unmatched_args[0]
                elif param.name in unmatched_kwargs:
                    unmatched_kwargs.remove(param.name)
                elif param.default is param.empty:
                    unsatisfied_args.append(param.name)
            elif param.kind == param.POSITIONAL_ONLY:
                if unmatched_args:
                    del unmatched_args[0]
                elif param.name in unmatched_kwargs:
                    unmatched_kwargs.remove(param.name)
                    positional_only_kwargs.append(param.name)
                elif param.default is param.empty:
                    unsatisfied_args.append(param.name)
            elif param.kind == param.KEYWORD_ONLY:
                if param.name in unmatched_kwargs:
                    unmatched_kwargs.remove(param.name)
                elif param.default is param.empty:
                    unsatisfied_kwargs.append(param.name)
            elif param.kind == param.VAR_POSITIONAL:
                has_varargs = True
            elif param.kind == param.VAR_KEYWORD:
                has_var_kwargs = True
    else:
        if not isfunction(func) and not ismethod(func) and hasattr(func, '__call__'):
            func = func.__call__

        try:
            argspec = getargspec(func)
        except TypeError:
            return  # getargspec() doesn't work on certain callables

        argspec_args = argspec.args if not ismethod(func) else argspec.args[1:]
        has_varargs = bool(argspec.varargs)
        has_var_kwargs = bool(argspec.keywords)
        for arg, default in six.moves.zip_longest(argspec_args, argspec.defaults or (), fillvalue=undefined):
            if arg in unmatched_kwargs and unmatched_args:
                pos_kwargs_conflicts.append(arg)
            elif unmatched_args:
                del unmatched_args[0]
            elif arg in unmatched_kwargs:
                unmatched_kwargs.remove(arg)
            elif default is undefined:
                unsatisfied_args.append(arg)

    # Make sure there are no conflicts between args and kwargs
    if pos_kwargs_conflicts:
        raise ValueError('The following arguments are supplied in both args and kwargs: %s' %
                         ', '.join(pos_kwargs_conflicts))

    # Check if keyword arguments are being fed to positional-only parameters
    if positional_only_kwargs:
        raise ValueError('The following arguments cannot be given as keyword arguments: %s' %
                         ', '.join(positional_only_kwargs))

    # Check that the number of positional arguments minus the number of matched kwargs matches the argspec
    if unsatisfied_args:
        raise ValueError('The following arguments have not been supplied: %s' % ', '.join(unsatisfied_args))

    # Check that all keyword-only arguments have been supplied
    if unsatisfied_kwargs:
        raise ValueError('The following keyword-only arguments have not been supplied in kwargs: %s' %
                         ', '.join(unsatisfied_kwargs))

    # Check that the callable can accept the given number of positional arguments
    if not has_varargs and unmatched_args:
        raise ValueError('The list of positional arguments is longer than the target callable can handle '
                         '(allowed: %d, given in args: %d)' % (len(args) - len(unmatched_args), len(args)))

    # Check that the callable can accept the given keyword arguments
    if not has_var_kwargs and unmatched_kwargs:
        raise ValueError('The target callable does not accept the following keyword arguments: %s' %
                         ', '.join(unmatched_kwargs))
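

# Illustrative sketch, not part of the upstream module: check_callable_args() returns
# silently when the call would succeed and raises ValueError otherwise (output shown
# for the inspect.signature() code path). The _demo_check_callable_args name and the
# example job function are invented.
def _demo_check_callable_args():
    def job(path, retries=3):
        pass

    check_callable_args(job, ('/tmp/example',), {'retries': 5})  # no error
    try:
        check_callable_args(job, (), {})
    except ValueError as exc:
        print(exc)  # The following arguments have not been supplied: path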