Bump apscheduler from 3.9.1.post1 to 3.10.0 (#1986)

* Bump apscheduler from 3.9.1.post1 to 3.10.0

Bumps [apscheduler](https://github.com/agronholm/apscheduler) from 3.9.1.post1 to 3.10.0.
- [Release notes](https://github.com/agronholm/apscheduler/releases)
- [Changelog](https://github.com/agronholm/apscheduler/blob/3.10.0/docs/versionhistory.rst)
- [Commits](https://github.com/agronholm/apscheduler/compare/3.9.1.post1...3.10.0)

---
updated-dependencies:
- dependency-name: apscheduler
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update apscheduler==3.10.0

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: JonnyWong16 <9099342+JonnyWong16@users.noreply.github.com>

[skip ci]
This commit is contained in:
dependabot[bot] 2023-03-02 20:55:46 -08:00 committed by GitHub
parent 1466a391d1
commit ded93ef2f5
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
6 changed files with 43 additions and 59 deletions

View file

@@ -3,12 +3,8 @@ from __future__ import absolute_import
import sys import sys
from apscheduler.executors.base import BaseExecutor, run_job from apscheduler.executors.base import BaseExecutor, run_job
from apscheduler.util import iscoroutinefunction_partial
try:
from apscheduler.executors.base_py3 import run_coroutine_job from apscheduler.executors.base_py3 import run_coroutine_job
except ImportError: from apscheduler.util import iscoroutinefunction_partial
run_coroutine_job = None
class AsyncIOExecutor(BaseExecutor): class AsyncIOExecutor(BaseExecutor):
@@ -46,11 +42,8 @@ class AsyncIOExecutor(BaseExecutor):
self._run_job_success(job.id, events) self._run_job_success(job.id, events)
if iscoroutinefunction_partial(job.func): if iscoroutinefunction_partial(job.func):
if run_coroutine_job is not None:
coro = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name) coro = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name)
f = self._eventloop.create_task(coro) f = self._eventloop.create_task(coro)
else:
raise Exception('Executing coroutine based jobs is not supported with Trollius')
else: else:
f = self._eventloop.run_in_executor(None, run_job, job, job._jobstore_alias, run_times, f = self._eventloop.run_in_executor(None, run_job, job, job._jobstore_alias, run_times,
self._logger.name) self._logger.name)

View file

@@ -57,7 +57,7 @@ class SQLAlchemyJobStore(BaseJobStore):
# 25 = precision that translates to an 8-byte float # 25 = precision that translates to an 8-byte float
self.jobs_t = Table( self.jobs_t = Table(
tablename, metadata, tablename, metadata,
Column('id', Unicode(191, _warn_on_bytestring=False), primary_key=True), Column('id', Unicode(191), primary_key=True),
Column('next_run_time', Float(25), index=True), Column('next_run_time', Float(25), index=True),
Column('job_state', LargeBinary, nullable=False), Column('job_state', LargeBinary, nullable=False),
schema=tableschema schema=tableschema
@@ -68,8 +68,9 @@ class SQLAlchemyJobStore(BaseJobStore):
self.jobs_t.create(self.engine, True) self.jobs_t.create(self.engine, True)
def lookup_job(self, job_id): def lookup_job(self, job_id):
selectable = select([self.jobs_t.c.job_state]).where(self.jobs_t.c.id == job_id) selectable = select(self.jobs_t.c.job_state).where(self.jobs_t.c.id == job_id)
job_state = self.engine.execute(selectable).scalar() with self.engine.begin() as connection:
job_state = connection.execute(selectable).scalar()
return self._reconstitute_job(job_state) if job_state else None return self._reconstitute_job(job_state) if job_state else None
def get_due_jobs(self, now): def get_due_jobs(self, now):
@@ -77,10 +78,11 @@ class SQLAlchemyJobStore(BaseJobStore):
return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp) return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp)
def get_next_run_time(self): def get_next_run_time(self):
selectable = select([self.jobs_t.c.next_run_time]).\ selectable = select(self.jobs_t.c.next_run_time).\
where(self.jobs_t.c.next_run_time != null()).\ where(self.jobs_t.c.next_run_time != null()).\
order_by(self.jobs_t.c.next_run_time).limit(1) order_by(self.jobs_t.c.next_run_time).limit(1)
next_run_time = self.engine.execute(selectable).scalar() with self.engine.begin() as connection:
next_run_time = connection.execute(selectable).scalar()
return utc_timestamp_to_datetime(next_run_time) return utc_timestamp_to_datetime(next_run_time)
def get_all_jobs(self): def get_all_jobs(self):
@@ -94,8 +96,9 @@ class SQLAlchemyJobStore(BaseJobStore):
'next_run_time': datetime_to_utc_timestamp(job.next_run_time), 'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol) 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
}) })
with self.engine.begin() as connection:
try: try:
self.engine.execute(insert) connection.execute(insert)
except IntegrityError: except IntegrityError:
raise ConflictingIdError(job.id) raise ConflictingIdError(job.id)
@@ -104,19 +107,22 @@ class SQLAlchemyJobStore(BaseJobStore):
'next_run_time': datetime_to_utc_timestamp(job.next_run_time), 'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol) 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
}).where(self.jobs_t.c.id == job.id) }).where(self.jobs_t.c.id == job.id)
result = self.engine.execute(update) with self.engine.begin() as connection:
result = connection.execute(update)
if result.rowcount == 0: if result.rowcount == 0:
raise JobLookupError(job.id) raise JobLookupError(job.id)
def remove_job(self, job_id): def remove_job(self, job_id):
delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id) delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id)
result = self.engine.execute(delete) with self.engine.begin() as connection:
result = connection.execute(delete)
if result.rowcount == 0: if result.rowcount == 0:
raise JobLookupError(job_id) raise JobLookupError(job_id)
def remove_all_jobs(self): def remove_all_jobs(self):
delete = self.jobs_t.delete() delete = self.jobs_t.delete()
self.engine.execute(delete) with self.engine.begin() as connection:
connection.execute(delete)
def shutdown(self): def shutdown(self):
self.engine.dispose() self.engine.dispose()
@@ -132,11 +138,12 @@ class SQLAlchemyJobStore(BaseJobStore):
def _get_jobs(self, *conditions): def _get_jobs(self, *conditions):
jobs = [] jobs = []
selectable = select([self.jobs_t.c.id, self.jobs_t.c.job_state]).\ selectable = select(self.jobs_t.c.id, self.jobs_t.c.job_state).\
order_by(self.jobs_t.c.next_run_time) order_by(self.jobs_t.c.next_run_time)
selectable = selectable.where(and_(*conditions)) if conditions else selectable selectable = selectable.where(and_(*conditions)) if conditions else selectable
failed_job_ids = set() failed_job_ids = set()
for row in self.engine.execute(selectable): with self.engine.begin() as connection:
for row in connection.execute(selectable):
try: try:
jobs.append(self._reconstitute_job(row.job_state)) jobs.append(self._reconstitute_job(row.job_state))
except BaseException: except BaseException:
@@ -146,7 +153,7 @@ class SQLAlchemyJobStore(BaseJobStore):
# Remove all the jobs we failed to restore # Remove all the jobs we failed to restore
if failed_job_ids: if failed_job_ids:
delete = self.jobs_t.delete().where(self.jobs_t.c.id.in_(failed_job_ids)) delete = self.jobs_t.delete().where(self.jobs_t.c.id.in_(failed_job_ids))
self.engine.execute(delete) connection.execute(delete)
return jobs return jobs

View file

@@ -1,18 +1,10 @@
from __future__ import absolute_import from __future__ import absolute_import
import asyncio
from functools import wraps, partial from functools import wraps, partial
from apscheduler.schedulers.base import BaseScheduler from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref from apscheduler.util import maybe_ref
try:
import asyncio
except ImportError: # pragma: nocover
try:
import trollius as asyncio
except ImportError:
raise ImportError(
'AsyncIOScheduler requires either Python 3.4 or the asyncio package installed')
def run_in_event_loop(func): def run_in_event_loop(func):
@wraps(func) @wraps(func)

View file

@@ -2,6 +2,7 @@
from __future__ import division from __future__ import division
from asyncio import iscoroutinefunction
from datetime import date, datetime, time, timedelta, tzinfo from datetime import date, datetime, time, timedelta, tzinfo
from calendar import timegm from calendar import timegm
from functools import partial from functools import partial
@@ -22,15 +23,6 @@ try:
except ImportError: except ImportError:
TIMEOUT_MAX = 4294967 # Maximum value accepted by Event.wait() on Windows TIMEOUT_MAX = 4294967 # Maximum value accepted by Event.wait() on Windows
try:
from asyncio import iscoroutinefunction
except ImportError:
try:
from trollius import iscoroutinefunction
except ImportError:
def iscoroutinefunction(func):
return False
__all__ = ('asint', 'asbool', 'astimezone', 'convert_to_datetime', 'datetime_to_utc_timestamp', __all__ = ('asint', 'asbool', 'astimezone', 'convert_to_datetime', 'datetime_to_utc_timestamp',
'utc_timestamp_to_datetime', 'timedelta_seconds', 'datetime_ceil', 'get_callable_name', 'utc_timestamp_to_datetime', 'timedelta_seconds', 'datetime_ceil', 'get_callable_name',
'obj_to_ref', 'ref_to_obj', 'maybe_ref', 'repr_escape', 'check_callable_args', 'obj_to_ref', 'ref_to_obj', 'maybe_ref', 'repr_escape', 'check_callable_args',

View file

@@ -1,4 +1,4 @@
apscheduler==3.9.1.post1 apscheduler==3.10.0
importlib-metadata==6.0.0 importlib-metadata==6.0.0
importlib-resources==5.12.0 importlib-resources==5.12.0
pyinstaller==5.7.0 pyinstaller==5.7.0

View file

@@ -1,5 +1,5 @@
appdirs==1.4.4 appdirs==1.4.4
apscheduler==3.9.1.post1 apscheduler==3.10.0
arrow==1.2.3 arrow==1.2.3
backports.csv==1.0.7 backports.csv==1.0.7
backports.functools-lru-cache==1.6.4 backports.functools-lru-cache==1.6.4