diff --git a/lib/cheroot/__init__.pyi b/lib/cheroot/__init__.pyi new file mode 100644 index 00000000..bda5b5a7 --- /dev/null +++ b/lib/cheroot/__init__.pyi @@ -0,0 +1 @@ +__version__: str diff --git a/lib/cheroot/cli.pyi b/lib/cheroot/cli.pyi new file mode 100644 index 00000000..b9803b3e --- /dev/null +++ b/lib/cheroot/cli.pyi @@ -0,0 +1,32 @@ +from typing import Any + +class BindLocation: ... + +class TCPSocket(BindLocation): + bind_addr: Any + def __init__(self, address, port) -> None: ... + +class UnixSocket(BindLocation): + bind_addr: Any + def __init__(self, path) -> None: ... + +class AbstractSocket(BindLocation): + bind_addr: Any + def __init__(self, abstract_socket) -> None: ... + +class Application: + @classmethod + def resolve(cls, full_path): ... + wsgi_app: Any + def __init__(self, wsgi_app) -> None: ... + def server_args(self, parsed_args): ... + def server(self, parsed_args): ... + +class GatewayYo: + gateway: Any + def __init__(self, gateway) -> None: ... + def server(self, parsed_args): ... + +def parse_wsgi_bind_location(bind_addr_string: str): ... +def parse_wsgi_bind_addr(bind_addr_string: str): ... +def main() -> None: ... diff --git a/lib/cheroot/connections.py b/lib/cheroot/connections.py index 7debcbfd..181e3731 100644 --- a/lib/cheroot/connections.py +++ b/lib/cheroot/connections.py @@ -12,6 +12,7 @@ import time from . import errors from ._compat import selectors from ._compat import suppress +from ._compat import IS_WINDOWS from .makefile import MakeFile import six @@ -152,17 +153,18 @@ class ConnectionManager: conn.socket.fileno(), selectors.EVENT_READ, data=conn, ) - def _expire(self): - """Expire least recently used connections. + def _expire(self, threshold): + r"""Expire least recently used connections. - This happens if there are either too many open connections, or if the - connections have been timed out. + :param threshold: Connections that have not been used within this \ + duration (in seconds), are considered expired and \ + are closed and removed. + :type threshold: float This should be called periodically. """ # find any connections still registered with the selector # that have not been active recently enough. - threshold = time.time() - self.server.timeout timed_out_connections = [ (sock_fd, conn) for (sock_fd, conn) in self._selector.connections @@ -203,11 +205,37 @@ class ConnectionManager: self._serving = False def _run(self, expiration_interval): + r"""Run connection handler loop until stop was requested. + + :param expiration_interval: Interval, in seconds, at which \ + connections will be checked for \ + expiration. + :type expiration_interval: float + + Use ``expiration_interval`` as ``select()`` timeout + to assure expired connections are closed in time. + + On Windows cap the timeout to 0.05 seconds + as ``select()`` does not return when a socket is ready. + """ last_expiration_check = time.time() + if IS_WINDOWS: + # 0.05 seconds are used as an empirically obtained balance between + # max connection delay and idle system load. Benchmarks show a + # mean processing time per connection of ~0.03 seconds on Linux + # and with 0.01 seconds timeout on Windows: + # https://github.com/cherrypy/cheroot/pull/352 + # While this highly depends on system and hardware, 0.05 seconds + # max delay should hence usually not significantly increase the + # mean time/delay per connection, but significantly reduce idle + # system load by reducing socket loops to 1/5 with 0.01 seconds. 
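# --- Editor's sketch (not part of the patch) -----------------------------
# The timeout choice described in the comment above, condensed into a
# hypothetical helper so the resulting behaviour is easy to see at a glance.
def _effective_select_timeout(expiration_interval, is_windows):
    """Cap the select() timeout at 0.05 s on Windows, where select()
    does not return early once a socket becomes ready."""
    return min(expiration_interval, 0.05) if is_windows else expiration_interval

assert _effective_select_timeout(0.01, is_windows=True) == 0.01
assert _effective_select_timeout(2.0, is_windows=True) == 0.05
assert _effective_select_timeout(2.0, is_windows=False) == 2.0
# --------------------------------------------------------------------------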
+ select_timeout = min(expiration_interval, 0.05) + else: + select_timeout = expiration_interval while not self._stop_requested: try: - active_list = self._selector.select(timeout=0.01) + active_list = self._selector.select(timeout=select_timeout) except OSError: self._remove_invalid_sockets() continue @@ -226,7 +254,7 @@ class ConnectionManager: now = time.time() if (now - last_expiration_check) > expiration_interval: - self._expire() + self._expire(threshold=now - self.server.timeout) last_expiration_check = now def _remove_invalid_sockets(self): diff --git a/lib/cheroot/connections.pyi b/lib/cheroot/connections.pyi new file mode 100644 index 00000000..528ad765 --- /dev/null +++ b/lib/cheroot/connections.pyi @@ -0,0 +1,23 @@ +from typing import Any + +def prevent_socket_inheritance(sock) -> None: ... + +class _ThreadsafeSelector: + def __init__(self) -> None: ... + def __len__(self): ... + @property + def connections(self) -> None: ... + def register(self, fileobj, events, data: Any | None = ...): ... + def unregister(self, fileobj): ... + def select(self, timeout: Any | None = ...): ... + def close(self) -> None: ... + +class ConnectionManager: + server: Any + def __init__(self, server) -> None: ... + def put(self, conn) -> None: ... + def stop(self) -> None: ... + def run(self, expiration_interval) -> None: ... + def close(self) -> None: ... + @property + def can_add_keepalive_connection(self): ... diff --git a/lib/cheroot/errors.pyi b/lib/cheroot/errors.pyi new file mode 100644 index 00000000..e78a7585 --- /dev/null +++ b/lib/cheroot/errors.pyi @@ -0,0 +1,13 @@ +from typing import Any, List, Set, Tuple + +class MaxSizeExceeded(Exception): ... +class NoSSLError(Exception): ... +class FatalSSLAlert(Exception): ... + +def plat_specific_errors(*errnames: str) -> List[int]: ... + +socket_error_eintr: List[int] +socket_errors_to_ignore: List[int] +socket_errors_nonblocking: List[int] +acceptable_sock_shutdown_error_codes: Set[int] +acceptable_sock_shutdown_exceptions: Tuple[Exception] diff --git a/lib/cheroot/makefile.pyi b/lib/cheroot/makefile.pyi new file mode 100644 index 00000000..11748505 --- /dev/null +++ b/lib/cheroot/makefile.pyi @@ -0,0 +1,32 @@ +import io + +SOCK_WRITE_BLOCKSIZE: int + +class BufferedWriter(io.BufferedWriter): + def write(self, b): ... + +class MakeFile_PY2: + bytes_read: int + bytes_written: int + def __init__(self, *args, **kwargs) -> None: ... + def write(self, data) -> None: ... + def send(self, data): ... + def flush(self) -> None: ... + def recv(self, size): ... + class FauxSocket: ... + def read(self, size: int = ...): ... + def readline(self, size: int = ...): ... + def has_data(self): ... + +class StreamReader(io.BufferedReader): + bytes_read: int + def __init__(self, sock, mode: str = ..., bufsize=...) -> None: ... + def read(self, *args, **kwargs): ... + def has_data(self): ... + +class StreamWriter(BufferedWriter): + bytes_written: int + def __init__(self, sock, mode: str = ..., bufsize=...) -> None: ... + def write(self, val, *args, **kwargs): ... + +def MakeFile(sock, mode: str = ..., bufsize=...): ... diff --git a/lib/jaraco/classes/__init__.py b/lib/cheroot/py.typed similarity index 100% rename from lib/jaraco/classes/__init__.py rename to lib/cheroot/py.typed diff --git a/lib/cheroot/server.py b/lib/cheroot/server.py index 8b59a33a..d92988ab 100644 --- a/lib/cheroot/server.py +++ b/lib/cheroot/server.py @@ -57,6 +57,10 @@ will run the server forever) or use invoking :func:`prepare() And now for a trivial doctest to exercise the test suite +.. 
testsetup:: + + from cheroot.server import HTTPServer + >>> 'HTTPServer' in globals() True """ @@ -273,7 +277,8 @@ class SizeCheckWrapper: def read(self, size=None): """Read a chunk from ``rfile`` buffer and return it. - :param int size: amount of data to read + :param size: amount of data to read + :type size: int :returns: chunk from ``rfile``, limited by size if specified :rtype: bytes @@ -286,7 +291,8 @@ class SizeCheckWrapper: def readline(self, size=None): """Read a single line from ``rfile`` buffer and return it. - :param int size: minimum amount of data to read + :param size: minimum amount of data to read + :type size: int :returns: one line from ``rfile`` :rtype: bytes @@ -312,7 +318,8 @@ class SizeCheckWrapper: def readlines(self, sizehint=0): """Read all lines from ``rfile`` buffer and return them. - :param int sizehint: hint of minimum amount of data to read + :param sizehint: hint of minimum amount of data to read + :type sizehint: int :returns: lines of bytes read from ``rfile`` :rtype: list[bytes] @@ -362,7 +369,8 @@ class KnownLengthRFile: def read(self, size=None): """Read a chunk from ``rfile`` buffer and return it. - :param int size: amount of data to read + :param size: amount of data to read + :type size: int :rtype: bytes :returns: chunk from ``rfile``, limited by size if specified @@ -381,7 +389,8 @@ class KnownLengthRFile: def readline(self, size=None): """Read a single line from ``rfile`` buffer and return it. - :param int size: minimum amount of data to read + :param size: minimum amount of data to read + :type size: int :returns: one line from ``rfile`` :rtype: bytes @@ -400,7 +409,8 @@ class KnownLengthRFile: def readlines(self, sizehint=0): """Read all lines from ``rfile`` buffer and return them. - :param int sizehint: hint of minimum amount of data to read + :param sizehint: hint of minimum amount of data to read + :type sizehint: int :returns: lines of bytes read from ``rfile`` :rtype: list[bytes] @@ -501,7 +511,8 @@ class ChunkedRFile: def read(self, size=None): """Read a chunk from ``rfile`` buffer and return it. - :param int size: amount of data to read + :param size: amount of data to read + :type size: int :returns: chunk from ``rfile``, limited by size if specified :rtype: bytes @@ -532,7 +543,8 @@ class ChunkedRFile: def readline(self, size=None): """Read a single line from ``rfile`` buffer and return it. - :param int size: minimum amount of data to read + :param size: minimum amount of data to read + :type size: int :returns: one line from ``rfile`` :rtype: bytes @@ -573,7 +585,8 @@ class ChunkedRFile: def readlines(self, sizehint=0): """Read all lines from ``rfile`` buffer and return them. 
- :param int sizehint: hint of minimum amount of data to read + :param sizehint: hint of minimum amount of data to read + :type sizehint: int :returns: lines of bytes read from ``rfile`` :rtype: list[bytes] @@ -1777,7 +1790,7 @@ class HTTPServer: info = [(sock_type, socket.SOCK_STREAM, 0, '', bind_addr)] for res in info: - af, socktype, proto, canonname, sa = res + af, socktype, proto, _canonname, sa = res try: self.bind(af, socktype, proto) break @@ -1841,7 +1854,7 @@ class HTTPServer: """Context manager for running this server in a thread.""" self.prepare() thread = threading.Thread(target=self.serve) - thread.setDaemon(True) + thread.daemon = True thread.start() try: yield thread @@ -2118,7 +2131,7 @@ class HTTPServer: host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, ): - af, socktype, proto, canonname, sa = res + af, socktype, proto, _canonname, _sa = res s = None try: s = socket.socket(af, socktype, proto) diff --git a/lib/cheroot/server.pyi b/lib/cheroot/server.pyi new file mode 100644 index 00000000..864adff4 --- /dev/null +++ b/lib/cheroot/server.pyi @@ -0,0 +1,172 @@ +from typing import Any + +class HeaderReader: + def __call__(self, rfile, hdict: Any | None = ...): ... + +class DropUnderscoreHeaderReader(HeaderReader): ... + +class SizeCheckWrapper: + rfile: Any + maxlen: Any + bytes_read: int + def __init__(self, rfile, maxlen) -> None: ... + def read(self, size: Any | None = ...): ... + def readline(self, size: Any | None = ...): ... + def readlines(self, sizehint: int = ...): ... + def close(self) -> None: ... + def __iter__(self): ... + def __next__(self): ... + next: Any + +class KnownLengthRFile: + rfile: Any + remaining: Any + def __init__(self, rfile, content_length) -> None: ... + def read(self, size: Any | None = ...): ... + def readline(self, size: Any | None = ...): ... + def readlines(self, sizehint: int = ...): ... + def close(self) -> None: ... + def __iter__(self): ... + def __next__(self): ... + next: Any + +class ChunkedRFile: + rfile: Any + maxlen: Any + bytes_read: int + buffer: Any + bufsize: Any + closed: bool + def __init__(self, rfile, maxlen, bufsize: int = ...) -> None: ... + def read(self, size: Any | None = ...): ... + def readline(self, size: Any | None = ...): ... + def readlines(self, sizehint: int = ...): ... + def read_trailer_lines(self) -> None: ... + def close(self) -> None: ... + +class HTTPRequest: + server: Any + conn: Any + inheaders: Any + outheaders: Any + ready: bool + close_connection: bool + chunked_write: bool + header_reader: Any + started_request: bool + scheme: bytes + response_protocol: str + status: str + sent_headers: bool + chunked_read: bool + proxy_mode: Any + strict_mode: Any + def __init__(self, server, conn, proxy_mode: bool = ..., strict_mode: bool = ...) -> None: ... + rfile: Any + def parse_request(self) -> None: ... + uri: Any + method: Any + authority: Any + path: Any + qs: Any + request_protocol: Any + def read_request_line(self): ... + def read_request_headers(self): ... + def respond(self) -> None: ... + def simple_response(self, status, msg: str = ...) -> None: ... + def ensure_headers_sent(self) -> None: ... + def write(self, chunk) -> None: ... + def send_headers(self) -> None: ... 
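# --- Editor's sketch (not part of the patch) -----------------------------
# With the ``py.typed`` marker and the ``.pyi`` stubs added in this change,
# code that imports cheroot becomes checkable by mypy/pyright (PEP 561).
# A minimal typed consumer might look like this; the app and address are
# placeholders, and ``wsgi.Server`` resolves via the ``wsgi.pyi`` stub
# further down in this diff.
from cheroot import wsgi

def hello_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'Hello, world!']

server = wsgi.Server(('127.0.0.1', 8080), hello_app)
# server.start()  # would serve until interrupted; omitted here
# --------------------------------------------------------------------------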
+ +class HTTPConnection: + remote_addr: Any + remote_port: Any + ssl_env: Any + rbufsize: Any + wbufsize: Any + RequestHandlerClass: Any + peercreds_enabled: bool + peercreds_resolve_enabled: bool + last_used: Any + server: Any + socket: Any + rfile: Any + wfile: Any + requests_seen: int + def __init__(self, server, sock, makefile=...) -> None: ... + def communicate(self): ... + linger: bool + def close(self) -> None: ... + def get_peer_creds(self): ... + @property + def peer_pid(self): ... + @property + def peer_uid(self): ... + @property + def peer_gid(self): ... + def resolve_peer_creds(self): ... + @property + def peer_user(self): ... + @property + def peer_group(self): ... + +class HTTPServer: + gateway: Any + minthreads: Any + maxthreads: Any + server_name: Any + protocol: str + request_queue_size: int + shutdown_timeout: int + timeout: int + expiration_interval: float + version: Any + software: Any + ready: bool + max_request_header_size: int + max_request_body_size: int + nodelay: bool + ConnectionClass: Any + ssl_adapter: Any + peercreds_enabled: bool + peercreds_resolve_enabled: bool + keep_alive_conn_limit: int + requests: Any + def __init__(self, bind_addr, gateway, minthreads: int = ..., maxthreads: int = ..., server_name: Any | None = ..., peercreds_enabled: bool = ..., peercreds_resolve_enabled: bool = ...) -> None: ... + stats: Any + def clear_stats(self): ... + def runtime(self): ... + @property + def bind_addr(self): ... + @bind_addr.setter + def bind_addr(self, value) -> None: ... + def safe_start(self) -> None: ... + socket: Any + def prepare(self) -> None: ... + def serve(self) -> None: ... + def start(self) -> None: ... + @property + def can_add_keepalive_connection(self): ... + def put_conn(self, conn) -> None: ... + def error_log(self, msg: str = ..., level: int = ..., traceback: bool = ...) -> None: ... + def bind(self, family, type, proto: int = ...): ... + def bind_unix_socket(self, bind_addr): ... + @staticmethod + def prepare_socket(bind_addr, family, type, proto, nodelay, ssl_adapter): ... + @staticmethod + def bind_socket(socket_, bind_addr): ... + @staticmethod + def resolve_real_bind_addr(socket_): ... + def process_conn(self, conn) -> None: ... + @property + def interrupt(self): ... + @interrupt.setter + def interrupt(self, interrupt) -> None: ... + def stop(self) -> None: ... + +class Gateway: + req: Any + def __init__(self, req) -> None: ... + def respond(self) -> None: ... + +def get_ssl_adapter_class(name: str = ...): ... diff --git a/lib/cheroot/ssl/__init__.pyi b/lib/cheroot/ssl/__init__.pyi new file mode 100644 index 00000000..a9807660 --- /dev/null +++ b/lib/cheroot/ssl/__init__.pyi @@ -0,0 +1,19 @@ +from abc import abstractmethod +from typing import Any + +class Adapter(): + certificate: Any + private_key: Any + certificate_chain: Any + ciphers: Any + context: Any + @abstractmethod + def __init__(self, certificate, private_key, certificate_chain: Any | None = ..., ciphers: Any | None = ...): ... + @abstractmethod + def bind(self, sock): ... + @abstractmethod + def wrap(self, sock): ... + @abstractmethod + def get_environ(self): ... + @abstractmethod + def makefile(self, sock, mode: str = ..., bufsize: int = ...): ... diff --git a/lib/cheroot/ssl/builtin.pyi b/lib/cheroot/ssl/builtin.pyi new file mode 100644 index 00000000..fdc656e0 --- /dev/null +++ b/lib/cheroot/ssl/builtin.pyi @@ -0,0 +1,18 @@ +from typing import Any +from . 
import Adapter + +generic_socket_error: OSError +DEFAULT_BUFFER_SIZE: int + +class BuiltinSSLAdapter(Adapter): + CERT_KEY_TO_ENV: Any + CERT_KEY_TO_LDAP_CODE: Any + def __init__(self, certificate, private_key, certificate_chain: Any | None = ..., ciphers: Any | None = ...) -> None: ... + @property + def context(self): ... + @context.setter + def context(self, context) -> None: ... + def bind(self, sock): ... + def wrap(self, sock): ... + def get_environ(self): ... + def makefile(self, sock, mode: str = ..., bufsize: int = ...): ... diff --git a/lib/cheroot/ssl/pyopenssl.pyi b/lib/cheroot/ssl/pyopenssl.pyi new file mode 100644 index 00000000..d5b93471 --- /dev/null +++ b/lib/cheroot/ssl/pyopenssl.pyi @@ -0,0 +1,30 @@ +from . import Adapter +from ..makefile import StreamReader, StreamWriter +from OpenSSL import SSL +from typing import Any + +ssl_conn_type: SSL.Connection + +class SSLFileobjectMixin: + ssl_timeout: int + ssl_retry: float + def recv(self, size): ... + def readline(self, size: int = ...): ... + def sendall(self, *args, **kwargs): ... + def send(self, *args, **kwargs): ... + +class SSLFileobjectStreamReader(SSLFileobjectMixin, StreamReader): ... # type:ignore +class SSLFileobjectStreamWriter(SSLFileobjectMixin, StreamWriter): ... # type:ignore + +class SSLConnectionProxyMeta: + def __new__(mcl, name, bases, nmspc): ... + +class SSLConnection(): + def __init__(self, *args) -> None: ... + +class pyOpenSSLAdapter(Adapter): + def __init__(self, certificate, private_key, certificate_chain: Any | None = ..., ciphers: Any | None = ...) -> None: ... + def bind(self, sock): ... + def wrap(self, sock): ... + def get_environ(self): ... + def makefile(self, sock, mode: str = ..., bufsize: int = ...): ... diff --git a/lib/cheroot/test/_pytest_plugin.py b/lib/cheroot/test/_pytest_plugin.py index 2bba9aa9..012211df 100644 --- a/lib/cheroot/test/_pytest_plugin.py +++ b/lib/cheroot/test/_pytest_plugin.py @@ -35,4 +35,16 @@ def pytest_load_initial_conftests(early_config, parser, args): '= resource_limit - finally: - # Stop our server - httpserver.stop() + def native_process_conn(conn): + native_process_conn.filenos.add(conn.socket.fileno()) + return _old_process_conn(conn) + native_process_conn.filenos = set() + native_server_client.server_instance.process_conn = native_process_conn + + # Trigger a crash if select() is used in the implementation + native_server_client.connect('/') + + # Ensure that at least one connection got accepted, otherwise the + # follow-up check wouldn't make sense + assert len(native_process_conn.filenos) > 0 + + # Check at least one of the sockets created are above the target number + assert any(fn >= resource_limit for fn in native_process_conn.filenos) if not IS_WINDOWS: @@ -365,6 +369,13 @@ if not IS_WINDOWS: ) +@pytest.fixture +def _garbage_bin(): + """Disable garbage collection when this fixture is in use.""" + with DefaultGc().nogc(): + yield + + @pytest.fixture def resource_limit(request): """Set the resource limit two times bigger then requested.""" @@ -392,25 +403,26 @@ def resource_limit(request): @pytest.fixture -def many_open_sockets(resource_limit): +def many_open_sockets(request, resource_limit): """Allocate a lot of file descriptors by opening dummy sockets.""" + # NOTE: `@pytest.mark.usefixtures` doesn't work on fixtures which + # NOTE: forces us to invoke this one dynamically to avoid having an + # NOTE: unused argument. 
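# --- Editor's sketch (not part of the patch) -----------------------------
# The ``_garbage_bin`` fixture above relies on ``DefaultGc().nogc()``
# (defined outside this diff) to suspend garbage collection while the test
# holds many sockets open, so file descriptors are not reclaimed at
# unpredictable times. A roughly equivalent stdlib-only context manager,
# for illustration:
import contextlib
import gc

@contextlib.contextmanager
def no_gc():
    was_enabled = gc.isenabled()
    gc.disable()
    try:
        yield
    finally:
        if was_enabled:
            gc.enable()
# --------------------------------------------------------------------------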
+ request.getfixturevalue('_garbage_bin') + # Hoard a lot of file descriptors by opening and storing a lot of sockets test_sockets = [] # Open a lot of file descriptors, so the next one the server # opens is a high number try: - for i in range(resource_limit): + for _ in range(resource_limit): sock = socket.socket() test_sockets.append(sock) - # NOTE: We used to interrupt the loop early but this doesn't seem - # NOTE: to work well in envs with indeterministic runtimes like - # NOTE: PyPy. It looks like sometimes it frees some file - # NOTE: descriptors in between running this fixture and the actual - # NOTE: test code so the early break has been removed to try - # NOTE: address that. The approach may need to be rethought if the - # NOTE: issue reoccurs. Another approach may be disabling the GC. + # If we reach a high enough number, we don't need to open more + if sock.fileno() >= resource_limit: + break # Check we opened enough descriptors to reach a high number - the_highest_fileno = max(sock.fileno() for sock in test_sockets) + the_highest_fileno = test_sockets[-1].fileno() assert the_highest_fileno >= resource_limit yield the_highest_fileno finally: diff --git a/lib/cheroot/test/test_ssl.py b/lib/cheroot/test/test_ssl.py index 8aa258f4..8da330df 100644 --- a/lib/cheroot/test/test_ssl.py +++ b/lib/cheroot/test/test_ssl.py @@ -22,7 +22,7 @@ import six import trustme from .._compat import bton, ntob, ntou -from .._compat import IS_ABOVE_OPENSSL10, IS_PYPY +from .._compat import IS_ABOVE_OPENSSL10, IS_CI, IS_PYPY from .._compat import IS_LINUX, IS_MACOS, IS_WINDOWS from ..server import HTTPServer, get_ssl_adapter_class from ..testing import ( @@ -52,6 +52,7 @@ IS_PYOPENSSL_SSL_VERSION_1_0 = ( PY27 = sys.version_info[:2] == (2, 7) PY34 = sys.version_info[:2] == (3, 4) PY3 = not six.PY2 +PY310_PLUS = sys.version_info[:2] >= (3, 10) _stdlib_to_openssl_verify = { @@ -149,8 +150,8 @@ def tls_ca_certificate_pem_path(ca): @pytest.fixture def tls_certificate(ca): """Provide a leaf certificate via fixture.""" - interface, host, port = _get_conn_data(ANY_INTERFACE_IPV4) - return ca.issue_server_cert(ntou(interface)) + interface, _host, _port = _get_conn_data(ANY_INTERFACE_IPV4) + return ca.issue_cert(ntou(interface)) @pytest.fixture @@ -270,6 +271,11 @@ def test_ssl_adapters( ssl.CERT_REQUIRED, # server should validate if client cert CA is OK ), ) +@pytest.mark.xfail( + IS_PYPY and IS_CI, + reason='Fails under PyPy in CI for unknown reason', + strict=False, +) def test_tls_client_auth( # noqa: C901 # FIXME # FIXME: remove twisted logic, separate tests mocker, @@ -294,8 +300,7 @@ def test_tls_client_auth( # noqa: C901 # FIXME 'idna.core.ulabel', return_value=ntob(tls_client_identity), ): - client_cert = client_cert_root_ca.issue_server_cert( - # FIXME: change to issue_cert once new trustme is out + client_cert = client_cert_root_ca.issue_cert( ntou(tls_client_identity), ) del client_cert_root_ca @@ -419,6 +424,10 @@ def test_tls_client_auth( # noqa: C901 # FIXME 'ConnectionResetError(10054, ' "'An existing connection was forcibly closed " "by the remote host', None, 10054, None))", + "('Connection aborted.', " + 'error(10054, ' + "'An existing connection was forcibly closed " + "by the remote host'))", ) if IS_WINDOWS else ( "('Connection aborted.', " 'OSError("(104, \'ECONNRESET\')"))', @@ -437,13 +446,35 @@ def test_tls_client_auth( # noqa: C901 # FIXME "('Connection aborted.', " "BrokenPipeError(32, 'Broken pipe'))", ) + + if PY310_PLUS: + # FIXME: Figure out what's happening and correct the 
problem + expected_substrings += ( + 'SSLError(SSLEOFError(8, ' + "'EOF occurred in violation of protocol (_ssl.c:", + ) + if IS_GITHUB_ACTIONS_WORKFLOW and IS_WINDOWS and PY310_PLUS: + expected_substrings += ( + "('Connection aborted.', " + 'RemoteDisconnected(' + "'Remote end closed connection without response'))", + ) + assert any(e in err_text for e in expected_substrings) @pytest.mark.parametrize( # noqa: C901 # FIXME 'adapter_type', ( - 'builtin', + pytest.param( + 'builtin', + marks=pytest.mark.xfail( + IS_GITHUB_ACTIONS_WORKFLOW and IS_MACOS and PY310_PLUS, + reason='Unclosed TLS resource warnings happen on macOS ' + 'under Python 3.10', + strict=False, + ), + ), 'pyopenssl', ), ) @@ -602,18 +633,19 @@ def test_https_over_http_error(http_server, ip_addr): assert expected_substring in ssl_err.value.args[-1] +http_over_https_error_builtin_marks = [] +if IS_WINDOWS and six.PY2: + http_over_https_error_builtin_marks.append( + pytest.mark.flaky(reruns=5, reruns_delay=2), + ) + + @pytest.mark.parametrize( 'adapter_type', ( pytest.param( 'builtin', - marks=pytest.mark.xfail( - IS_WINDOWS and six.PY2, - raises=requests.exceptions.ConnectionError, - reason='Stdlib `ssl` module behaves weirdly ' - 'on Windows under Python 2', - strict=False, - ), + marks=http_over_https_error_builtin_marks, ), 'pyopenssl', ), @@ -654,7 +686,7 @@ def test_http_over_https_error( interface, _host, port = _get_conn_data(ip_addr) tlshttpserver = tls_http_server((interface, port), tls_adapter) - interface, host, port = _get_conn_data( + interface, _host, port = _get_conn_data( tlshttpserver.bind_addr, ) diff --git a/lib/cheroot/test/test_wsgi.py b/lib/cheroot/test/test_wsgi.py index d3c47ece..91dfb71e 100644 --- a/lib/cheroot/test/test_wsgi.py +++ b/lib/cheroot/test/test_wsgi.py @@ -1,6 +1,7 @@ """Test wsgi.""" from concurrent.futures.thread import ThreadPoolExecutor +from traceback import print_tb import pytest import portend @@ -20,7 +21,7 @@ def simple_wsgi_server(): """Fucking simple wsgi server fixture (duh).""" port = portend.find_available_local_port() - def app(environ, start_response): + def app(_environ, start_response): status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers) @@ -29,7 +30,9 @@ def simple_wsgi_server(): host = '::' addr = host, port server = wsgi.Server(addr, app, timeout=600 if IS_SLOW_ENV else 20) + # pylint: disable=possibly-unused-variable url = 'http://localhost:{port}/'.format(**locals()) + # pylint: disable=possibly-unused-variable with server._run_in_thread() as thread: yield locals() @@ -46,6 +49,7 @@ def test_connection_keepalive(simple_wsgi_server): with ExceptionTrap(requests.exceptions.ConnectionError) as trap: resp = session.get('info') resp.raise_for_status() + print_tb(trap.tb) return bool(trap) with ThreadPoolExecutor(max_workers=10 if IS_SLOW_ENV else 50) as pool: @@ -56,3 +60,24 @@ def test_connection_keepalive(simple_wsgi_server): failures = sum(task.result() for task in tasks) assert not failures + + +def test_gateway_start_response_called_twice(monkeypatch): + """Verify that repeat calls of ``Gateway.start_response()`` fail.""" + monkeypatch.setattr(wsgi.Gateway, 'get_environ', lambda self: {}) + wsgi_gateway = wsgi.Gateway(None) + wsgi_gateway.started_response = True + + err_msg = '^WSGI start_response called a second time with no exc_info.$' + with pytest.raises(RuntimeError, match=err_msg): + wsgi_gateway.start_response('200', (), None) + + +def 
test_gateway_write_needs_start_response_called_before(monkeypatch): + """Check that calling ``Gateway.write()`` needs started response.""" + monkeypatch.setattr(wsgi.Gateway, 'get_environ', lambda self: {}) + wsgi_gateway = wsgi.Gateway(None) + + err_msg = '^WSGI write called before start_response.$' + with pytest.raises(RuntimeError, match=err_msg): + wsgi_gateway.write(None) # The actual arg value is unimportant diff --git a/lib/cheroot/test/webtest.py b/lib/cheroot/test/webtest.py index cdd340e8..118014a6 100644 --- a/lib/cheroot/test/webtest.py +++ b/lib/cheroot/test/webtest.py @@ -26,7 +26,7 @@ import time import traceback import os import json -import unittest +import unittest # pylint: disable=deprecated-module,preferred-module import warnings import functools @@ -434,7 +434,7 @@ def cleanHeaders(headers, method, body, host, port): # Add the required Host request header if not present. # [This specifies the host:port of the server, not the client.] found = False - for k, v in headers: + for k, _v in headers: if k.lower() == 'host': found = True break @@ -498,9 +498,9 @@ def openURL(*args, **kwargs): opener = functools.partial(_open_url_once, *args, **kwargs) def on_exception(): - type_, exc = sys.exc_info()[:2] + exc = sys.exc_info()[1] if isinstance(exc, raise_subcls): - raise + raise exc time.sleep(0.5) # Try up to 10 times @@ -559,6 +559,10 @@ def strip_netloc(url): Useful for wrapping an absolute-URI for which only the path is expected (such as in calls to :py:meth:`WebCase.getPage`). + .. testsetup:: + + from cheroot.test.webtest import strip_netloc + >>> strip_netloc('https://google.com/foo/bar?bing#baz') '/foo/bar?bing' @@ -569,7 +573,7 @@ def strip_netloc(url): '/foo/bar?bing' """ parsed = urllib_parse.urlparse(url) - scheme, netloc, path, params, query, fragment = parsed + _scheme, _netloc, path, params, query, _fragment = parsed stripped = '', '', path, params, query, '' return urllib_parse.urlunparse(stripped) diff --git a/lib/cheroot/testing.pyi b/lib/cheroot/testing.pyi new file mode 100644 index 00000000..4c825f98 --- /dev/null +++ b/lib/cheroot/testing.pyi @@ -0,0 +1,17 @@ +from typing import Any, Iterator, Optional, TypeVar + +from .server import HTTPServer +from .wsgi import Server + +T = TypeVar('T', bound=HTTPServer) + +EPHEMERAL_PORT: int +NO_INTERFACE: Optional[str] +ANY_INTERFACE_IPV4: str +ANY_INTERFACE_IPV6: str +config: dict + +def cheroot_server(server_factory: T) -> Iterator[T]: ... +def wsgi_server() -> Iterator[Server]: ... +def native_server() -> Iterator[HTTPServer]: ... +def get_server_client(server) -> Any: ... diff --git a/lib/cheroot/workers/__init__.pyi b/lib/cheroot/workers/__init__.pyi new file mode 100644 index 00000000..e69de29b diff --git a/lib/cheroot/workers/threadpool.py b/lib/cheroot/workers/threadpool.py index 915934cc..795ebc6d 100644 --- a/lib/cheroot/workers/threadpool.py +++ b/lib/cheroot/workers/threadpool.py @@ -108,7 +108,7 @@ class WorkerThread(threading.Thread): Retrieves incoming connections from thread pool. """ - self.server.stats['Worker Threads'][self.getName()] = self.stats + self.server.stats['Worker Threads'][self.name] = self.stats try: self.ready = True while True: @@ -173,12 +173,12 @@ class ThreadPool: def start(self): """Start the pool of threads.""" - for i in range(self.min): + for _ in range(self.min): self._threads.append(WorkerThread(self.server)) for worker in self._threads: - worker.setName( + worker.name = ( 'CP Server {worker_name!s}'. 
- format(worker_name=worker.getName()), + format(worker_name=worker.name), ) worker.start() for worker in self._threads: @@ -187,7 +187,7 @@ class ThreadPool: @property def idle(self): # noqa: D401; irrelevant for properties - """Number of worker threads which are idle. Read-only.""" + """Number of worker threads which are idle. Read-only.""" # noqa: D401 idles = len([t for t in self._threads if t.conn is None]) return max(idles - len(self._pending_shutdowns), 0) @@ -226,9 +226,9 @@ class ThreadPool: def _spawn_worker(self): worker = WorkerThread(self.server) - worker.setName( + worker.name = ( 'CP Server {worker_name!s}'. - format(worker_name=worker.getName()), + format(worker_name=worker.name), ) worker.start() return worker @@ -251,7 +251,7 @@ class ThreadPool: # put shutdown requests on the queue equal to the number of threads # to remove. As each request is processed by a worker, that worker # will terminate and be culled from the list. - for n in range(n_to_remove): + for _ in range(n_to_remove): self._pending_shutdowns.append(None) self._queue.put(_SHUTDOWNREQUEST) @@ -280,8 +280,9 @@ class ThreadPool: self._queue.put(_SHUTDOWNREQUEST) ignored_errors = ( - # TODO: explain this exception. - AssertionError, + # Raised when start_response called >1 time w/o exc_info or + # wsgi write is called before start_response. See cheroot#261 + RuntimeError, # Ignore repeated Ctrl-C. See cherrypy#691. KeyboardInterrupt, ) @@ -320,7 +321,7 @@ class ThreadPool: return ( thread for thread in threads - if thread is not threading.currentThread() + if thread is not threading.current_thread() ) @property diff --git a/lib/cheroot/workers/threadpool.pyi b/lib/cheroot/workers/threadpool.pyi new file mode 100644 index 00000000..201d3914 --- /dev/null +++ b/lib/cheroot/workers/threadpool.pyi @@ -0,0 +1,37 @@ +import threading +from typing import Any + +class TrueyZero: + def __add__(self, other): ... + def __radd__(self, other): ... + +trueyzero: TrueyZero + +class WorkerThread(threading.Thread): + conn: Any + server: Any + ready: bool + requests_seen: int + bytes_read: int + bytes_written: int + start_time: Any + work_time: int + stats: Any + def __init__(self, server): ... + def run(self) -> None: ... + +class ThreadPool: + server: Any + min: Any + max: Any + get: Any + def __init__(self, server, min: int = ..., max: int = ..., accepted_queue_size: int = ..., accepted_queue_timeout: int = ...) -> None: ... + def start(self) -> None: ... + @property + def idle(self): ... + def put(self, obj) -> None: ... + def grow(self, amount) -> None: ... + def shrink(self, amount) -> None: ... + def stop(self, timeout: int = ...) -> None: ... + @property + def qsize(self) -> int: ... diff --git a/lib/cheroot/wsgi.py b/lib/cheroot/wsgi.py index 6635f528..583d52a9 100644 --- a/lib/cheroot/wsgi.py +++ b/lib/cheroot/wsgi.py @@ -154,7 +154,7 @@ class Gateway(server.Gateway): # "The application may call start_response more than once, # if and only if the exc_info argument is provided." if self.started_response and not exc_info: - raise AssertionError( + raise RuntimeError( 'WSGI start_response called a second ' 'time with no exc_info.', ) @@ -209,7 +209,7 @@ class Gateway(server.Gateway): data from the iterable returned by the WSGI application). 
""" if not self.started_response: - raise AssertionError('WSGI write called before start_response.') + raise RuntimeError('WSGI write called before start_response.') chunklen = len(chunk) rbo = self.remaining_bytes_out diff --git a/lib/cheroot/wsgi.pyi b/lib/cheroot/wsgi.pyi new file mode 100644 index 00000000..b4851a3d --- /dev/null +++ b/lib/cheroot/wsgi.pyi @@ -0,0 +1,42 @@ +from . import server +from typing import Any + +class Server(server.HTTPServer): + wsgi_version: Any + wsgi_app: Any + request_queue_size: Any + timeout: Any + shutdown_timeout: Any + requests: Any + def __init__(self, bind_addr, wsgi_app, numthreads: int = ..., server_name: Any | None = ..., max: int = ..., request_queue_size: int = ..., timeout: int = ..., shutdown_timeout: int = ..., accepted_queue_size: int = ..., accepted_queue_timeout: int = ..., peercreds_enabled: bool = ..., peercreds_resolve_enabled: bool = ...) -> None: ... + @property + def numthreads(self): ... + @numthreads.setter + def numthreads(self, value) -> None: ... + +class Gateway(server.Gateway): + started_response: bool + env: Any + remaining_bytes_out: Any + def __init__(self, req) -> None: ... + @classmethod + def gateway_map(cls): ... + def get_environ(self) -> None: ... + def respond(self) -> None: ... + def start_response(self, status, headers, exc_info: Any | None = ...): ... + def write(self, chunk) -> None: ... + +class Gateway_10(Gateway): + version: Any + def get_environ(self): ... + +class Gateway_u0(Gateway_10): + version: Any + def get_environ(self): ... + +wsgi_gateways: Any + +class PathInfoDispatcher: + apps: Any + def __init__(self, apps): ... + def __call__(self, environ, start_response): ... diff --git a/lib/jaraco/functools.py b/lib/jaraco/functools.py index 3b1f95b7..fcdbb4f9 100644 --- a/lib/jaraco/functools.py +++ b/lib/jaraco/functools.py @@ -7,14 +7,20 @@ import itertools import more_itertools +from typing import Callable, TypeVar + + +CallableT = TypeVar("CallableT", bound=Callable[..., object]) + def compose(*funcs): """ Compose any number of unary functions into a single unary function. >>> import textwrap - >>> stripped = str.strip(textwrap.dedent(compose.__doc__)) - >>> compose(str.strip, textwrap.dedent)(compose.__doc__) == stripped + >>> expected = str.strip(textwrap.dedent(compose.__doc__)) + >>> strip_and_dedent = compose(str.strip, textwrap.dedent) + >>> strip_and_dedent(compose.__doc__) == expected True Compose also allows the innermost function to take arbitrary arguments. @@ -91,7 +97,12 @@ def once(func): return wrapper -def method_cache(method, cache_wrapper=None): +def method_cache( + method: CallableT, + cache_wrapper: Callable[ + [CallableT], CallableT + ] = functools.lru_cache(), # type: ignore[assignment] +) -> CallableT: """ Wrap lru_cache to support storing the cache data in the object instances. @@ -158,19 +169,22 @@ def method_cache(method, cache_wrapper=None): http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ for another implementation and additional justification. 
""" - cache_wrapper = cache_wrapper or functools.lru_cache() - def wrapper(self, *args, **kwargs): + def wrapper(self: object, *args: object, **kwargs: object) -> object: # it's the first call, replace the method with a cached, bound method - bound_method = types.MethodType(method, self) + bound_method: CallableT = types.MethodType( # type: ignore[assignment] + method, self + ) cached_method = cache_wrapper(bound_method) setattr(self, method.__name__, cached_method) return cached_method(*args, **kwargs) # Support cache clear even before cache has been created. - wrapper.cache_clear = lambda: None + wrapper.cache_clear = lambda: None # type: ignore[attr-defined] - return _special_method_cache(method, cache_wrapper) or wrapper + return ( # type: ignore[return-value] + _special_method_cache(method, cache_wrapper) or wrapper + ) def _special_method_cache(method, cache_wrapper): @@ -210,13 +224,16 @@ def apply(transform): >>> @apply(reversed) ... def get_numbers(start): + ... "doc for get_numbers" ... return range(start, start+3) >>> list(get_numbers(4)) [6, 5, 4] + >>> get_numbers.__doc__ + 'doc for get_numbers' """ def wrap(func): - return compose(transform, func) + return functools.wraps(func)(compose(transform, func)) return wrap @@ -233,6 +250,8 @@ def result_invoke(action): ... return a + b >>> x = add_two(2, 3) 5 + >>> x + 5 """ def wrap(func): diff --git a/requirements.txt b/requirements.txt index 7d3d7922..c7c99332 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ backports.zoneinfo==0.2.1 beautifulsoup4==4.10.0 bleach==4.1.0 certifi==2021.10.8 -cheroot==8.5.2 +cheroot==8.6.0 cherrypy==18.6.1 cloudinary==1.28.0 distro==1.6.0