Initial commit (Clean history)
This commit is contained in:
6
path/to/venv/lib/python3.12/site-packages/h2/__init__.py
Normal file
6
path/to/venv/lib/python3.12/site-packages/h2/__init__.py
Normal file
@@ -0,0 +1,6 @@
|
||||
"""
|
||||
HTTP/2 protocol implementation for Python.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
__version__ = "4.3.0"
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
212
path/to/venv/lib/python3.12/site-packages/h2/config.py
Normal file
212
path/to/venv/lib/python3.12/site-packages/h2/config.py
Normal file
@@ -0,0 +1,212 @@
|
||||
"""
|
||||
h2/config
|
||||
~~~~~~~~~
|
||||
|
||||
Objects for controlling the configuration of the HTTP/2 stack.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from typing import Any
|
||||
|
||||
|
||||
class _BooleanConfigOption:
|
||||
"""
|
||||
Descriptor for handling a boolean config option. This will block
|
||||
attempts to set boolean config options to non-bools.
|
||||
"""
|
||||
|
||||
def __init__(self, name: str) -> None:
|
||||
self.name = name
|
||||
self.attr_name = f"_{self.name}"
|
||||
|
||||
def __get__(self, instance: Any, owner: Any) -> bool:
|
||||
return getattr(instance, self.attr_name) # type: ignore
|
||||
|
||||
def __set__(self, instance: Any, value: bool) -> None:
|
||||
if not isinstance(value, bool):
|
||||
msg = f"{self.name} must be a bool"
|
||||
raise ValueError(msg) # noqa: TRY004
|
||||
setattr(instance, self.attr_name, value)
|
||||
|
||||
|
||||
class DummyLogger:
    """
    A Logger object that performs no actual logging, hence a DummyLogger.

    Each log operation is a no-op. The intent is to avoid sprinkling
    conditionals throughout the h2 code for calls to logging functions
    when no logger is passed into the corresponding object.
    """

    def __init__(self, *vargs) -> None:  # type: ignore
        # Accept (and discard) any constructor arguments, e.g. a logger name.
        pass

    def debug(self, *vargs, **kwargs) -> None:  # type: ignore
        """
        No-op logging. Only level needed for now.
        """
        return None

    def trace(self, *vargs, **kwargs) -> None:  # type: ignore
        """
        No-op logging. Only level needed for now.
        """
        return None
|
||||
|
||||
|
||||
class OutputLogger:
    """
    A Logger object that prints to stderr or any other file-like object.

    This class is provided for convenience and not part of the stable API.

    :param file: A file-like object passed to the print function.
        Defaults to ``sys.stderr``.
    :param trace_level: Enables trace-level output. Defaults to ``False``.
    """

    def __init__(self, file=None, trace_level=False) -> None:  # type: ignore
        super().__init__()
        self.file = file or sys.stderr
        self.trace_level = trace_level

    def debug(self, fmtstr, *args) -> None:  # type: ignore
        # %-style formatting is applied lazily from the caller's args,
        # mirroring the stdlib logging call convention.
        print(f"h2 (debug): {fmtstr % args}", file=self.file)

    def trace(self, fmtstr, *args) -> None:  # type: ignore
        # Trace output is opt-in: silently dropped unless trace_level is set.
        if self.trace_level:
            print(f"h2 (trace): {fmtstr % args}", file=self.file)
|
||||
|
||||
|
||||
class H2Configuration:
    """
    Controls the behaviour of a single HTTP/2 connection.

    This object mostly just ensures that the configuration it carries is
    self-consistent; it has very little behaviour of its own. It lets users
    enable or disable optional features and tune unusual behaviours.

    :param client_side: Whether this object is used on the client side of a
        connection rather than the server side. Affects the state-machine
        logic, default settings values, allowable stream IDs, and several
        other properties. Defaults to ``True``.
    :type client_side: ``bool``

    :param header_encoding: Whether headers emitted by this object in events
        are transparently decoded to ``unicode`` strings, and which encoding
        is used to do so. Defaults to ``None``, meaning headers are returned
        as bytes; set it to an encoding name such as ``'utf-8'`` to decode
        headers automatically.

        .. versionchanged:: 3.0.0
           Changed default value from ``'utf-8'`` to ``None``

    :type header_encoding: ``str``, ``False``, or ``None``

    :param validate_outbound_headers: Whether the headers emitted by this
        object are validated against the rules in RFC 7540. Disabling this
        skips outbound header validation, allowing the object to emit headers
        that may be illegal according to RFC 7540. Defaults to ``True``.
    :type validate_outbound_headers: ``bool``

    :param normalize_outbound_headers: Whether the headers emitted by this
        object are normalized before sending. Disabling this skips outbound
        header normalization, allowing the object to emit headers that may be
        illegal according to RFC 7540. Defaults to ``True``.
    :type normalize_outbound_headers: ``bool``

    :param split_outbound_cookies: Whether outbound cookie headers are split
        before sending. RFC 7540 Section 8.1.2.5 permits splitting outbound
        cookie headers to improve header compression. Default is ``False``.
    :type split_outbound_cookies: ``bool``

    :param validate_inbound_headers: Whether the headers received by this
        object are validated against the rules in RFC 7540. Disabling this
        skips inbound header validation, allowing the object to receive
        headers that may be illegal according to RFC 7540. Defaults to
        ``True``.
    :type validate_inbound_headers: ``bool``

    :param normalize_inbound_headers: Whether the headers received by this
        object are normalized according to the rules of RFC 7540. Disabling
        this may lead to h2 emitting header blocks that some RFCs forbid,
        e.g. with multiple cookie fields.

        .. versionadded:: 3.0.0

    :type normalize_inbound_headers: ``bool``

    :param logger: A logger that conforms to the requirements for this
        module: no I/O and no context switches, which is needed in order to
        run in asynchronous operation.

        .. versionadded:: 2.6.0

    :type logger: ``logging.Logger``
    """

    # Boolean options are backed by validating descriptors so assigning a
    # non-bool raises immediately rather than corrupting the config.
    client_side = _BooleanConfigOption("client_side")
    validate_outbound_headers = _BooleanConfigOption("validate_outbound_headers")
    normalize_outbound_headers = _BooleanConfigOption("normalize_outbound_headers")
    split_outbound_cookies = _BooleanConfigOption("split_outbound_cookies")
    validate_inbound_headers = _BooleanConfigOption("validate_inbound_headers")
    normalize_inbound_headers = _BooleanConfigOption("normalize_inbound_headers")

    def __init__(self,
                 client_side: bool = True,
                 header_encoding: bool | str | None = None,
                 validate_outbound_headers: bool = True,
                 normalize_outbound_headers: bool = True,
                 split_outbound_cookies: bool = False,
                 validate_inbound_headers: bool = True,
                 normalize_inbound_headers: bool = True,
                 logger: DummyLogger | OutputLogger | None = None) -> None:
        self.client_side = client_side
        self.header_encoding = header_encoding
        self.validate_outbound_headers = validate_outbound_headers
        self.normalize_outbound_headers = normalize_outbound_headers
        self.split_outbound_cookies = split_outbound_cookies
        self.validate_inbound_headers = validate_inbound_headers
        self.normalize_inbound_headers = normalize_inbound_headers
        # Fall back to a no-op logger so callers never need None checks.
        self.logger = logger or DummyLogger(__name__)

    @property
    def header_encoding(self) -> bool | str | None:
        """
        Whether the headers emitted by this object in events are
        transparently decoded to ``unicode`` strings, and which encoding is
        used to do that decoding. ``None`` (the default) means headers are
        returned as bytes; an encoding name such as ``'utf-8'`` enables
        automatic decoding.
        """
        return self._header_encoding

    @header_encoding.setter
    def header_encoding(self, value: bool | str | None) -> None:
        """
        Enforces constraints on the value of header encoding.
        """
        if not isinstance(value, (bool, str, type(None))):
            raise ValueError("header_encoding must be bool, string, or None")  # noqa: TRY004
        if value is True:
            # True is ambiguous (which encoding?); only False/None/str allowed.
            raise ValueError("header_encoding cannot be True")
        self._header_encoding = value
|
||||
2110
path/to/venv/lib/python3.12/site-packages/h2/connection.py
Normal file
2110
path/to/venv/lib/python3.12/site-packages/h2/connection.py
Normal file
File diff suppressed because it is too large
Load Diff
77
path/to/venv/lib/python3.12/site-packages/h2/errors.py
Normal file
77
path/to/venv/lib/python3.12/site-packages/h2/errors.py
Normal file
@@ -0,0 +1,77 @@
|
||||
"""
|
||||
h2/errors
|
||||
~~~~~~~~~
|
||||
|
||||
Global error code registry containing the established HTTP/2 error codes.
|
||||
|
||||
The current registry is available at:
|
||||
https://tools.ietf.org/html/rfc7540#section-11.4
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import enum
|
||||
|
||||
|
||||
class ErrorCodes(enum.IntEnum):
    """
    All known HTTP/2 error codes.

    .. versionadded:: 2.5.0
    """

    # Values 0x0 through 0xd are registered in RFC 7540 Section 11.4.

    #: Graceful shutdown.
    NO_ERROR = 0x0

    #: Protocol error detected.
    PROTOCOL_ERROR = 0x1

    #: Implementation fault.
    INTERNAL_ERROR = 0x2

    #: Flow-control limits exceeded.
    FLOW_CONTROL_ERROR = 0x3

    #: Settings not acknowledged.
    SETTINGS_TIMEOUT = 0x4

    #: Frame received for closed stream.
    STREAM_CLOSED = 0x5

    #: Frame size incorrect.
    FRAME_SIZE_ERROR = 0x6

    #: Stream not processed.
    REFUSED_STREAM = 0x7

    #: Stream cancelled.
    CANCEL = 0x8

    #: Compression state not updated.
    COMPRESSION_ERROR = 0x9

    #: TCP connection error for CONNECT method.
    CONNECT_ERROR = 0xa

    #: Processing capacity exceeded.
    ENHANCE_YOUR_CALM = 0xb

    #: Negotiated TLS parameters not acceptable.
    INADEQUATE_SECURITY = 0xc

    #: Use HTTP/1.1 for the request.
    HTTP_1_1_REQUIRED = 0xd
|
||||
|
||||
|
||||
def _error_code_from_int(code: int) -> ErrorCodes | int:
    """
    Given an integer error code, returns either one of :class:`ErrorCodes
    <h2.errors.ErrorCodes>` or, if not present in the known set of codes,
    returns the integer directly.
    """
    try:
        return ErrorCodes(code)
    except ValueError:
        # Unknown or extension-defined code: pass the raw integer through.
        return code


# Only the public enum is exported; the helper above is internal.
__all__ = ["ErrorCodes"]
|
||||
679
path/to/venv/lib/python3.12/site-packages/h2/events.py
Normal file
679
path/to/venv/lib/python3.12/site-packages/h2/events.py
Normal file
@@ -0,0 +1,679 @@
|
||||
"""
|
||||
h2/events
|
||||
~~~~~~~~~
|
||||
|
||||
Defines Event types for HTTP/2.
|
||||
|
||||
Events are returned by the H2 state machine to allow implementations to keep
|
||||
track of events triggered by receiving data. Each time data is provided to the
|
||||
H2 state machine it processes the data and returns a list of Event objects.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import binascii
|
||||
import sys
|
||||
from dataclasses import dataclass
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from .settings import ChangedSetting, SettingCodes, Settings, _setting_code_from_int
|
||||
|
||||
if TYPE_CHECKING: # pragma: no cover
|
||||
from hpack.struct import Header
|
||||
from hyperframe.frame import Frame
|
||||
|
||||
from .errors import ErrorCodes
|
||||
|
||||
|
||||
# dataclasses only accept kw_only=True on Python 3.10+, so the keyword is
# applied conditionally via this dict of decorator arguments.
if sys.version_info < (3, 10):  # pragma: no cover
    kw_only: dict[str, bool] = {}
else:  # pragma: no cover
    kw_only = {"kw_only": True}


_LAZY_INIT: Any = object()
"""
Some h2 events are instantiated by the state machine, but its attributes are
subsequently populated by H2Stream. To make this work with strict type annotations
on the events, they are temporarily set to this placeholder value.
This value should never be exposed to users.
"""
|
||||
|
||||
|
||||
class Event:
    """
    Base class for h2 events.

    Carries no behaviour of its own; it exists so callers can type-check
    and dispatch on the common event supertype.
    """
|
||||
|
||||
|
||||
@dataclass(**kw_only)
class RequestReceived(Event):
    """
    Fired once a complete request header block has been received, carrying
    the HTTP headers for the given request and the ID of the new stream.

    In HTTP/2, headers may be sent as a HEADERS frame followed by zero or
    more CONTINUATION frames, the final frame setting the END_HEADERS flag.
    This event fires only after that entire sequence is received.

    .. versionchanged:: 2.3.0
       Changed the type of ``headers`` to :class:`HeaderTuple
       <hpack:hpack.HeaderTuple>`. This has no effect on current users.

    .. versionchanged:: 2.4.0
       Added ``stream_ended`` and ``priority_updated`` properties.
    """

    stream_id: int
    """The Stream ID for the stream this request was made on."""

    headers: list[Header] = _LAZY_INIT
    """The request headers."""

    stream_ended: StreamEnded | None = None
    """
    The associated :class:`StreamEnded <h2.events.StreamEnded>` event, if
    this request also ended the stream.

    .. versionadded:: 2.4.0
    """

    priority_updated: PriorityUpdated | None = None
    """
    The associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
    event, if this request carried priority information.

    .. versionadded:: 2.4.0
    """

    def __repr__(self) -> str:
        return "<RequestReceived stream_id:{}, headers:{}>".format(
            self.stream_id, self.headers,
        )
|
||||
|
||||
|
||||
@dataclass(**kw_only)
class ResponseReceived(Event):
    """
    Fired whenever response headers are received, carrying the HTTP headers
    for the given response and the ID of the stream they arrived on.

    .. versionchanged:: 2.3.0
       Changed the type of ``headers`` to :class:`HeaderTuple
       <hpack:hpack.HeaderTuple>`. This has no effect on current users.

    .. versionchanged:: 2.4.0
       Added ``stream_ended`` and ``priority_updated`` properties.
    """

    stream_id: int
    """The Stream ID for the stream this response was made on."""

    headers: list[Header] = _LAZY_INIT
    """The response headers."""

    stream_ended: StreamEnded | None = None
    """
    The associated :class:`StreamEnded <h2.events.StreamEnded>` event, if
    this response also ended the stream.

    .. versionadded:: 2.4.0
    """

    priority_updated: PriorityUpdated | None = None
    """
    The associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
    event, if this response carried priority information.

    .. versionadded:: 2.4.0
    """

    def __repr__(self) -> str:
        return "<ResponseReceived stream_id:{}, headers:{}>".format(
            self.stream_id, self.headers,
        )
|
||||
|
||||
|
||||
@dataclass(**kw_only)
class TrailersReceived(Event):
    """
    Fired whenever trailers are received on a stream. Trailers are a set of
    headers sent after the body of the request/response, used to provide
    information that wasn't known ahead of time (e.g. content-length).
    Carries the trailing header fields and the ID of the stream on which
    they were received.

    .. versionchanged:: 2.3.0
       Changed the type of ``headers`` to :class:`HeaderTuple
       <hpack:hpack.HeaderTuple>`. This has no effect on current users.

    .. versionchanged:: 2.4.0
       Added ``stream_ended`` and ``priority_updated`` properties.
    """

    stream_id: int
    """The Stream ID for the stream on which these trailers were received."""

    headers: list[Header] = _LAZY_INIT
    """The trailers themselves."""

    stream_ended: StreamEnded | None = None
    """
    Trailers always end streams. This property has the associated
    :class:`StreamEnded <h2.events.StreamEnded>` in it.

    .. versionadded:: 2.4.0
    """

    priority_updated: PriorityUpdated | None = None
    """
    The associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
    event, if the trailers carried priority information.

    .. versionadded:: 2.4.0
    """

    def __repr__(self) -> str:
        return "<TrailersReceived stream_id:{}, headers:{}>".format(
            self.stream_id, self.headers,
        )
|
||||
|
||||
|
||||
class _HeadersSent(Event):
    """
    The _HeadersSent event is fired whenever headers are sent.

    This is an internal event, used to determine validation steps on
    outgoing header blocks. It is never surfaced to users.
    """
|
||||
|
||||
|
||||
|
||||
class _ResponseSent(_HeadersSent):
    """
    The _ResponseSent event is fired whenever response headers are sent
    on a stream.

    This is an internal event, used to determine validation steps on
    outgoing header blocks. It is never surfaced to users.
    """
|
||||
|
||||
|
||||
|
||||
class _RequestSent(_HeadersSent):
    """
    The _RequestSent event is fired whenever request headers are sent
    on a stream.

    This is an internal event, used to determine validation steps on
    outgoing header blocks. It is never surfaced to users.
    """
|
||||
|
||||
|
||||
|
||||
class _TrailersSent(_HeadersSent):
    """
    The _TrailersSent event is fired whenever trailers are sent on a
    stream. Trailers are a set of headers sent after the body of the
    request/response, and are used to provide information that wasn't known
    ahead of time (e.g. content-length).

    This is an internal event, used to determine validation steps on
    outgoing header blocks. It is never surfaced to users.
    """
|
||||
|
||||
|
||||
|
||||
class _PushedRequestSent(_HeadersSent):
    """
    The _PushedRequestSent event is fired whenever pushed request headers
    are sent.

    This is an internal event, used to determine validation steps on
    outgoing header blocks. It is never surfaced to users.
    """
|
||||
|
||||
|
||||
@dataclass(**kw_only)
class InformationalResponseReceived(Event):
    """
    Fired when an informational response (one whose status code is a 1XX
    code) is received from the remote peer.

    The remote peer may send any number of these, from zero upwards. They
    are most commonly sent in response to requests carrying the
    ``expect: 100-continue`` header field. Most users can safely ignore
    this event unless they intend to use the ``expect: 100-continue``
    flow, or are for any reason expecting a different 1XX status code.

    .. versionadded:: 2.2.0

    .. versionchanged:: 2.3.0
       Changed the type of ``headers`` to :class:`HeaderTuple
       <hpack:hpack.HeaderTuple>`. This has no effect on current users.

    .. versionchanged:: 2.4.0
       Added ``priority_updated`` property.
    """

    stream_id: int
    """The Stream ID for the stream this informational response was made on."""

    headers: list[Header] = _LAZY_INIT
    """The headers for this informational response."""

    priority_updated: PriorityUpdated | None = None
    """
    The associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
    event, if this response carried priority information.

    .. versionadded:: 2.4.0
    """

    def __repr__(self) -> str:
        return "<InformationalResponseReceived stream_id:{}, headers:{}>".format(
            self.stream_id, self.headers,
        )
|
||||
|
||||
|
||||
@dataclass(**kw_only)
class DataReceived(Event):
    """
    Fired whenever data is received on a stream from the remote peer.
    Carries the data itself and the stream ID on which it was received.

    .. versionchanged:: 2.4.0
       Added ``stream_ended`` property.
    """

    stream_id: int
    """The Stream ID for the stream this data was received on."""

    data: bytes = _LAZY_INIT
    """The data itself."""

    flow_controlled_length: int = _LAZY_INIT
    """
    The amount of data received that counts against the flow control
    window. Note that padding counts against the flow control window, so
    when adjusting flow control you should always use this field rather
    than ``len(data)``.
    """

    stream_ended: StreamEnded | None = None
    """
    The associated :class:`StreamEnded <h2.events.StreamEnded>` event, if
    this data chunk also completed the stream.

    .. versionadded:: 2.4.0
    """

    def __repr__(self) -> str:
        # Show at most a 20-byte preview of the payload.
        preview = _bytes_representation(self.data[:20]) if self.data else ""
        return (
            f"<DataReceived stream_id:{self.stream_id}, "
            f"flow_controlled_length:{self.flow_controlled_length}, "
            f"data:{preview}>"
        )
|
||||
|
||||
|
||||
@dataclass(**kw_only)
class WindowUpdated(Event):
    """
    Fired whenever a flow-control window changes size. HTTP/2 defines
    flow-control windows for connections and streams, and this event fires
    for both. Carries the ID of the stream the update applies to (zero when
    it applies to the connection) and the delta in window size.
    """

    stream_id: int
    """
    The Stream ID of the stream whose flow control window was changed.
    May be ``0`` if the connection window was changed.
    """

    delta: int = _LAZY_INIT
    """
    The window delta.
    """

    def __repr__(self) -> str:
        return "<WindowUpdated stream_id:{}, delta:{}>".format(
            self.stream_id, self.delta,
        )
|
||||
|
||||
|
||||
class RemoteSettingsChanged(Event):
    """
    Fired whenever the remote peer changes its settings. Contains a complete
    inventory of changed settings, including their previous values.

    In HTTP/2, settings changes need to be acknowledged; h2 acknowledges
    them automatically for efficiency. However, it is possible that the
    caller may not be happy with a changed setting. On receiving this event,
    the caller should confirm that the new settings are acceptable, and if
    they are not, close the connection with the error code
    :data:`PROTOCOL_ERROR <h2.errors.ErrorCodes.PROTOCOL_ERROR>`.

    .. versionchanged:: 2.0.0
       Prior to this version the user needed to acknowledge settings changes.
       This is no longer the case: h2 now automatically acknowledges
       them.
    """

    def __init__(self) -> None:
        #: A dictionary of setting byte to
        #: :class:`ChangedSetting <h2.settings.ChangedSetting>`, representing
        #: the changed settings.
        self.changed_settings: dict[int, ChangedSetting] = {}

    @classmethod
    def from_settings(cls,
                      old_settings: Settings | dict[int, int],
                      new_settings: dict[int, int]) -> RemoteSettingsChanged:
        """
        Build a RemoteSettingsChanged event from a set of changed settings.

        :param old_settings: A complete collection of old settings, in the form
            of a dictionary of ``{setting: value}``.
        :param new_settings: All the changed settings and their new values, in
            the form of a dictionary of ``{setting: value}``.
        """
        event = cls()
        for raw_code, new_value in new_settings.items():
            # Normalise the raw integer to a SettingCodes member when known.
            setting = _setting_code_from_int(raw_code)
            event.changed_settings[setting] = ChangedSetting(
                setting, old_settings.get(setting), new_value,
            )
        return event

    def __repr__(self) -> str:
        changed = ", ".join(repr(cs) for cs in self.changed_settings.values())
        return f"<RemoteSettingsChanged changed_settings:{{{changed}}}>"
|
||||
|
||||
|
||||
@dataclass(**kw_only)
class PingReceived(Event):
    """
    Fired whenever a PING is received, carrying the 'opaque data' of the
    PING frame. A ping acknowledgment with the same 'opaque data' is
    automatically emitted after receiving a ping.

    .. versionadded:: 3.1.0
    """

    ping_data: bytes
    """The data included on the ping."""

    def __repr__(self) -> str:
        return "<PingReceived ping_data:{}>".format(
            _bytes_representation(self.ping_data),
        )
|
||||
|
||||
|
||||
@dataclass(**kw_only)
class PingAckReceived(Event):
    """
    Fired whenever a PING acknowledgment is received, carrying the 'opaque
    data' of the PING+ACK frame so that the user can correlate PINGs and
    calculate RTT.

    .. versionadded:: 3.1.0

    .. versionchanged:: 4.0.0
       Removed deprecated but equivalent ``PingAcknowledged``.
    """

    ping_data: bytes
    """The data included on the ping."""

    def __repr__(self) -> str:
        return "<PingAckReceived ping_data:{}>".format(
            _bytes_representation(self.ping_data),
        )
|
||||
|
||||
|
||||
@dataclass(**kw_only)
class StreamEnded(Event):
    """
    Fired whenever a stream is ended by a remote party. The stream may not
    be fully closed if it has not been closed locally, but no further data
    or headers should be expected on that stream.
    """

    stream_id: int
    """The Stream ID of the stream that was closed."""

    def __repr__(self) -> str:
        return "<StreamEnded stream_id:{}>".format(self.stream_id)
|
||||
|
||||
|
||||
@dataclass(**kw_only)
class StreamReset(Event):
    """
    Fired in two situations: when the remote party forcefully resets a
    stream, and when the remote party makes a protocol error affecting only
    a single stream, in which case h2 terminates the stream early and
    returns this event.

    .. versionchanged:: 2.0.0
       This event is now fired when h2 automatically resets a stream.
    """

    stream_id: int
    """
    The Stream ID of the stream that was reset.
    """

    error_code: ErrorCodes | int = _LAZY_INIT
    """
    The error code given.
    """

    remote_reset: bool = True
    """
    Whether the remote peer sent a RST_STREAM or we did.
    """

    def __repr__(self) -> str:
        return "<StreamReset stream_id:{}, error_code:{!s}, remote_reset:{}>".format(
            self.stream_id, self.error_code, self.remote_reset,
        )
|
||||
|
||||
|
||||
class PushedStreamReceived(Event):
    """
    Fired whenever a pushed stream has been received from a remote peer.
    Carries the new stream ID, the ID of the parent stream, and the request
    headers pushed by the remote peer.
    """

    def __init__(self) -> None:
        #: The Stream ID of the stream created by the push.
        self.pushed_stream_id: int | None = None

        #: The Stream ID of the stream that the push is related to.
        self.parent_stream_id: int | None = None

        #: The request headers, sent by the remote party in the push.
        self.headers: list[Header] | None = None

    def __repr__(self) -> str:
        return (
            "<PushedStreamReceived pushed_stream_id:{}, parent_stream_id:{}, "
            "headers:{}>".format(
                self.pushed_stream_id, self.parent_stream_id, self.headers,
            )
        )
|
||||
|
||||
|
||||
class SettingsAcknowledged(Event):
    """
    Fired whenever a settings ACK is received from the remote peer. Carries
    the settings that were acknowledged, in the same format as
    :class:`h2.events.RemoteSettingsChanged`.
    """

    def __init__(self) -> None:
        #: A dictionary of setting byte to
        #: :class:`ChangedSetting <h2.settings.ChangedSetting>`, representing
        #: the changed settings.
        self.changed_settings: dict[SettingCodes | int, ChangedSetting] = {}

    def __repr__(self) -> str:
        changed = ", ".join(repr(cs) for cs in self.changed_settings.values())
        return "<SettingsAcknowledged changed_settings:{{{}}}>".format(changed)
|
||||
|
||||
|
||||
class PriorityUpdated(Event):
    """
    Fired whenever a stream sends updated priority information, either when
    the stream is opened or at any time during the stream's lifetime.

    This event is purely advisory, and does not need to be acted on.

    .. versionadded:: 2.0.0
    """

    def __init__(self) -> None:
        #: The ID of the stream whose priority information is being updated.
        self.stream_id: int | None = None

        #: The new stream weight. May be the same as the original stream
        #: weight. An integer between 1 and 256.
        self.weight: int | None = None

        #: The stream ID this stream now depends on. May be ``0``.
        self.depends_on: int | None = None

        #: Whether the stream *exclusively* depends on the parent stream. If it
        #: does, this stream should inherit the current children of its new
        #: parent.
        self.exclusive: bool | None = None

    def __repr__(self) -> str:
        return (
            "<PriorityUpdated stream_id:{}, weight:{}, depends_on:{}, "
            "exclusive:{}>".format(
                self.stream_id, self.weight, self.depends_on, self.exclusive,
            )
        )
|
||||
|
||||
|
||||
class ConnectionTerminated(Event):
    """
    Fired when a connection is torn down by the remote peer using a GOAWAY
    frame. Once received, no further action may be taken on the connection:
    a new connection must be established.
    """

    def __init__(self) -> None:
        #: The error code cited when tearing down the connection. Should be
        #: one of :class:`ErrorCodes <h2.errors.ErrorCodes>`, but may not be if
        #: unknown HTTP/2 extensions are being used.
        self.error_code: ErrorCodes | int | None = None

        #: The stream ID of the last stream the remote peer saw. This can
        #: provide an indication of what data, if any, never reached the remote
        #: peer and so can safely be resent.
        self.last_stream_id: int | None = None

        #: Additional debug data that can be appended to GOAWAY frame.
        self.additional_data: bytes | None = None

    def __repr__(self) -> str:
        # Show at most a 20-byte preview of the GOAWAY debug data.
        debug_data = self.additional_data[:20] if self.additional_data else None
        return (
            f"<ConnectionTerminated error_code:{self.error_code!s}, "
            f"last_stream_id:{self.last_stream_id}, "
            f"additional_data:{_bytes_representation(debug_data)}>"
        )
|
||||
|
||||
|
||||
class AlternativeServiceAvailable(Event):
    """
    Fired when the remote peer advertises an `RFC 7838
    <https://tools.ietf.org/html/rfc7838>`_ Alternative Service using an
    ALTSVC frame.

    The event always carries the origin to which the ALTSVC information
    applies. That origin is either supplied by the server directly, or
    inferred by h2 from the ``:authority`` pseudo-header field that was sent
    by the user when initiating a given stream.

    It also carries what RFC 7838 calls the "Alternative Service Field
    Value", which is formatted like a HTTP header field and contains the
    relevant alternative service information. h2 does not parse or in any
    way modify that information: the user is required to do that.

    This event can only be fired on the client end of a connection.

    .. versionadded:: 2.3.0
    """

    def __init__(self) -> None:
        #: The origin to which the alternative service field value applies.
        #: This field is either supplied by the server directly, or inferred
        #: by h2 from the ``:authority`` pseudo-header field that was sent by
        #: the user when initiating the stream on which the frame was
        #: received.
        self.origin: bytes | None = None

        #: The ALTSVC field value, left exactly as sent by the server (h2
        #: does not parse it). The structure of the data in this field is
        #: given by `RFC 7838 Section 3
        #: <https://tools.ietf.org/html/rfc7838#section-3>`_.
        self.field_value: bytes | None = None

    def __repr__(self) -> str:
        # Decode defensively for display: either attribute may still be None,
        # and the bytes are not guaranteed to be valid UTF-8.
        origin = (self.origin or b"").decode("utf-8", "ignore")
        field_value = (self.field_value or b"").decode("utf-8", "ignore")
        return f"<AlternativeServiceAvailable origin:{origin}, field_value:{field_value}>"
|
||||
|
||||
|
||||
@dataclass(**kw_only)
class UnknownFrameReceived(Event):
    """
    The UnknownFrameReceived event is fired when the remote peer sends a frame
    that h2 does not understand. This occurs primarily when the remote
    peer is employing HTTP/2 extensions that h2 doesn't know anything
    about.

    RFC 7540 requires that HTTP/2 implementations ignore these frames. h2
    does so. However, this event is fired to allow implementations to perform
    special processing on those frames if needed (e.g. if the implementation
    is capable of handling the frame itself).

    .. versionadded:: 2.7.0
    """

    #: The raw frame as parsed off the wire; h2 performs no further
    #: processing on it.
    frame: Frame

    def __repr__(self) -> str:
        # Deliberately omits the frame contents: the payload is opaque to h2.
        return "<UnknownFrameReceived>"
|
||||
|
||||
|
||||
def _bytes_representation(data: bytes | None) -> str | None:
    """
    Convert a bytestring into an ASCII hex string that is safe to print on
    all Python platforms, passing ``None`` straight through.

    This function is relatively expensive, so it should not be called on the
    mainline of the code. It's safe to use in things like object repr methods
    though.
    """
    return None if data is None else binascii.hexlify(data).decode("ascii")
|
||||
194
path/to/venv/lib/python3.12/site-packages/h2/exceptions.py
Normal file
194
path/to/venv/lib/python3.12/site-packages/h2/exceptions.py
Normal file
@@ -0,0 +1,194 @@
|
||||
"""
|
||||
h2/exceptions
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
Exceptions for the HTTP/2 module.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from .errors import ErrorCodes
|
||||
|
||||
|
||||
class H2Error(Exception):
    """
    The base class for all exceptions for the HTTP/2 module. Every
    library-defined exception derives from this class, so callers can catch
    it to handle any h2-raised error.
    """
|
||||
|
||||
|
||||
class ProtocolError(H2Error):
    """
    An action was attempted in violation of the HTTP/2 protocol.
    """

    #: The HTTP/2 error code that corresponds to this kind of protocol error.
    error_code = ErrorCodes.PROTOCOL_ERROR
|
||||
|
||||
|
||||
class FrameTooLargeError(ProtocolError):
    """
    The frame that we tried to send or that we received was too large.
    """

    #: The HTTP/2 error code that corresponds to this kind of protocol error.
    error_code = ErrorCodes.FRAME_SIZE_ERROR
|
||||
|
||||
|
||||
class FrameDataMissingError(ProtocolError):
    """
    The frame that we received is missing some data.

    .. versionadded:: 2.0.0
    """

    #: The HTTP/2 error code that corresponds to this kind of protocol error.
    error_code = ErrorCodes.FRAME_SIZE_ERROR
|
||||
|
||||
|
||||
class TooManyStreamsError(ProtocolError):
    """
    An attempt was made to open a stream that would lead to too many concurrent
    streams.
    """

    # No error_code override: the generic PROTOCOL_ERROR inherited from
    # ProtocolError applies.
|
||||
|
||||
|
||||
|
||||
class FlowControlError(ProtocolError):
    """
    An attempted action violates flow control constraints.
    """

    #: The HTTP/2 error code that corresponds to this kind of protocol error.
    error_code = ErrorCodes.FLOW_CONTROL_ERROR
|
||||
|
||||
|
||||
class StreamIDTooLowError(ProtocolError):
    """
    An attempt was made to open a stream whose ID is lower than the highest
    stream ID already seen on this connection.
    """

    def __init__(self, stream_id: int, max_stream_id: int) -> None:
        #: The ID of the stream that we attempted to open.
        self.stream_id = stream_id
        #: The current highest-seen stream ID.
        self.max_stream_id = max_stream_id

    def __str__(self) -> str:
        return (
            "StreamIDTooLowError: "
            f"{self.stream_id} is lower than {self.max_stream_id}"
        )
|
||||
|
||||
|
||||
class NoAvailableStreamIDError(ProtocolError):
    """
    There are no available stream IDs left to the connection. All stream IDs
    have been exhausted.

    .. versionadded:: 2.0.0
    """

    # No error_code override: the generic PROTOCOL_ERROR inherited from
    # ProtocolError applies.
|
||||
|
||||
|
||||
|
||||
class NoSuchStreamError(ProtocolError):
    """
    A stream-specific action referenced a stream that does not exist.

    .. versionchanged:: 2.0.0
       Became a subclass of :class:`ProtocolError
       <h2.exceptions.ProtocolError>`
    """

    def __init__(self, stream_id: int) -> None:
        #: The stream ID of the stream that does not exist.
        self.stream_id = stream_id
|
||||
|
||||
|
||||
class StreamClosedError(NoSuchStreamError):
    """
    A more specific form of
    :class:`NoSuchStreamError <h2.exceptions.NoSuchStreamError>`: the stream
    in question has since been closed, and all state relating to it has been
    removed.
    """

    def __init__(self, stream_id: int) -> None:
        #: The stream ID of the closed stream.
        self.stream_id = stream_id

        #: The relevant HTTP/2 error code.
        self.error_code = ErrorCodes.STREAM_CLOSED

        # Internal-only: events that internal code may need to fire. Not
        # relevant to external users that may receive a StreamClosedError.
        self._events = []  # type: ignore
|
||||
|
||||
|
||||
class InvalidSettingsValueError(ProtocolError, ValueError):
    """
    An attempt was made to set an invalid Settings value.

    .. versionadded:: 2.0.0
    """

    def __init__(self, msg: str, error_code: ErrorCodes) -> None:
        super().__init__(msg)
        #: The HTTP/2 error code appropriate for this invalid setting.
        self.error_code = error_code
|
||||
|
||||
|
||||
class InvalidBodyLengthError(ProtocolError):
    """
    The remote peer sent more or less data than the Content-Length header
    indicated.

    .. versionadded:: 2.0.0
    """

    def __init__(self, expected: int, actual: int) -> None:
        #: The number of bytes the Content-Length header promised.
        self.expected_length = expected
        #: The number of bytes actually received.
        self.actual_length = actual

    def __str__(self) -> str:
        return (
            "InvalidBodyLengthError: "
            f"Expected {self.expected_length} bytes, "
            f"received {self.actual_length}"
        )
|
||||
|
||||
|
||||
class UnsupportedFrameError(ProtocolError):
    """
    The remote peer sent a frame that is unsupported in this context.

    .. versionadded:: 2.1.0

    .. versionchanged:: 4.0.0
       Removed deprecated KeyError parent class.
    """

    # No error_code override: the generic PROTOCOL_ERROR inherited from
    # ProtocolError applies.
|
||||
|
||||
|
||||
|
||||
class RFC1122Error(H2Error):
    """
    Emitted when users attempt to do something that is literally allowed by the
    relevant RFC, but is sufficiently ill-defined that it's unwise to allow
    users to actually do it.

    While there is some disagreement about whether or not we should be liberal
    in what we accept, it is a truth universally acknowledged that we should be
    conservative in what we emit.

    .. versionadded:: 2.4.0
    """

    # shazow says I'm going to regret naming the exception this way. If that
    # turns out to be true, TELL HIM NOTHING.
|
||||
|
||||
|
||||
class DenialOfServiceError(ProtocolError):
    """
    Emitted when the remote peer exhibits a behaviour that is likely to be an
    attempt to perform a Denial of Service attack on the implementation. This
    is a form of ProtocolError that carries a different error code, and allows
    more easy detection of this kind of behaviour.

    .. versionadded:: 2.5.0
    """

    #: The error code that corresponds to this kind of
    #: :class:`ProtocolError <h2.exceptions.ProtocolError>`
    error_code = ErrorCodes.ENHANCE_YOUR_CALM
|
||||
161
path/to/venv/lib/python3.12/site-packages/h2/frame_buffer.py
Normal file
161
path/to/venv/lib/python3.12/site-packages/h2/frame_buffer.py
Normal file
@@ -0,0 +1,161 @@
|
||||
"""
|
||||
h2/frame_buffer
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
A data structure that provides a way to iterate over a byte buffer in terms of
|
||||
frames.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from hyperframe.exceptions import InvalidDataError, InvalidFrameError
|
||||
from hyperframe.frame import ContinuationFrame, Frame, HeadersFrame, PushPromiseFrame
|
||||
|
||||
from .exceptions import FrameDataMissingError, FrameTooLargeError, ProtocolError
|
||||
|
||||
# To avoid a DOS attack based on sending loads of continuation frames, we limit
|
||||
# the maximum number we're perpared to receive. In this case, we'll set the
|
||||
# limit to 64, which means the largest encoded header block we can receive by
|
||||
# default is 262144 bytes long, and the largest possible *at all* is 1073741760
|
||||
# bytes long.
|
||||
#
|
||||
# This value seems reasonable for now, but in future we may want to evaluate
|
||||
# making it configurable.
|
||||
CONTINUATION_BACKLOG = 64
|
||||
|
||||
|
||||
class FrameBuffer:
    """
    A buffer data structure for HTTP/2 data that allows iteration in terms of
    H2 frames.
    """

    def __init__(self, server: bool = False) -> None:
        # Raw bytes received but not yet consumed as complete frames.
        self._data = bytearray()
        # Largest acceptable frame payload; owned and updated by the caller.
        self.max_frame_size = 0
        # Servers must first receive the client connection preface before any
        # frames; clients expect no preamble at all.
        self._preamble = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" if server else b""
        self._preamble_len = len(self._preamble)
        # Accumulates a leading HEADERS/PUSH_PROMISE frame and its
        # CONTINUATION frames until END_HEADERS arrives.
        self._headers_buffer: list[HeadersFrame | ContinuationFrame | PushPromiseFrame] = []

    def add_data(self, data: bytes) -> None:
        """
        Add more data to the frame buffer.

        :param data: A bytestring containing the byte buffer.
        :raises ProtocolError: if the buffered bytes do not begin with the
            expected HTTP/2 connection preface (server side only).
        """
        if self._preamble_len:
            data_len = len(data)
            of_which_preamble = min(self._preamble_len, data_len)

            if self._preamble[:of_which_preamble] != data[:of_which_preamble]:
                msg = "Invalid HTTP/2 preamble."
                raise ProtocolError(msg)

            # Strip the consumed portion of the preamble; the remainder (if
            # any) is expected at the front of the next add_data() call.
            data = data[of_which_preamble:]
            self._preamble_len -= of_which_preamble
            self._preamble = self._preamble[of_which_preamble:]

        self._data += data

    def _validate_frame_length(self, length: int) -> None:
        """
        Confirm that the frame is an appropriate length.

        :raises FrameTooLargeError: if ``length`` exceeds ``max_frame_size``.
        """
        if length > self.max_frame_size:
            msg = f"Received overlong frame: length {length}, max {self.max_frame_size}"
            raise FrameTooLargeError(msg)

    def _update_header_buffer(self, f: Frame | None) -> Frame | None:
        """
        Updates the internal header buffer. Returns a frame that should replace
        the current one. May throw exceptions if this frame is invalid.
        """
        # Check if we're in the middle of a headers block. If we are, this
        # frame *must* be a CONTINUATION frame with the same stream ID as the
        # leading HEADERS or PUSH_PROMISE frame. Anything else is a
        # ProtocolError. If the frame *is* valid, append it to the header
        # buffer.
        if self._headers_buffer:
            stream_id = self._headers_buffer[0].stream_id
            valid_frame = (
                f is not None and
                isinstance(f, ContinuationFrame) and
                f.stream_id == stream_id
            )
            if not valid_frame:
                msg = "Invalid frame during header block."
                raise ProtocolError(msg)
            # For the type checker: valid_frame already guarantees this.
            assert isinstance(f, ContinuationFrame)

            # Append the frame to the buffer.
            self._headers_buffer.append(f)
            # Bound the backlog to stop a peer from streaming CONTINUATION
            # frames forever (DoS protection).
            if len(self._headers_buffer) > CONTINUATION_BACKLOG:
                msg = "Too many continuation frames received."
                raise ProtocolError(msg)

            # If this is the end of the header block, then we want to build a
            # mutant HEADERS frame that's massive. Use the original one we got,
            # then set END_HEADERS and set its data appropriately. If it's not
            # the end of the block, lose the current frame: we can't yield it.
            if "END_HEADERS" in f.flags:
                f = self._headers_buffer[0]
                f.flags.add("END_HEADERS")
                f.data = b"".join(x.data for x in self._headers_buffer)
                self._headers_buffer = []
            else:
                f = None
        elif (isinstance(f, (HeadersFrame, PushPromiseFrame)) and
                "END_HEADERS" not in f.flags):
            # This is the start of a headers block! Save the frame off and then
            # act like we didn't receive one.
            self._headers_buffer.append(f)
            f = None

        return f

    # The methods below support the iterator protocol.
    def __iter__(self) -> FrameBuffer:
        return self

    def __next__(self) -> Frame:
        # First, check that we have enough data to successfully parse the
        # next frame header. If not, bail. Otherwise, parse it.
        if len(self._data) < 9:
            raise StopIteration

        try:
            f, length = Frame.parse_frame_header(memoryview(self._data[:9]))
        except (InvalidDataError, InvalidFrameError) as err:  # pragma: no cover
            msg = f"Received frame with invalid header: {err!s}"
            raise ProtocolError(msg) from err

        # Next, check that we have enough length to parse the frame body. If
        # not, bail, leaving the frame header data in the buffer for next time.
        if len(self._data) < length + 9:
            raise StopIteration

        # Confirm the frame has an appropriate length.
        self._validate_frame_length(length)

        # Try to parse the frame body
        try:
            f.parse_body(memoryview(self._data[9:9+length]))
        except InvalidDataError as err:
            msg = "Received frame with non-compliant data"
            raise ProtocolError(msg) from err
        except InvalidFrameError as err:
            msg = "Frame data missing or invalid"
            raise FrameDataMissingError(msg) from err

        # At this point, as we know we'll use or discard the entire frame, we
        # can update the data.
        self._data = self._data[9+length:]

        # Pass the frame through the header buffer.
        new_frame = self._update_header_buffer(f)

        # If we got a frame we didn't understand or shouldn't yield, rather
        # than return None it'd be better if we just tried to get the next
        # frame in the sequence instead. Recurse back into ourselves to do
        # that. This is safe because the amount of work we have to do here is
        # strictly bounded by the length of the buffer.
        return new_frame if new_frame is not None else self.__next__()
|
||||
331
path/to/venv/lib/python3.12/site-packages/h2/settings.py
Normal file
331
path/to/venv/lib/python3.12/site-packages/h2/settings.py
Normal file
@@ -0,0 +1,331 @@
|
||||
"""
|
||||
h2/settings
|
||||
~~~~~~~~~~~
|
||||
|
||||
This module contains a HTTP/2 settings object. This object provides a simple
|
||||
API for manipulating HTTP/2 settings, keeping track of both the current active
|
||||
state of the settings and the unacknowledged future values of the settings.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import collections
|
||||
import enum
|
||||
from collections.abc import Iterator, MutableMapping
|
||||
from typing import Union
|
||||
|
||||
from hyperframe.frame import SettingsFrame
|
||||
|
||||
from .errors import ErrorCodes
|
||||
from .exceptions import InvalidSettingsValueError
|
||||
|
||||
|
||||
class SettingCodes(enum.IntEnum):
    """
    All known HTTP/2 setting codes, mirroring the numeric constants defined
    on :class:`hyperframe.frame.SettingsFrame`.

    .. versionadded:: 2.6.0
    """

    #: Allows the sender to inform the remote endpoint of the maximum size of
    #: the header compression table used to decode header blocks, in octets.
    HEADER_TABLE_SIZE = SettingsFrame.HEADER_TABLE_SIZE

    #: This setting can be used to disable server push. To disable server push
    #: on a client, set this to 0.
    ENABLE_PUSH = SettingsFrame.ENABLE_PUSH

    #: Indicates the maximum number of concurrent streams that the sender will
    #: allow.
    MAX_CONCURRENT_STREAMS = SettingsFrame.MAX_CONCURRENT_STREAMS

    #: Indicates the sender's initial window size (in octets) for stream-level
    #: flow control.
    INITIAL_WINDOW_SIZE = SettingsFrame.INITIAL_WINDOW_SIZE

    #: Indicates the size of the largest frame payload that the sender is
    #: willing to receive, in octets.
    MAX_FRAME_SIZE = SettingsFrame.MAX_FRAME_SIZE

    #: This advisory setting informs a peer of the maximum size of header list
    #: that the sender is prepared to accept, in octets. The value is based on
    #: the uncompressed size of header fields, including the length of the name
    #: and value in octets plus an overhead of 32 octets for each header field.
    MAX_HEADER_LIST_SIZE = SettingsFrame.MAX_HEADER_LIST_SIZE

    #: This setting can be used to enable the connect protocol (RFC 8441). To
    #: enable on a client set this to 1.
    ENABLE_CONNECT_PROTOCOL = SettingsFrame.ENABLE_CONNECT_PROTOCOL
|
||||
|
||||
|
||||
def _setting_code_from_int(code: int) -> SettingCodes | int:
    """
    Given an integer setting code, returns either one of :class:`SettingCodes
    <h2.settings.SettingCodes>` or, if not present in the known set of codes,
    returns the integer directly.
    """
    try:
        return SettingCodes(code)
    except ValueError:
        # Unknown codes (e.g. extension settings) are passed through
        # untouched rather than rejected.
        return code
|
||||
|
||||
|
||||
class ChangedSetting:
    """
    A record of a single settings change: which setting changed, its value
    before the change, and its value after.
    """

    def __init__(self, setting: SettingCodes | int, original_value: int | None, new_value: int) -> None:
        #: The setting code given. Either one of :class:`SettingCodes
        #: <h2.settings.SettingCodes>` or ``int``
        #:
        #: .. versionchanged:: 2.6.0
        self.setting = setting
        #: The original value before being changed.
        self.original_value = original_value
        #: The new value after being changed.
        self.new_value = new_value

    def __repr__(self) -> str:
        return (
            f"ChangedSetting(setting={self.setting!s}, "
            f"original_value={self.original_value}, "
            f"new_value={self.new_value})"
        )
|
||||
|
||||
|
||||
class Settings(MutableMapping[Union[SettingCodes, int], int]):
    """
    An object that encapsulates HTTP/2 settings state.

    HTTP/2 Settings are a complex beast. Each party, remote and local, has its
    own settings and a view of the other party's settings. When a settings
    frame is emitted by a peer it cannot assume that the new settings values
    are in place until the remote peer acknowledges the setting. In principle,
    multiple settings changes can be "in flight" at the same time, all with
    different values.

    This object encapsulates this mess. It provides a dict-like interface to
    settings, which return the *current* values of the settings in question.
    Additionally, it keeps track of the stack of proposed values: each time an
    acknowledgement is sent/received, it updates the current values with the
    stack of proposed values. On top of all that, it validates the values to
    make sure they're allowed, and raises :class:`InvalidSettingsValueError
    <h2.exceptions.InvalidSettingsValueError>` if they are not.

    Finally, this object understands what the default values of the HTTP/2
    settings are, and sets those defaults appropriately.

    .. versionchanged:: 2.2.0
       Added the ``initial_values`` parameter.

    .. versionchanged:: 2.5.0
       Added the ``max_header_list_size`` property.

    :param client: (optional) Whether these settings should be defaulted for a
        client implementation or a server implementation. Defaults to ``True``.
    :type client: ``bool``
    :param initial_values: (optional) Any initial values the user would like
        set, rather than RFC 7540's defaults.
    :type initial_values: ``MutableMapping``
    """

    def __init__(self, client: bool = True, initial_values: dict[SettingCodes, int] | None = None) -> None:
        # Backing object for the settings. This is a dictionary of
        # (setting: [list of values]), where the first value in the list is the
        # current value of the setting. Strictly this doesn't use lists but
        # instead uses collections.deque to avoid repeated memory allocations.
        #
        # This contains the default values for HTTP/2.
        self._settings: dict[SettingCodes | int, collections.deque[int]] = {
            SettingCodes.HEADER_TABLE_SIZE: collections.deque([4096]),
            SettingCodes.ENABLE_PUSH: collections.deque([int(client)]),
            SettingCodes.INITIAL_WINDOW_SIZE: collections.deque([65535]),
            SettingCodes.MAX_FRAME_SIZE: collections.deque([16384]),
            SettingCodes.ENABLE_CONNECT_PROTOCOL: collections.deque([0]),
        }
        if initial_values is not None:
            # User-supplied initial values are validated the same way as
            # values set later, and replace the defaults outright.
            for key, value in initial_values.items():
                invalid = _validate_setting(key, value)
                if invalid:
                    msg = f"Setting {key} has invalid value {value}"
                    raise InvalidSettingsValueError(
                        msg,
                        error_code=invalid,
                    )
                self._settings[key] = collections.deque([value])

    def acknowledge(self) -> dict[SettingCodes | int, ChangedSetting]:
        """
        The settings have been acknowledged, either by the user (remote
        settings) or by the remote peer (local settings).

        :returns: A dict of {setting: ChangedSetting} that were applied.
        """
        changed_settings: dict[SettingCodes | int, ChangedSetting] = {}

        # If there is more than one setting in the list, we have a setting
        # value outstanding. Update them.
        for k, v in self._settings.items():
            if len(v) > 1:
                old_setting = v.popleft()
                new_setting = v[0]
                changed_settings[k] = ChangedSetting(
                    k, old_setting, new_setting,
                )

        return changed_settings

    # Provide easy-access to well known settings.
    @property
    def header_table_size(self) -> int:
        """
        The current value of the :data:`HEADER_TABLE_SIZE
        <h2.settings.SettingCodes.HEADER_TABLE_SIZE>` setting.
        """
        return self[SettingCodes.HEADER_TABLE_SIZE]

    @header_table_size.setter
    def header_table_size(self, value: int) -> None:
        self[SettingCodes.HEADER_TABLE_SIZE] = value

    @property
    def enable_push(self) -> int:
        """
        The current value of the :data:`ENABLE_PUSH
        <h2.settings.SettingCodes.ENABLE_PUSH>` setting.
        """
        return self[SettingCodes.ENABLE_PUSH]

    @enable_push.setter
    def enable_push(self, value: int) -> None:
        self[SettingCodes.ENABLE_PUSH] = value

    @property
    def initial_window_size(self) -> int:
        """
        The current value of the :data:`INITIAL_WINDOW_SIZE
        <h2.settings.SettingCodes.INITIAL_WINDOW_SIZE>` setting.
        """
        return self[SettingCodes.INITIAL_WINDOW_SIZE]

    @initial_window_size.setter
    def initial_window_size(self, value: int) -> None:
        self[SettingCodes.INITIAL_WINDOW_SIZE] = value

    @property
    def max_frame_size(self) -> int:
        """
        The current value of the :data:`MAX_FRAME_SIZE
        <h2.settings.SettingCodes.MAX_FRAME_SIZE>` setting.
        """
        return self[SettingCodes.MAX_FRAME_SIZE]

    @max_frame_size.setter
    def max_frame_size(self, value: int) -> None:
        self[SettingCodes.MAX_FRAME_SIZE] = value

    @property
    def max_concurrent_streams(self) -> int:
        """
        The current value of the :data:`MAX_CONCURRENT_STREAMS
        <h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS>` setting.

        When the setting has never been supplied, an effectively-unlimited
        default of ``2**32 + 1`` is returned.
        """
        return self.get(SettingCodes.MAX_CONCURRENT_STREAMS, 2**32+1)

    @max_concurrent_streams.setter
    def max_concurrent_streams(self, value: int) -> None:
        self[SettingCodes.MAX_CONCURRENT_STREAMS] = value

    @property
    def max_header_list_size(self) -> int | None:
        """
        The current value of the :data:`MAX_HEADER_LIST_SIZE
        <h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE>` setting. If not set,
        returns ``None``, which means unlimited.

        .. versionadded:: 2.5.0
        """
        return self.get(SettingCodes.MAX_HEADER_LIST_SIZE, None)

    @max_header_list_size.setter
    def max_header_list_size(self, value: int) -> None:
        self[SettingCodes.MAX_HEADER_LIST_SIZE] = value

    @property
    def enable_connect_protocol(self) -> int:
        """
        The current value of the :data:`ENABLE_CONNECT_PROTOCOL
        <h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL>` setting.
        """
        return self[SettingCodes.ENABLE_CONNECT_PROTOCOL]

    @enable_connect_protocol.setter
    def enable_connect_protocol(self, value: int) -> None:
        self[SettingCodes.ENABLE_CONNECT_PROTOCOL] = value

    # Implement the MutableMapping API.
    def __getitem__(self, key: SettingCodes | int) -> int:
        val = self._settings[key][0]

        # Things that were created when a setting was received should stay
        # KeyError'd. (A leading None is the placeholder __setitem__ inserts
        # for settings that had no previous value: until acknowledged, such a
        # setting has no *current* value.)
        if val is None:
            raise KeyError

        return val

    def __setitem__(self, key: SettingCodes | int, value: int) -> None:
        invalid = _validate_setting(key, value)
        if invalid:
            msg = f"Setting {key} has invalid value {value}"
            raise InvalidSettingsValueError(
                msg,
                error_code=invalid,
            )

        try:
            items = self._settings[key]
        except KeyError:
            # No current value: seed the deque with a None placeholder so the
            # new value only becomes "current" once acknowledge() runs.
            items = collections.deque([None])  # type: ignore
            self._settings[key] = items

        items.append(value)

    def __delitem__(self, key: SettingCodes | int) -> None:
        del self._settings[key]

    def __iter__(self) -> Iterator[SettingCodes | int]:
        return self._settings.__iter__()

    def __len__(self) -> int:
        return len(self._settings)

    def __eq__(self, other: object) -> bool:
        # Equality compares the full backing state, including any
        # unacknowledged pending values.
        if isinstance(other, Settings):
            return self._settings == other._settings
        return NotImplemented

    def __ne__(self, other: object) -> bool:
        if isinstance(other, Settings):
            return not self == other
        return NotImplemented
|
||||
|
||||
|
||||
def _validate_setting(setting: SettingCodes | int, value: int) -> ErrorCodes:
    """
    Confirms that a specific setting has a well-formed value.

    :returns: The error code describing the problem if the value is invalid,
        or ``ErrorCodes.NO_ERROR`` (which is falsy) otherwise.
    """
    if setting == SettingCodes.ENABLE_PUSH and value not in (0, 1):
        return ErrorCodes.PROTOCOL_ERROR
    if setting == SettingCodes.INITIAL_WINDOW_SIZE and not 0 <= value <= 2147483647:
        # Upper bound is 2^31 - 1.
        return ErrorCodes.FLOW_CONTROL_ERROR
    if setting == SettingCodes.MAX_FRAME_SIZE and not 16384 <= value <= 16777215:
        # Bounds are 2^14 and 2^24 - 1.
        return ErrorCodes.PROTOCOL_ERROR
    if setting == SettingCodes.MAX_HEADER_LIST_SIZE and value < 0:
        return ErrorCodes.PROTOCOL_ERROR
    if setting == SettingCodes.ENABLE_CONNECT_PROTOCOL and value not in (0, 1):
        return ErrorCodes.PROTOCOL_ERROR

    return ErrorCodes.NO_ERROR
|
||||
1425
path/to/venv/lib/python3.12/site-packages/h2/stream.py
Normal file
1425
path/to/venv/lib/python3.12/site-packages/h2/stream.py
Normal file
File diff suppressed because it is too large
Load Diff
700
path/to/venv/lib/python3.12/site-packages/h2/utilities.py
Normal file
700
path/to/venv/lib/python3.12/site-packages/h2/utilities.py
Normal file
@@ -0,0 +1,700 @@
|
||||
"""
|
||||
h2/utilities
|
||||
~~~~~~~~~~~~
|
||||
|
||||
Utility functions that do not belong in a separate module.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import collections
|
||||
from typing import TYPE_CHECKING, Any, NamedTuple
|
||||
|
||||
from hpack.struct import HeaderTuple, NeverIndexedHeaderTuple
|
||||
|
||||
from .exceptions import FlowControlError, ProtocolError
|
||||
|
||||
if TYPE_CHECKING: # pragma: no cover
|
||||
from collections.abc import Generator, Iterable
|
||||
|
||||
from hpack.struct import Header, HeaderWeaklyTyped
|
||||
|
||||
# Single-byte integer values: b":"[0] (pseudo-header prefix byte) and
# b"1"[0] (leading digit of an informational 1xx status code).
SIGIL = ord(b":")
INFORMATIONAL_START = ord(b"1")


# A set of headers that are hop-by-hop or connection-specific and thus
# forbidden in HTTP/2. This list comes from RFC 7540 § 8.1.2.2.
CONNECTION_HEADERS = frozenset([
    b"connection",
    b"proxy-connection",
    b"keep-alive",
    b"transfer-encoding",
    b"upgrade",
])


# The full set of pseudo-header fields permitted by HTTP/2 (":protocol" is
# from RFC 8441).
_ALLOWED_PSEUDO_HEADER_FIELDS = frozenset([
    b":method",
    b":scheme",
    b":authority",
    b":path",
    b":status",
    b":protocol",
])


# Headers kept out of HPACK compression contexts (see _secure_headers).
_SECURE_HEADERS = frozenset([
    # May have basic credentials which are vulnerable to dictionary attacks.
    b"authorization",
    b"proxy-authorization",
])


# Pseudo-headers that may only appear on requests.
_REQUEST_ONLY_HEADERS = frozenset([
    b":scheme",
    b":path",
    b":authority",
    b":method",
    b":protocol",
])


# Pseudo-headers that may only appear on responses.
_RESPONSE_ONLY_HEADERS = frozenset([b":status"])


# A Set of pseudo headers that are only valid if the method is
# CONNECT, see RFC 8441 § 5
_CONNECT_REQUEST_ONLY_HEADERS = frozenset([b":protocol"])
|
||||
|
||||
|
||||
def _secure_headers(headers: Iterable[Header],
                    hdr_validation_flags: HeaderValidationFlags | None) -> Generator[Header, None, None]:
    """
    Certain headers are at risk of being attacked during the header compression
    phase, and so need to be kept out of header compression contexts. This
    function automatically transforms certain specific headers into HPACK
    never-indexed fields to ensure they don't get added to header compression
    contexts.

    This function currently implements two rules:

    - 'authorization' and 'proxy-authorization' fields are automatically made
      never-indexed.
    - Any 'cookie' header field shorter than 20 bytes long is made
      never-indexed.

    These fields are the most at-risk. These rules are inspired by Firefox
    and nghttp2.

    :param headers: The headers to transform; names must already be bytes.
    :param hdr_validation_flags: Unused; kept for pipeline signature parity.
    """
    for header in headers:
        assert isinstance(header[0], bytes)
        # BUG FIX: this previously read ``header[0] in b"cookie"``, which is a
        # bytes *substring* test — it also matched names such as b"ok",
        # b"cook" and the empty name. Equality is the intended check.
        if header[0] in _SECURE_HEADERS or (header[0] == b"cookie" and len(header[1]) < 20):
            yield NeverIndexedHeaderTuple(header[0], header[1])
        else:
            yield header
|
||||
|
||||
|
||||
def extract_method_header(headers: Iterable[Header]) -> bytes | None:
    """
    Scans *headers* for the ``:method`` pseudo-header and returns its value
    as bytes, or ``None`` when no method header is present.
    """
    for name, value in headers:
        if name == b":method" and isinstance(value, bytes):
            return value
        if name == ":method" and isinstance(value, str):
            return value.encode("utf-8")  # pragma: no cover
    return None
|
||||
|
||||
|
||||
def is_informational_response(headers: Iterable[Header]) -> bool:
    """
    Searches a header block for a :status header to decide whether it is an
    informational (1xx) response. Assumes the headers are well formed and
    encoded as bytes: the HTTP/2 pseudo-headers come first, so scanning can
    stop at the first field whose name does not begin with a colon.

    :param headers: The HTTP/2 headers.
    :returns: A boolean indicating if this is an informational response.
    """
    for name, value in headers:
        if not name.startswith(b":"):
            # Pseudo-headers always precede regular fields; a regular field
            # here means there is no :status pseudo-header at all.
            return False
        if name == b":status":
            # A leading "1" digit marks an informational status code.
            return value.startswith(b"1")
        # Some other pseudo-header: keep scanning.
    return False
|
||||
|
||||
|
||||
def guard_increment_window(current: int, increment: int) -> int:
    """
    Increments a flow control window, guarding against that window becoming too
    large.

    :param current: The current value of the flow control window.
    :param increment: The increment to apply to that window.
    :returns: The new value of the window.
    :raises: ``FlowControlError``
    """
    # RFC 7540 § 6.9.1: a flow control window may never exceed 2**31 - 1.
    LARGEST_FLOW_CONTROL_WINDOW = 2**31 - 1  # noqa: N806

    updated = current + increment
    if updated > LARGEST_FLOW_CONTROL_WINDOW:
        msg = f"May not increment flow control window past {LARGEST_FLOW_CONTROL_WINDOW}"
        raise FlowControlError(msg)
    return updated
|
||||
|
||||
|
||||
def authority_from_headers(headers: Iterable[Header]) -> bytes | None:
    """
    Given a header set, searches for the :authority header and returns its
    value.

    Note that this doesn't use indexing, so should only be called if the
    headers are for a client request. Otherwise, will loop over the entire
    header set, which is potentially unwise.

    :param headers: The HTTP header set.
    :returns: The value of the authority header, or ``None``.
    :rtype: ``bytes`` or ``None``.
    """
    return next(
        (value for name, value in headers if name == b":authority"),
        None,
    )
|
||||
|
||||
|
||||
# Flags used by the validate_headers pipeline to determine which checks
# should be applied to a given set of headers.
class HeaderValidationFlags(NamedTuple):
    """
    Settings that select which validation rules apply to a header block.
    """

    is_client: bool  # we are the client side of the connection
    is_trailer: bool  # the block is a trailer block
    is_response_header: bool  # the block is a response (vs. request) header
    is_push_promise: bool  # the block arrived on a PUSH_PROMISE frame
|
||||
|
||||
|
||||
def validate_headers(headers: Iterable[Header], hdr_validation_flags: HeaderValidationFlags) -> Iterable[Header]:
    """
    Validates a header sequence against a set of constraints from RFC 7540.

    :param headers: The HTTP header set.
    :param hdr_validation_flags: An instance of HeaderValidationFlags.
    """
    # Validation is layered as lazy generator stages so the header list is
    # only walked once, when the caller finally iterates the result. The
    # checking remains somewhat expensive, so each stage is kept cheap —
    # stages index into the header tuples rather than unpacking them, since
    # unpacking is a fixed per-iteration cost we don't want to pay.
    stages = (
        _reject_illegal_characters,
        _reject_empty_header_names,
        _reject_te,
        _reject_connection_header,
        _reject_pseudo_header_fields,
        _check_host_authority_header,
        _check_path_header,
    )
    for stage in stages:
        headers = stage(headers, hdr_validation_flags)
    return headers
|
||||
|
||||
|
||||
def _reject_illegal_characters(headers: Iterable[Header],
|
||||
hdr_validation_flags: HeaderValidationFlags) -> Generator[Header, None, None]:
|
||||
"""
|
||||
Raises a ProtocolError if any header names or values contain illegal characters.
|
||||
See <https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.1>.
|
||||
"""
|
||||
for header in headers:
|
||||
# > A field name MUST NOT contain characters in the ranges 0x00-0x20, 0x41-0x5a,
|
||||
# > or 0x7f-0xff (all ranges inclusive).
|
||||
for c in header[0]:
|
||||
if 0x41 <= c <= 0x5a:
|
||||
msg = f"Received uppercase header name {header[0]!r}."
|
||||
raise ProtocolError(msg)
|
||||
if c <= 0x20 or c >= 0x7f:
|
||||
msg = f"Illegal character '{chr(c)}' in header name: {header[0]!r}"
|
||||
raise ProtocolError(msg)
|
||||
|
||||
# > With the exception of pseudo-header fields (Section 8.3), which have a name
|
||||
# > that starts with a single colon, field names MUST NOT include a colon (ASCII
|
||||
# > COLON, 0x3a).
|
||||
if header[0].find(b":", 1) != -1:
|
||||
msg = f"Illegal character ':' in header name: {header[0]!r}"
|
||||
raise ProtocolError(msg)
|
||||
|
||||
# For compatibility with RFC 7230 header fields, we need to allow the field
|
||||
# value to be an empty string. This is ludicrous, but technically allowed.
|
||||
if field_value := header[1]:
|
||||
|
||||
# > A field value MUST NOT contain the zero value (ASCII NUL, 0x00), line feed
|
||||
# > (ASCII LF, 0x0a), or carriage return (ASCII CR, 0x0d) at any position.
|
||||
for c in field_value:
|
||||
if c == 0 or c == 0x0a or c == 0x0d: # noqa: PLR1714
|
||||
msg = f"Illegal character '{chr(c)}' in header value: {field_value!r}"
|
||||
raise ProtocolError(msg)
|
||||
|
||||
# > A field value MUST NOT start or end with an ASCII whitespace character
|
||||
# > (ASCII SP or HTAB, 0x20 or 0x09).
|
||||
if (
|
||||
field_value[0] == 0x20 or
|
||||
field_value[0] == 0x09 or
|
||||
field_value[-1] == 0x20 or
|
||||
field_value[-1] == 0x09
|
||||
):
|
||||
msg = f"Received header value surrounded by whitespace {field_value!r}"
|
||||
raise ProtocolError(msg)
|
||||
|
||||
yield header
|
||||
|
||||
|
||||
def _reject_empty_header_names(headers: Iterable[Header],
|
||||
hdr_validation_flags: HeaderValidationFlags) -> Generator[Header, None, None]:
|
||||
"""
|
||||
Raises a ProtocolError if any header names are empty (length 0).
|
||||
While hpack decodes such headers without errors, they are semantically
|
||||
forbidden in HTTP, see RFC 7230, stating that they must be at least one
|
||||
character long.
|
||||
"""
|
||||
for header in headers:
|
||||
if len(header[0]) == 0:
|
||||
msg = "Received header name with zero length."
|
||||
raise ProtocolError(msg)
|
||||
yield header
|
||||
|
||||
|
||||
def _reject_te(headers: Iterable[Header], hdr_validation_flags: HeaderValidationFlags) -> Generator[Header, None, None]:
|
||||
"""
|
||||
Raises a ProtocolError if the TE header is present in a header block and
|
||||
its value is anything other than "trailers".
|
||||
"""
|
||||
for header in headers:
|
||||
if header[0] == b"te" and header[1].lower() != b"trailers":
|
||||
msg = f"Invalid value for TE header: {header[1]!r}"
|
||||
raise ProtocolError(msg)
|
||||
|
||||
yield header
|
||||
|
||||
|
||||
def _reject_connection_header(headers: Iterable[Header], hdr_validation_flags: HeaderValidationFlags) -> Generator[Header, None, None]:
    """
    Raises a ProtocolError when any connection-specific header field
    (Connection, Keep-Alive, Transfer-Encoding, ...) shows up; these are
    forbidden in HTTP/2 per RFC 7540 § 8.1.2.2.
    """
    for header in headers:
        name = header[0]
        if name in CONNECTION_HEADERS:
            msg = f"Connection-specific header field present: {name!r}."
            raise ProtocolError(msg)
        yield header
|
||||
|
||||
|
||||
def _assert_header_in_set(bytes_header: bytes,
|
||||
header_set: set[bytes | str] | set[bytes] | set[str]) -> None:
|
||||
"""
|
||||
Given a set of header names, checks whether the string or byte version of
|
||||
the header name is present. Raises a Protocol error with the appropriate
|
||||
error if it's missing.
|
||||
"""
|
||||
if bytes_header not in header_set:
|
||||
msg = f"Header block missing mandatory {bytes_header!r} header"
|
||||
raise ProtocolError(msg)
|
||||
|
||||
|
||||
def _reject_pseudo_header_fields(headers: Iterable[Header],
                                 hdr_validation_flags: HeaderValidationFlags) -> Generator[Header, None, None]:
    """
    Raises a ProtocolError if duplicate pseudo-header fields are found in a
    header block, if a pseudo-header field appears in a block after an
    ordinary header field, or if an unknown pseudo-header field is seen.

    Raises a ProtocolError if pseudo-header fields are found in trailers.

    Also records the :method value, which the final acceptability check needs
    (":protocol" is only valid on CONNECT requests).
    """
    seen_pseudo_header_fields = set()
    seen_regular_header = False
    method = None

    for header in headers:
        if header[0][0] == SIGIL:
            # Pseudo-header field: must be unique ...
            if header[0] in seen_pseudo_header_fields:
                msg = f"Received duplicate pseudo-header field {header[0]!r}"
                raise ProtocolError(msg)

            seen_pseudo_header_fields.add(header[0])

            # ... must precede every regular field ...
            if seen_regular_header:
                msg = f"Received pseudo-header field out of sequence: {header[0]!r}"
                raise ProtocolError(msg)

            # ... and must be one of the fields RFC 7540 / RFC 8441 define.
            if header[0] not in _ALLOWED_PSEUDO_HEADER_FIELDS:
                msg = f"Received custom pseudo-header field {header[0]!r}"
                raise ProtocolError(msg)

            # BUG FIX: this previously read ``header[0] in b":method"``, a
            # bytes *substring* test rather than the intended equality test.
            if header[0] == b":method":
                method = header[1]

        else:
            seen_regular_header = True

        yield header

    # Check the pseudo-headers we got to confirm they're acceptable.
    _check_pseudo_header_field_acceptability(
        seen_pseudo_header_fields, method, hdr_validation_flags,
    )
|
||||
|
||||
|
||||
def _check_pseudo_header_field_acceptability(pseudo_headers: set[bytes | str] | set[bytes] | set[str],
                                             method: bytes | None,
                                             hdr_validation_flags: HeaderValidationFlags) -> None:
    """
    Given the set of pseudo-header fields seen in a header block and the
    validation flags, confirms that RFC 7540 permits the combination.
    """
    # RFC 7540 § 8.1.2.1: trailers may not carry pseudo-header fields at all.
    if hdr_validation_flags.is_trailer and pseudo_headers:
        msg = f"Received pseudo-header in trailer {pseudo_headers}"
        raise ProtocolError(msg)

    # RFC 7540 § 8.1.2.4 (https://tools.ietf.org/html/rfc7540#section-8.1.2.4):
    # responses must carry :status and no request-only pseudo-headers;
    # requests must carry :path, :method and :scheme and no response-only
    # pseudo-headers.
    if hdr_validation_flags.is_response_header:
        _assert_header_in_set(b":status", pseudo_headers)
        bad = pseudo_headers & _REQUEST_ONLY_HEADERS
        if bad:
            msg = f"Encountered request-only headers {bad}"
            raise ProtocolError(msg)
    elif not hdr_validation_flags.is_trailer:
        # A (non-trailer) request block.
        _assert_header_in_set(b":path", pseudo_headers)
        _assert_header_in_set(b":method", pseudo_headers)
        _assert_header_in_set(b":scheme", pseudo_headers)
        bad = pseudo_headers & _RESPONSE_ONLY_HEADERS
        if bad:
            msg = f"Encountered response-only headers {bad}"
            raise ProtocolError(msg)
        # RFC 8441 § 5: ":protocol" is only valid on CONNECT requests.
        if method != b"CONNECT":
            bad = pseudo_headers & _CONNECT_REQUEST_ONLY_HEADERS
            if bad:
                msg = f"Encountered connect-request-only headers {bad!r}"
                raise ProtocolError(msg)
|
||||
|
||||
|
||||
def _validate_host_authority_header(headers: Iterable[Header]) -> Generator[Header, None, None]:
|
||||
"""
|
||||
Given the :authority and Host headers from a request block that isn't
|
||||
a trailer, check that:
|
||||
1. At least one of these headers is set.
|
||||
2. If both headers are set, they match.
|
||||
|
||||
:param headers: The HTTP header set.
|
||||
:raises: ``ProtocolError``
|
||||
"""
|
||||
# We use None as a sentinel value. Iterate over the list of headers,
|
||||
# and record the value of these headers (if present). We don't need
|
||||
# to worry about receiving duplicate :authority headers, as this is
|
||||
# enforced by the _reject_pseudo_header_fields() pipeline.
|
||||
#
|
||||
# TODO: We should also guard against receiving duplicate Host headers,
|
||||
# and against sending duplicate headers.
|
||||
authority_header_val = None
|
||||
host_header_val = None
|
||||
|
||||
for header in headers:
|
||||
if header[0] == b":authority":
|
||||
authority_header_val = header[1]
|
||||
elif header[0] == b"host":
|
||||
host_header_val = header[1]
|
||||
|
||||
yield header
|
||||
|
||||
# If we have not-None values for these variables, then we know we saw
|
||||
# the corresponding header.
|
||||
authority_present = (authority_header_val is not None)
|
||||
host_present = (host_header_val is not None)
|
||||
|
||||
# It is an error for a request header block to contain neither
|
||||
# an :authority header nor a Host header.
|
||||
if not authority_present and not host_present:
|
||||
msg = "Request header block does not have an :authority or Host header."
|
||||
raise ProtocolError(msg)
|
||||
|
||||
# If we receive both headers, they should definitely match.
|
||||
if authority_present and host_present and authority_header_val != host_header_val:
|
||||
msg = (
|
||||
"Request header block has mismatched :authority and "
|
||||
f"Host headers: {authority_header_val!r} / {host_header_val!r}"
|
||||
)
|
||||
raise ProtocolError(msg)
|
||||
|
||||
|
||||
def _check_host_authority_header(headers: Iterable[Header],
|
||||
hdr_validation_flags: HeaderValidationFlags) -> Generator[Header, None, None]:
|
||||
"""
|
||||
Raises a ProtocolError if a header block arrives that does not contain an
|
||||
:authority or a Host header, or if a header block contains both fields,
|
||||
but their values do not match.
|
||||
"""
|
||||
# We only expect to see :authority and Host headers on request header
|
||||
# blocks that aren't trailers, so skip this validation if this is a
|
||||
# response header or we're looking at trailer blocks.
|
||||
skip_validation = (
|
||||
hdr_validation_flags.is_response_header or
|
||||
hdr_validation_flags.is_trailer
|
||||
)
|
||||
if skip_validation:
|
||||
return (h for h in headers)
|
||||
|
||||
return _validate_host_authority_header(headers)
|
||||
|
||||
|
||||
def _check_path_header(headers: Iterable[Header],
|
||||
hdr_validation_flags: HeaderValidationFlags) -> Generator[Header, None, None]:
|
||||
"""
|
||||
Raise a ProtocolError if a header block arrives or is sent that contains an
|
||||
empty :path header.
|
||||
"""
|
||||
def inner() -> Generator[Header, None, None]:
|
||||
for header in headers:
|
||||
if header[0] == b":path" and not header[1]:
|
||||
msg = "An empty :path header is forbidden"
|
||||
raise ProtocolError(msg)
|
||||
|
||||
yield header
|
||||
|
||||
# We only expect to see :authority and Host headers on request header
|
||||
# blocks that aren't trailers, so skip this validation if this is a
|
||||
# response header or we're looking at trailer blocks.
|
||||
skip_validation = (
|
||||
hdr_validation_flags.is_response_header or
|
||||
hdr_validation_flags.is_trailer
|
||||
)
|
||||
if skip_validation:
|
||||
return (h for h in headers)
|
||||
return inner()
|
||||
|
||||
|
||||
def _to_bytes(v: bytes | str) -> bytes:
|
||||
"""
|
||||
Given an assumed `str` (or anything that supports `.encode()`),
|
||||
encodes it using utf-8 into bytes. Returns the unmodified object
|
||||
if it is already a `bytes` object.
|
||||
"""
|
||||
return v if isinstance(v, bytes) else v.encode("utf-8")
|
||||
|
||||
|
||||
def utf8_encode_headers(headers: Iterable[HeaderWeaklyTyped]) -> list[Header]:
    """
    Rebuilds an iterable of header two-tuples as a list whose names and
    values are UTF-8 encoded bytes. ``HeaderTuple`` instances keep their
    concrete class (preserving e.g. never-indexed status); anything else
    becomes a plain tuple.
    """
    encoded: list[Header] = []
    for header in headers:
        name, value = _to_bytes(header[0]), _to_bytes(header[1])
        if isinstance(header, HeaderTuple):
            encoded.append(header.__class__(name, value))
        else:
            encoded.append((name, value))
    return encoded
|
||||
|
||||
|
||||
def _lowercase_header_names(headers: Iterable[Header],
                            hdr_validation_flags: HeaderValidationFlags | None) -> Generator[Header, None, None]:
    """
    Re-emits an iterable of header two-tuples with every header name
    lowercased. ``HeaderTuple`` instances keep their concrete class; anything
    else becomes a plain tuple.
    """
    for header in headers:
        lowered = (header[0].lower(), header[1])
        if isinstance(header, HeaderTuple):
            yield header.__class__(*lowered)
        else:
            yield lowered
|
||||
|
||||
|
||||
def _strip_surrounding_whitespace(headers: Iterable[Header],
                                  hdr_validation_flags: HeaderValidationFlags | None) -> Generator[Header, None, None]:
    """
    Re-emits an iterable of header two-tuples with leading and trailing
    whitespace stripped from both names and values. ``HeaderTuple`` instances
    keep their concrete class; anything else becomes a plain tuple.
    """
    for header in headers:
        stripped = (header[0].strip(), header[1].strip())
        if isinstance(header, HeaderTuple):
            yield header.__class__(*stripped)
        else:
            yield stripped
|
||||
|
||||
|
||||
def _strip_connection_headers(headers: Iterable[Header],
                              hdr_validation_flags: HeaderValidationFlags | None) -> Generator[Header, None, None]:
    """
    Drops any connection-specific headers, as required by RFC 7540 § 8.1.2.2.
    """
    for header in headers:
        if header[0] in CONNECTION_HEADERS:
            continue
        yield header
|
||||
|
||||
|
||||
def _check_sent_host_authority_header(headers: Iterable[Header],
|
||||
hdr_validation_flags: HeaderValidationFlags) -> Generator[Header, None, None]:
|
||||
"""
|
||||
Raises an InvalidHeaderBlockError if we try to send a header block
|
||||
that does not contain an :authority or a Host header, or if
|
||||
the header block contains both fields, but their values do not match.
|
||||
"""
|
||||
# We only expect to see :authority and Host headers on request header
|
||||
# blocks that aren't trailers, so skip this validation if this is a
|
||||
# response header or we're looking at trailer blocks.
|
||||
skip_validation = (
|
||||
hdr_validation_flags.is_response_header or
|
||||
hdr_validation_flags.is_trailer
|
||||
)
|
||||
if skip_validation:
|
||||
return (h for h in headers)
|
||||
|
||||
return _validate_host_authority_header(headers)
|
||||
|
||||
|
||||
def _combine_cookie_fields(headers: Iterable[Header], hdr_validation_flags: HeaderValidationFlags) -> Generator[Header, None, None]:
|
||||
"""
|
||||
RFC 7540 § 8.1.2.5 allows HTTP/2 clients to split the Cookie header field,
|
||||
which must normally appear only once, into multiple fields for better
|
||||
compression. However, they MUST be joined back up again when received.
|
||||
This normalization step applies that transform. The side-effect is that
|
||||
all cookie fields now appear *last* in the header block.
|
||||
"""
|
||||
# There is a problem here about header indexing. Specifically, it's
|
||||
# possible that all these cookies are sent with different header indexing
|
||||
# values. At this point it shouldn't matter too much, so we apply our own
|
||||
# logic and make them never-indexed.
|
||||
cookies: list[bytes] = []
|
||||
for header in headers:
|
||||
if header[0] == b"cookie":
|
||||
cookies.append(header[1])
|
||||
else:
|
||||
yield header
|
||||
if cookies:
|
||||
cookie_val = b"; ".join(cookies)
|
||||
yield NeverIndexedHeaderTuple(b"cookie", cookie_val)
|
||||
|
||||
|
||||
def _split_outbound_cookie_fields(headers: Iterable[Header],
|
||||
hdr_validation_flags: HeaderValidationFlags | None) -> Generator[Header, None, None]:
|
||||
"""
|
||||
RFC 7540 § 8.1.2.5 allows for better compression efficiency,
|
||||
to split the Cookie header field into separate header fields
|
||||
|
||||
We want to do it for outbound requests, as we are doing for
|
||||
inbound.
|
||||
"""
|
||||
for header in headers:
|
||||
assert isinstance(header[0], bytes)
|
||||
assert isinstance(header[1], bytes)
|
||||
if header[0] == b"cookie":
|
||||
for cookie_val in header[1].split(b"; "):
|
||||
if isinstance(header, HeaderTuple):
|
||||
yield header.__class__(header[0], cookie_val)
|
||||
else:
|
||||
yield header[0], cookie_val
|
||||
else:
|
||||
yield header
|
||||
|
||||
|
||||
def normalize_outbound_headers(headers: Iterable[Header],
                               hdr_validation_flags: HeaderValidationFlags | None,
                               should_split_outbound_cookies: bool=False) -> Generator[Header, None, None]:
    """
    Normalizes a header sequence that we are about to send.

    :param headers: The HTTP header set.
    :param hdr_validation_flags: An instance of HeaderValidationFlags.
    :param should_split_outbound_cookies: boolean flag
    """
    # Build the stage pipeline, optionally including cookie splitting, then
    # thread the headers through it lazily.
    stages = [_lowercase_header_names]
    if should_split_outbound_cookies:
        stages.append(_split_outbound_cookie_fields)
    stages += [
        _strip_surrounding_whitespace,
        _strip_connection_headers,
        _secure_headers,
    ]
    for stage in stages:
        headers = stage(headers, hdr_validation_flags)
    return headers
|
||||
|
||||
|
||||
|
||||
def normalize_inbound_headers(headers: Iterable[Header],
                              hdr_validation_flags: HeaderValidationFlags) -> Generator[Header, None, None]:
    """
    Normalizes a header sequence that we have received.

    Currently the only inbound normalization step is re-joining split Cookie
    fields (RFC 7540 § 8.1.2.5).

    :param headers: The HTTP header set.
    :param hdr_validation_flags: An instance of HeaderValidationFlags
    """
    combined = _combine_cookie_fields(headers, hdr_validation_flags)
    return combined
|
||||
|
||||
|
||||
def validate_outbound_headers(headers: Iterable[Header],
                              hdr_validation_flags: HeaderValidationFlags) -> Generator[Header, None, None]:
    """
    Validates and normalizes a header sequence that we are about to send.

    :param headers: The HTTP header set.
    :param hdr_validation_flags: An instance of HeaderValidationFlags.
    """
    # Same lazy-generator pipeline shape as validate_headers, restricted to
    # the checks that make sense for headers we emit ourselves.
    stages = (
        _reject_te,
        _reject_connection_header,
        _reject_pseudo_header_fields,
        _check_sent_host_authority_header,
        _check_path_header,
    )
    for stage in stages:
        headers = stage(headers, hdr_validation_flags)
    return headers
|
||||
|
||||
|
||||
|
||||
class SizeLimitDict(collections.OrderedDict[int, Any]):
    """
    An ``OrderedDict`` that, when constructed with a ``size_limit`` keyword
    argument, evicts its oldest entries so it never holds more than that many
    items. With ``size_limit=None`` (the default) it behaves exactly like a
    plain ``OrderedDict``.
    """

    def __init__(self, *args: dict[int, int], **kwargs: Any) -> None:
        # Pop our own keyword argument before delegating the rest.
        self._size_limit = kwargs.pop("size_limit", None)
        super().__init__(*args, **kwargs)
        self._check_size_limit()

    def __setitem__(self, key: int, value: Any | int) -> None:
        super().__setitem__(key, value)
        self._check_size_limit()

    def _check_size_limit(self) -> None:
        # Evict oldest-first until we are back under the limit.
        limit = self._size_limit
        if limit is None:
            return
        while len(self) > limit:
            self.popitem(last=False)
|
||||
133
path/to/venv/lib/python3.12/site-packages/h2/windows.py
Normal file
133
path/to/venv/lib/python3.12/site-packages/h2/windows.py
Normal file
@@ -0,0 +1,133 @@
|
||||
"""
|
||||
h2/windows
|
||||
~~~~~~~~~~
|
||||
|
||||
Defines tools for managing HTTP/2 flow control windows.
|
||||
|
||||
The objects defined in this module are used to automatically manage HTTP/2
|
||||
flow control windows. Specifically, they keep track of what the size of the
|
||||
window is, how much data has been consumed from that window, and how much data
|
||||
the user has already used. It then implements a basic algorithm that attempts
|
||||
to manage the flow control window without user input, trying to ensure that it
|
||||
does not emit too many WINDOW_UPDATE frames.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from .exceptions import FlowControlError
|
||||
|
||||
# The largest acceptable value for a HTTP/2 flow control window.
LARGEST_FLOW_CONTROL_WINDOW = 2**31 - 1


class WindowManager:
    """
    A basic HTTP/2 window manager.

    Tracks the current size of a flow control window and how many flow
    controlled bytes the application has processed, and decides when it is
    worth emitting a WINDOW_UPDATE frame.

    :param max_window_size: The maximum size of the flow control window.
    :type max_window_size: ``int``
    """

    def __init__(self, max_window_size: int) -> None:
        assert max_window_size <= LARGEST_FLOW_CONTROL_WINDOW
        self.max_window_size = max_window_size
        self.current_window_size = max_window_size
        self._bytes_processed = 0

    def window_consumed(self, size: int) -> None:
        """
        We have received a certain number of bytes from the remote peer. This
        necessarily shrinks the flow control window!

        :param size: The number of flow controlled bytes we received from the
                     remote peer.
        :type size: ``int``
        :returns: Nothing.
        :rtype: ``None``
        """
        self.current_window_size -= size
        if self.current_window_size < 0:
            msg = "Flow control window shrunk below 0"
            raise FlowControlError(msg)

    def window_opened(self, size: int) -> None:
        """
        The flow control window has been incremented, either because of manual
        flow control management or because of the user changing the flow
        control settings. This can have the effect of increasing what we
        consider to be the "maximum" flow control window size.

        This does not increase our view of how many bytes have been processed,
        only of how much space is in the window.

        :param size: The increment to the flow control window we received.
        :type size: ``int``
        :returns: Nothing
        :rtype: ``None``
        """
        self.current_window_size += size
        if self.current_window_size > LARGEST_FLOW_CONTROL_WINDOW:
            msg = f"Flow control window mustn't exceed {LARGEST_FLOW_CONTROL_WINDOW}"
            raise FlowControlError(msg)

        # A window larger than anything seen before raises the ceiling too.
        if self.current_window_size > self.max_window_size:
            self.max_window_size = self.current_window_size

    def process_bytes(self, size: int) -> int | None:
        """
        The application has informed us that it has processed a certain number
        of bytes. This may cause us to want to emit a window update frame. If
        we do want to emit a window update frame, this method will return the
        number of bytes that we should increment the window by.

        :param size: The number of flow controlled bytes that the application
                     has processed.
        :type size: ``int``
        :returns: The number of bytes to increment the flow control window by,
                  or ``None``.
        :rtype: ``int`` or ``None``
        """
        self._bytes_processed += size
        return self._maybe_update_window()

    def _maybe_update_window(self) -> int | None:
        """
        Run the algorithm.

        Our current algorithm can be described like this.

        1. If no bytes have been processed, return immediately: there is no
           meaningful way to hand window space back to the remote peer.
        2. If there is no space in the flow control window, and we have
           processed at least 1024 bytes (or 1/4 of the window, if the window
           is smaller), emit a window update frame. This avoids the risk of
           blocking a stream altogether.
        3. If there is space in the flow control window, and we have processed
           at least 1/2 of the window worth of bytes, emit a window update
           frame. This minimises the number of window update frames we send.

        In a healthy system with large flow control windows, this emits
        WINDOW_UPDATE frames only irregularly, preventing us starving the
        connection with a flood of them when the remote peer sends many very
        small DATA frames.
        """
        # TODO: Can the window be smaller than 1024 bytes? If not, we can
        # streamline this algorithm.
        if not self._bytes_processed:
            return None

        headroom = self.max_window_size - self.current_window_size
        window_empty = self.current_window_size == 0
        small_window_threshold = min(1024, self.max_window_size // 4)

        increment = 0
        # Even when the increment is clipped below _bytes_processed, the
        # counter is reset: we always increment up to the maximum available.
        if ((window_empty and self._bytes_processed > small_window_threshold)
                or self._bytes_processed >= (self.max_window_size // 2)):
            increment = min(self._bytes_processed, headroom)
            self._bytes_processed = 0

        self.current_window_size += increment
        return increment
|
||||
Reference in New Issue
Block a user